Jetson Inference DNN Vision Library
This is the complete list of members for superResNet, including all inherited members.
| Member | Defined in | Attributes |
|--------|------------|------------|
| AllowGPUFallback() const | tensorNet | inline |
| Create() | superResNet | static |
| CreateStream(bool nonBlocking=true) | tensorNet | |
| DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) | tensorNet | static |
| DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) | tensorNet | static |
| DetectNativePrecisions(deviceType device=DEVICE_GPU) | tensorNet | static |
| EnableDebug() | tensorNet | |
| EnableLayerProfiler() | tensorNet | |
| FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static |
| GetDevice() const | tensorNet | inline |
| GetInputHeight() const | superResNet | inline |
| GetInputWidth() const | superResNet | inline |
| GetModelPath() const | tensorNet | inline |
| GetModelType() const | tensorNet | inline |
| GetNetworkTime() | tensorNet | inline |
| GetOutputHeight() const | superResNet | inline |
| GetOutputWidth() const | superResNet | inline |
| GetPrecision() const | tensorNet | inline |
| GetProfilerTime(profilerQuery query) | tensorNet | inline |
| GetProfilerTime(profilerQuery query, profilerDevice device) | tensorNet | inline |
| GetPrototxtPath() const | tensorNet | inline |
| GetScaleFactor() const | superResNet | inline |
| GetStream() const | tensorNet | inline |
| gLogger | tensorNet | protected |
| gProfiler | tensorNet | protected |
| IsModelType(modelType type) const | tensorNet | inline |
| IsPrecision(precisionType type) const | tensorNet | inline |
| LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| mAllowGPUFallback | tensorNet | protected |
| mCacheCalibrationPath | tensorNet | protected |
| mCacheEnginePath | tensorNet | protected |
| mContext | tensorNet | protected |
| mDevice | tensorNet | protected |
| mEnableDebug | tensorNet | protected |
| mEnableProfiler | tensorNet | protected |
| mEngine | tensorNet | protected |
| mEventsCPU | tensorNet | protected |
| mEventsGPU | tensorNet | protected |
| mHeight | tensorNet | protected |
| mInfer | tensorNet | protected |
| mInputBlobName | tensorNet | protected |
| mInputCPU | tensorNet | protected |
| mInputCUDA | tensorNet | protected |
| mInputDims | tensorNet | protected |
| mInputSize | tensorNet | protected |
| mMaxBatchSize | tensorNet | protected |
| mMeanPath | tensorNet | protected |
| mModelPath | tensorNet | protected |
| mModelType | tensorNet | protected |
| mOutputs | tensorNet | protected |
| mPrecision | tensorNet | protected |
| mProfilerQueriesDone | tensorNet | protected |
| mProfilerQueriesUsed | tensorNet | protected |
| mProfilerTimes | tensorNet | protected |
| mPrototxtPath | tensorNet | protected |
| mStream | tensorNet | protected |
| mWidth | tensorNet | protected |
| PrintProfilerTimes() | tensorNet | inline |
| ProfileModel(const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream) | tensorNet | protected |
| PROFILER_BEGIN(profilerQuery query) | tensorNet | inline protected |
| PROFILER_END(profilerQuery query) | tensorNet | inline protected |
| PROFILER_QUERY(profilerQuery query) | tensorNet | inline protected |
| SetStream(cudaStream_t stream) | tensorNet | |
| superResNet() | superResNet | protected |
| tensorNet() | tensorNet | protected |
| UpscaleRGBA(float *input, uint32_t inputWidth, uint32_t inputHeight, float *output, uint32_t outputWidth, uint32_t outputHeight, float maxPixelValue=255.0f) | superResNet | |
| UpscaleRGBA(float *input, float *output, float maxPixelValue=255.0f) | superResNet | |
| ~superResNet() | superResNet | |
| ~tensorNet() | tensorNet | virtual |
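
Since a member list shows signatures but not usage, the sketch below illustrates how these members fit together, based only on the table above. Return types are not listed here, so the sketch assumes the library's usual conventions: Create() returning a superResNet* (NULL on failure) and UpscaleRGBA() returning a bool success flag. It also assumes cudaAllocMapped() from jetson-utils is available for allocating CUDA-mapped buffers, and that the include paths match the installed layout; see superResNet.h for the authoritative declarations.

```cpp
#include "superResNet.h"        // header location may differ by install layout
#include "cudaMappedMemory.h"   // cudaAllocMapped() from jetson-utils (assumed available)

int main()
{
	// load the super-resolution network with TensorRT
	// (Create() is assumed to return NULL on failure, per library convention)
	superResNet* net = superResNet::Create();

	if( !net )
		return 1;

	// input/output geometry is fixed by the network
	const uint32_t inWidth   = net->GetInputWidth();
	const uint32_t inHeight  = net->GetInputHeight();
	const uint32_t outWidth  = net->GetOutputWidth();
	const uint32_t outHeight = net->GetOutputHeight();

	// allocate the RGBA input and upscaled output buffers
	// (4 float channels per pixel); in practice the input would be
	// filled from an image loader rather than left blank
	float* imgInput  = NULL;
	float* imgOutput = NULL;

	if( !cudaAllocMapped((void**)&imgInput,  inWidth  * inHeight  * 4 * sizeof(float)) ||
	    !cudaAllocMapped((void**)&imgOutput, outWidth * outHeight * 4 * sizeof(float)) )
	{
		delete net;
		return 1;
	}

	// run the super-resolution network (pixel values in the 0-255 range)
	if( !net->UpscaleRGBA(imgInput, inWidth, inHeight,
	                      imgOutput, outWidth, outHeight, 255.0f) )
	{
		delete net;
		return 1;
	}

	// optional: print the per-layer/network timing collected by tensorNet
	net->PrintProfilerTimes();

	delete net;
	return 0;
}
```

The shorter UpscaleRGBA(input, output, maxPixelValue) overload presumably operates on the network's native dimensions, so its buffers must match GetInputWidth()/GetInputHeight() and GetOutputWidth()/GetOutputHeight(); GetScaleFactor() reports the upscaling ratio between the two.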