Jetson Inference - DNN Vision Library
This is the complete list of members for tensorNet, including all inherited members.

| Member | Class | Attributes |
| --- | --- | --- |
| AllowGPUFallback() const | tensorNet | inline |
| ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) | tensorNet | protected |
| CreateStream(bool nonBlocking=true) | tensorNet | |
| DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) | tensorNet | static |
| DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) | tensorNet | static |
| DetectNativePrecisions(deviceType device=DEVICE_GPU) | tensorNet | static |
| EnableDebug() | tensorNet | |
| EnableLayerProfiler() | tensorNet | |
| FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static |
| GenerateColor(uint32_t classID, float alpha=255.0f) | tensorNet | static |
| GetDevice() const | tensorNet | inline |
| GetInputDims(uint32_t layer=0) const | tensorNet | inline |
| GetInputHeight(uint32_t layer=0) const | tensorNet | inline |
| GetInputLayers() const | tensorNet | inline |
| GetInputPtr(uint32_t layer=0) const | tensorNet | inline |
| GetInputSize(uint32_t layer=0) const | tensorNet | inline |
| GetInputWidth(uint32_t layer=0) const | tensorNet | inline |
| GetModelFilename() const | tensorNet | inline |
| GetModelPath() const | tensorNet | inline |
| GetModelType() const | tensorNet | inline |
| GetNetworkFPS() | tensorNet | inline |
| GetNetworkName() const | tensorNet | inline |
| GetNetworkTime() | tensorNet | inline |
| GetOutputDims(uint32_t layer=0) const | tensorNet | inline |
| GetOutputHeight(uint32_t layer=0) const | tensorNet | inline |
| GetOutputLayers() const | tensorNet | inline |
| GetOutputPtr(uint32_t layer=0) const | tensorNet | inline |
| GetOutputSize(uint32_t layer=0) const | tensorNet | inline |
| GetOutputWidth(uint32_t layer=0) const | tensorNet | inline |
| GetPrecision() const | tensorNet | inline |
| GetProfilerTime(profilerQuery query) | tensorNet | inline |
| GetProfilerTime(profilerQuery query, profilerDevice device) | tensorNet | inline |
| GetPrototxtPath() const | tensorNet | inline |
| GetStream() const | tensorNet | inline |
| gLogger | tensorNet | protected |
| gProfiler | tensorNet | protected |
| IsModelType(modelType type) const | tensorNet | inline |
| IsPrecision(precisionType type) const | tensorNet | inline |
| LoadClassColors(const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static |
| LoadClassColors(const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static |
| LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1) | tensorNet | static |
| LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1) | tensorNet | static |
| LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | |
| LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | |
| LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | |
| LoadEngine(const char *filename, char **stream, size_t *size) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
| mAllowGPUFallback | tensorNet | protected |
| mBindings | tensorNet | protected |
| mCacheCalibrationPath | tensorNet | protected |
| mCacheEnginePath | tensorNet | protected |
| mChecksumPath | tensorNet | protected |
| mContext | tensorNet | protected |
| mDevice | tensorNet | protected |
| mEnableDebug | tensorNet | protected |
| mEnableProfiler | tensorNet | protected |
| mEngine | tensorNet | protected |
| mEventsCPU | tensorNet | protected |
| mEventsGPU | tensorNet | protected |
| mInfer | tensorNet | protected |
| mInputs | tensorNet | protected |
| mMaxBatchSize | tensorNet | protected |
| mMeanPath | tensorNet | protected |
| mModelFile | tensorNet | protected |
| mModelPath | tensorNet | protected |
| mModelType | tensorNet | protected |
| mOutputs | tensorNet | protected |
| mPrecision | tensorNet | protected |
| mProfilerQueriesDone | tensorNet | protected |
| mProfilerQueriesUsed | tensorNet | protected |
| mProfilerTimes | tensorNet | protected |
| mPrototxtPath | tensorNet | protected |
| mStream | tensorNet | protected |
| mWorkspaceSize | tensorNet | protected |
| PrintProfilerTimes() | tensorNet | inline |
| ProcessNetwork(bool sync=true) | tensorNet | protected |
| ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) | tensorNet | protected |
| PROFILER_BEGIN(profilerQuery query) | tensorNet | inline protected |
| PROFILER_END(profilerQuery query) | tensorNet | inline protected |
| PROFILER_QUERY(profilerQuery query) | tensorNet | inline protected |
| SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static |
| SetStream(cudaStream_t stream) | tensorNet | |
| tensorNet() | tensorNet | protected |
| ValidateEngine(const char *model_path, const char *cache_path, const char *checksum_path) | tensorNet | protected |
| ~tensorNet() | tensorNet | virtual |
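As a rough illustration of how these members fit together, here is a minimal sketch, not taken from the library documentation. Since `tensorNet()` and `ProcessNetwork()` are protected (see the list above), applications subclass tensorNet, as the bundled networks such as imageNet and detectNet do. The subclass name, the `Infer()` wrapper, and the model filenames below are placeholders.

```cpp
#include <jetson-inference/tensorNet.h>

#include <cstdio>

// hypothetical subclass: exposes the protected constructor and
// the protected ProcessNetwork() inference call
class myNet : public tensorNet
{
public:
    myNet() {}

    bool Infer() { return ProcessNetwork(/*sync=*/true); }
};

int main()
{
    myNet net;

    // parse the model and build (or load a cached) TensorRT engine,
    // selecting the fastest native precision (TYPE_FASTEST is the default)
    if( !net.LoadNetwork("deploy.prototxt", "model.caffemodel") )
        return 1;

    // query the geometry of the loaded engine's bindings
    printf("input:  %ux%u\n", net.GetInputWidth(), net.GetInputHeight());
    printf("output: %ux%u\n", net.GetOutputWidth(), net.GetOutputHeight());

    // the caller fills GetInputPtr() with pre-processed data
    // before running the network (pre-processing omitted here)
    float* input = net.GetInputPtr();
    (void)input;

    if( !net.Infer() )
        return 1;

    // timing collected via the profiler members (gProfiler, mEventsGPU)
    net.PrintProfilerTimes();
    printf("network time: %.2f ms (%.1f FPS)\n",
           net.GetNetworkTime(), net.GetNetworkFPS());

    return 0;
}
```

Note the split in the table between build-time members (ConfigureBuilder, ProfileModel, SelectPrecision, the mCache*Path engine/calibration cache paths) and run-time members (ProcessNetwork, the Get*/Is* accessors, the profiler queries): LoadNetwork handles the former once up front, after which only the run-time members are exercised per frame.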