|
| virtual | ~imageNet () |
| | Destroy. More...
|
| |
| int | Classify (float *rgba, uint32_t width, uint32_t height, float *confidence=NULL) |
| | Determine the maximum likelihood image class. More...
|
| |
| int | Classify (float *confidence=NULL) |
| | Determine the maximum likelihood image class. More...
|
| |
| bool | PreProcess (float *rgba, uint32_t width, uint32_t height) |
| | Perform pre-processing on the image to apply mean-value subtraction and to organize the data into NCHW format and BGR colorspace that the networks expect. More...
|
| |
| bool | Process () |
| | Process the network, without determining the classification argmax. More...
|
| |
| uint32_t | GetNumClasses () const |
| | Retrieve the number of image recognition classes (typically 1000). More...
|
| |
| const char * | GetClassDesc (uint32_t index) const |
| | Retrieve the description of a particular class. More...
|
| |
| const char * | GetClassSynset (uint32_t index) const |
| | Retrieve the class synset category of a particular class. More...
|
| |
| const char * | GetClassPath () const |
| | Retrieve the path to the file containing the class descriptions. More...
|
| |
| NetworkType | GetNetworkType () const |
| | Retrieve the network type (alexnet or googlenet). More...
|
| |
| const char * | GetNetworkName () const |
| | Retrieve a string describing the network name. More...
|
| |
| virtual | ~tensorNet () |
| | Destroy. More...
|
| |
| bool | LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
| | Load a new network instance. More...
|
| |
| bool | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
| | Load a new network instance with multiple output layers. More...
|
| |
| bool | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
| | Load a new network instance (this variant is used for UFF models). More...
|
| |
| void | EnableLayerProfiler () |
| | Manually enable layer profiling times. More...
|
| |
| void | EnableDebug () |
| | Manually enable debug messages and synchronization. More...
|
| |
| bool | AllowGPUFallback () const |
| | Return true if GPU fallback is enabled. More...
|
| |
| deviceType | GetDevice () const |
| | Retrieve the device being used for execution. More...
|
| |
| precisionType | GetPrecision () const |
| | Retrieve the type of precision being used. More...
|
| |
| bool | IsPrecision (precisionType type) const |
| | Check if a particular precision is being used. More...
|
| |
| cudaStream_t | GetStream () const |
| | Retrieve the stream that the device is operating on. More...
|
| |
| cudaStream_t | CreateStream (bool nonBlocking=true) |
| | Create and use a new stream for execution. More...
|
| |
| void | SetStream (cudaStream_t stream) |
| | Set the stream that the device is operating on. More...
|
| |
| const char * | GetPrototxtPath () const |
| | Retrieve the path to the network prototxt file. More...
|
| |
| const char * | GetModelPath () const |
| | Retrieve the path to the network model file. More...
|
| |
| modelType | GetModelType () const |
| | Retrieve the format of the network model. More...
|
| |
| bool | IsModelType (modelType type) const |
| | Return true if the model is of the specified format. More...
|
| |
| float | GetNetworkTime () |
| | Retrieve the network runtime (in milliseconds). More...
|
| |
| float2 | GetProfilerTime (profilerQuery query) |
| | Retrieve the profiler runtime (in milliseconds). More...
|
| |
| float | GetProfilerTime (profilerQuery query, profilerDevice device) |
| | Retrieve the profiler runtime (in milliseconds). More...
|
| |
| void | PrintProfilerTimes () |
| | Print the profiler times (in milliseconds). More...
|
| |
|
| static NetworkType | NetworkTypeFromStr (const char *model_name) |
| | Parse a string to one of the built-in pretrained models. More...
|
| |
| static const char * | NetworkTypeToStr (NetworkType network) |
| | Convert a NetworkType enum to a string. More...
|
| |
| static imageNet * | Create (NetworkType networkType=GOOGLENET, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) |
| | Load a new network instance. More...
|
| |
| static imageNet * | Create (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_labels, const char *input=IMAGENET_DEFAULT_INPUT, const char *output=IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) |
| | Load a new network instance. More...
|
| |
| static imageNet * | Create (int argc, char **argv) |
| | Load a new network instance by parsing the command line. More...
|
| |
| static const char * | Usage () |
| | Usage string for command line arguments to Create(). More...
|
| |
| static precisionType | FindFastestPrecision (deviceType device=DEVICE_GPU, bool allowInt8=true) |
| | Determine the fastest native precision on a device. More...
|
| |
| static std::vector< precisionType > | DetectNativePrecisions (deviceType device=DEVICE_GPU) |
| | Detect the precisions supported natively on a device. More...
|
| |
| static bool | DetectNativePrecision (const std::vector< precisionType > &nativeTypes, precisionType type) |
| | Detect if a particular precision is supported natively. More...
|
| |
| static bool | DetectNativePrecision (precisionType precision, deviceType device=DEVICE_GPU) |
| | Detect if a particular precision is supported natively. More...
|
| |
|
| | imageNet () |
| |
| bool | init (NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) |
| |
| bool | init (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) |
| |
| bool | loadClassInfo (const char *filename) |
| |
| | tensorNet () |
| | Constructor. More...
|
| |
| bool | ProfileModel (const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream) |
| | Create and output an optimized network model. More...
|
| |
| void | PROFILER_BEGIN (profilerQuery query) |
| | Begin a profiling query, before network is run. More...
|
| |
| void | PROFILER_END (profilerQuery query) |
| | End a profiling query, after the network is run. More...
|
| |
| bool | PROFILER_QUERY (profilerQuery query) |
| | Query the CUDA part of a profiler query. More...
|
| |
Image recognition with classification networks, using TensorRT.