Jetson Inference
DNN Vision Library

imageNet Class Reference
Image recognition with classification networks, using TensorRT. More...
#include <imageNet.h>
Public Types | |
enum | NetworkType { CUSTOM, ALEXNET, GOOGLENET, GOOGLENET_12, RESNET_18, RESNET_50, RESNET_101, RESNET_152, VGG_16, VGG_19, INCEPTION_V4 } |
Network choice enumeration. More... | |
Public Member Functions | |
virtual | ~imageNet () |
Destroy. More... | |
int | Classify (float *rgba, uint32_t width, uint32_t height, float *confidence=NULL) |
Determine the maximum likelihood image class. More... | |
int | Classify (float *confidence=NULL) |
Determine the maximum likelihood image class. More... | |
bool | PreProcess (float *rgba, uint32_t width, uint32_t height) |
Perform pre-processing on the image to apply mean-value subtraction and to organize the data into NCHW format and BGR colorspace that the networks expect. More... | |
bool | Process () |
Process the network, without determining the classification argmax. More... | |
uint32_t | GetNumClasses () const |
Retrieve the number of image recognition classes (typically 1000) More... | |
const char * | GetClassDesc (uint32_t index) const |
Retrieve the description of a particular class. More... | |
const char * | GetClassSynset (uint32_t index) const |
Retrieve the class synset category of a particular class. More... | |
const char * | GetClassPath () const |
Retrieve the path to the file containing the class descriptions. More... | |
NetworkType | GetNetworkType () const |
Retrieve the network type (alexnet or googlenet) More... | |
const char * | GetNetworkName () const |
Retrieve a string describing the network name. More... | |
Public Member Functions inherited from tensorNet | |
virtual | ~tensorNet () |
Destroy. More... | |
bool | LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
Load a new network instance. More... | |
bool | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
Load a new network instance with multiple output layers. More... | |
bool | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
Load a new network instance (this variant is used for UFF models) More... | |
void | EnableLayerProfiler () |
Manually enable layer profiling times. More... | |
void | EnableDebug () |
Manually enable debug messages and synchronization. More... | |
bool | AllowGPUFallback () const |
Return true if GPU fallback is enabled. More... | |
deviceType | GetDevice () const |
Retrieve the device being used for execution. More... | |
precisionType | GetPrecision () const |
Retrieve the type of precision being used. More... | |
bool | IsPrecision (precisionType type) const |
Check if a particular precision is being used. More... | |
cudaStream_t | GetStream () const |
Retrieve the stream that the device is operating on. More... | |
cudaStream_t | CreateStream (bool nonBlocking=true) |
Create and use a new stream for execution. More... | |
void | SetStream (cudaStream_t stream) |
Set the stream that the device is operating on. More... | |
const char * | GetPrototxtPath () const |
Retrieve the path to the network prototxt file. More... | |
const char * | GetModelPath () const |
Retrieve the path to the network model file. More... | |
modelType | GetModelType () const |
Retrieve the format of the network model. More... | |
bool | IsModelType (modelType type) const |
Return true if the model is of the specified format. More... | |
float | GetNetworkTime () |
Retrieve the network runtime (in milliseconds). More... | |
float2 | GetProfilerTime (profilerQuery query) |
Retrieve the profiler runtime (in milliseconds). More... | |
float | GetProfilerTime (profilerQuery query, profilerDevice device) |
Retrieve the profiler runtime (in milliseconds). More... | |
void | PrintProfilerTimes () |
Print the profiler times (in milliseconds). More... | |
Static Public Member Functions | |
static NetworkType | NetworkTypeFromStr (const char *model_name) |
Parse a string to one of the built-in pretrained models. More... | |
static const char * | NetworkTypeToStr (NetworkType network) |
Convert a NetworkType enum to a string. More... | |
static imageNet * | Create (NetworkType networkType=GOOGLENET, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) |
Load a new network instance. More... | |
static imageNet * | Create (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_labels, const char *input=IMAGENET_DEFAULT_INPUT, const char *output=IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) |
Load a new network instance. More... | |
static imageNet * | Create (int argc, char **argv) |
Load a new network instance by parsing the command line. More... | |
static const char * | Usage () |
Usage string for command line arguments to Create() More... | |
Static Public Member Functions inherited from tensorNet | |
static precisionType | FindFastestPrecision (deviceType device=DEVICE_GPU, bool allowInt8=true) |
Determine the fastest native precision on a device. More... | |
static std::vector< precisionType > | DetectNativePrecisions (deviceType device=DEVICE_GPU) |
Detect the precisions supported natively on a device. More... | |
static bool | DetectNativePrecision (const std::vector< precisionType > &nativeTypes, precisionType type) |
Detect if a particular precision is supported natively. More... | |
static bool | DetectNativePrecision (precisionType precision, deviceType device=DEVICE_GPU) |
Detect if a particular precision is supported natively. More... | |
Protected Member Functions | |
imageNet () | |
bool | init (NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) |
bool | init (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) |
bool | loadClassInfo (const char *filename) |
Protected Member Functions inherited from tensorNet | |
tensorNet () | |
Constructor. More... | |
bool | ProfileModel (const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream) |
Create and output an optimized network model. More... | |
void | PROFILER_BEGIN (profilerQuery query) |
Begin a profiling query, before network is run. More... | |
void | PROFILER_END (profilerQuery query) |
End a profiling query, after the network is run. More... | |
bool | PROFILER_QUERY (profilerQuery query) |
Query the CUDA part of a profiler query. More... | |
Detailed Description

Image recognition with classification networks, using TensorRT.
Member Enumeration Documentation

enum imageNet::NetworkType

Network choice enumeration.
Constructor & Destructor Documentation

virtual imageNet::~imageNet ( )   [virtual]

Destroy.
imageNet::imageNet ( )   [protected]
Member Function Documentation

int imageNet::Classify ( float* rgba, uint32_t width, uint32_t height, float* confidence = NULL )
Determine the maximum likelihood image class.
This function first performs pre-processing on the image (mean-value subtraction and conversion to NCHW format); see PreProcess().
rgba | float4 input image in CUDA device memory. |
width | width of the input image in pixels. |
height | height of the input image in pixels. |
confidence | optional pointer to float filled with confidence value. |
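A minimal usage sketch. It assumes a loadImageRGBA() helper from jetson-utils for loading a float4 RGBA image into CUDA device memory; the include paths and the image filename are placeholders and may differ per install.

#include <imageNet.h>
#include <loadImage.h>   // loadImageRGBA() -- assumed jetson-utils helper; path may vary
#include <cstdio>

int main( int argc, char** argv )
{
    // load a built-in GoogleNet classification network with TensorRT
    imageNet* net = imageNet::Create(imageNet::GOOGLENET);

    if( !net )
        return -1;

    // load a float4 RGBA image into shared CPU/GPU memory ("my_image.jpg" is a placeholder)
    float* imgCPU  = NULL;
    float* imgCUDA = NULL;
    int width = 0, height = 0;

    if( !loadImageRGBA("my_image.jpg", (float4**)&imgCPU, (float4**)&imgCUDA, &width, &height) )
        return -1;

    // classify the image; a negative return value indicates an error
    float confidence = 0.0f;
    const int classIndex = net->Classify(imgCUDA, width, height, &confidence);

    if( classIndex >= 0 )
        printf("class #%i (%s), confidence %f\n", classIndex, net->GetClassDesc(classIndex), confidence);

    delete net;
    return 0;
}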
int imageNet::Classify ( float* confidence = NULL )
Determine the maximum likelihood image class.
confidence | optional pointer to float filled with confidence value. |
static imageNet* imageNet::Create ( NetworkType networkType = GOOGLENET, uint32_t maxBatchSize = DEFAULT_MAX_BATCH_SIZE, precisionType precision = TYPE_FASTEST, deviceType device = DEVICE_GPU, bool allowGPUFallback = true )   [static]

Load a new network instance.
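For illustration, a brief sketch of creating one of the built-in networks while overriding the defaults shown above:

// create ResNet-18, optimized for a max batch size of 2,
// letting TensorRT pick the fastest native precision on the GPU
imageNet* net = imageNet::Create(imageNet::RESNET_18, 2, TYPE_FASTEST, DEVICE_GPU, true);

if( !net )
    printf("imageNet::Create() failed to load the network\n");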
static imageNet* imageNet::Create ( const char* prototxt_path, const char* model_path, const char* mean_binary, const char* class_labels, const char* input = IMAGENET_DEFAULT_INPUT, const char* output = IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize = DEFAULT_MAX_BATCH_SIZE, precisionType precision = TYPE_FASTEST, deviceType device = DEVICE_GPU, bool allowGPUFallback = true )   [static]

Load a new network instance.
prototxt_path | File path to the deployable network prototxt |
model_path | File path to the caffemodel |
mean_binary | File path to the mean value binary proto (can be NULL) |
class_labels | File path to list of class name labels |
input | Name of the input layer blob. |
output | Name of the output layer blob. |
maxBatchSize | The maximum batch size that the network will support and be optimized for. |
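A sketch of loading a custom caffe model through this overload; every file path below is a placeholder for your own model files.

imageNet* net = imageNet::Create("deploy.prototxt",         // prototxt_path (placeholder)
                                 "snapshot.caffemodel",     // model_path (placeholder)
                                 NULL,                      // mean_binary is optional and can be NULL
                                 "labels.txt",              // class_labels (placeholder)
                                 IMAGENET_DEFAULT_INPUT,    // name of the input layer blob
                                 IMAGENET_DEFAULT_OUTPUT);  // name of the output layer blob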
static imageNet* imageNet::Create ( int argc, char** argv )   [static]

Load a new network instance by parsing the command line.
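A sketch of command-line creation combined with Usage(); the exact flags accepted depend on the library version.

int main( int argc, char** argv )
{
    // let imageNet parse the network selection and model paths from the command line
    imageNet* net = imageNet::Create(argc, argv);

    if( !net )
    {
        printf("%s", imageNet::Usage());   // print the supported command-line arguments
        return -1;
    }

    // ... classify images, then clean up
    delete net;
    return 0;
}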
const char* imageNet::GetClassDesc ( uint32_t index ) const   [inline]

Retrieve the description of a particular class.
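For example, GetNumClasses(), GetClassSynset(), and GetClassDesc() can be combined to enumerate the labels of the loaded network (net is assumed to be a valid imageNet instance):

// print every class label known to the network
for( uint32_t n = 0; n < net->GetNumClasses(); n++ )
    printf("class %u  %s  '%s'\n", n, net->GetClassSynset(n), net->GetClassDesc(n));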
const char* imageNet::GetClassPath ( ) const   [inline]

Retrieve the path to the file containing the class descriptions.
const char* imageNet::GetClassSynset ( uint32_t index ) const   [inline]

Retrieve the class synset category of a particular class.
const char* imageNet::GetNetworkName ( ) const   [inline]

Retrieve a string describing the network name.
NetworkType imageNet::GetNetworkType ( ) const   [inline]

Retrieve the network type (alexnet or googlenet).
uint32_t imageNet::GetNumClasses ( ) const   [inline]

Retrieve the number of image recognition classes (typically 1000).
bool imageNet::init ( NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback )   [protected]

bool imageNet::init ( const char* prototxt_path, const char* model_path, const char* mean_binary, const char* class_path, const char* input, const char* output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback )   [protected]

bool imageNet::loadClassInfo ( const char* filename )   [protected]
static NetworkType imageNet::NetworkTypeFromStr ( const char* model_name )   [static]

Parse a string to one of the built-in pretrained models.
Valid names include "alexnet", "googlenet", "googlenet-12", "googlenet_12", etc.
static const char* imageNet::NetworkTypeToStr ( NetworkType network )   [static]

Convert a NetworkType enum to a string.
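A small sketch of round-tripping between model-name strings and the NetworkType enum:

// "googlenet-12" is one of the valid model-name strings listed above
const imageNet::NetworkType type = imageNet::NetworkTypeFromStr("googlenet-12");
printf("selected network: %s\n", imageNet::NetworkTypeToStr(type));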
bool imageNet::PreProcess ( float* rgba, uint32_t width, uint32_t height )
Perform pre-processing on the image to apply mean-value subtraction and to organize the data into NCHW format and BGR colorspace that the networks expect.
After calling PreProcess(), you can call Classify() without supplying all the parameters.
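A short sketch of this two-step flow; net, imgCUDA, width, and height are assumed to come from earlier setup, as in the Classify() example above.

// pre-process the image once, then classify it without re-supplying the image parameters
if( net->PreProcess(imgCUDA, width, height) )
{
    float confidence = 0.0f;
    const int classIndex = net->Classify(&confidence);   // operates on the pre-processed image

    if( classIndex >= 0 )
        printf("class #%i (%s), confidence %f\n", classIndex, net->GetClassDesc(classIndex), confidence);
}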
bool imageNet::Process ( )
Process the network, without determining the classification argmax.
To perform the actual classification via post-processing, Classify() should be used instead.
static const char* imageNet::Usage ( )   [inline, static]

Usage string for command line arguments to Create()