Jetson Inference
DNN Vision Library

Image recognition with classification networks, using TensorRT. More...

#include <imageNet.h>

Inheritance diagram for imageNet:
tensorNet

Public Types

enum  NetworkType {
  CUSTOM, ALEXNET, GOOGLENET, GOOGLENET_12,
  RESNET_18, RESNET_50, RESNET_101, RESNET_152,
  VGG_16, VGG_19, INCEPTION_V4
}
 Network choice enumeration. More...
 

Public Member Functions

virtual ~imageNet ()
 Destroy. More...
 
template<typename T >
int Classify (T *image, uint32_t width, uint32_t height, float *confidence=NULL)
 Determine the maximum likelihood image class. More...
 
int Classify (void *image, uint32_t width, uint32_t height, imageFormat format, float *confidence=NULL)
 Determine the maximum likelihood image class. More...
 
int Classify (float *rgba, uint32_t width, uint32_t height, float *confidence=NULL, imageFormat format=IMAGE_RGBA32F)
 Determine the maximum likelihood image class. More...
 
uint32_t GetNumClasses () const
 Retrieve the number of image recognition classes (typically 1000) More...
 
const char * GetClassDesc (uint32_t index) const
 Retrieve the description of a particular class. More...
 
const char * GetClassSynset (uint32_t index) const
 Retrieve the class synset category of a particular class. More...
 
const char * GetClassPath () const
 Retrieve the path to the file containing the class descriptions. More...
 
NetworkType GetNetworkType () const
 Retrieve the network type (alexnet or googlenet) More...
 
const char * GetNetworkName () const
 Retrieve a string describing the network name. More...
 
- Public Member Functions inherited from tensorNet
virtual ~tensorNet ()
 Destroy. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple output layers. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple input layers. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance (this variant is used for UFF models) More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple input layers (used for UFF models) More...
 
bool LoadEngine (const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)
 Load a network instance from a serialized engine plan file. More...
 
bool LoadEngine (char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)
 Load a network instance from a serialized engine plan file. More...
 
bool LoadEngine (nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)
 Load network resources from an existing TensorRT engine instance. More...
 
bool LoadEngine (const char *filename, char **stream, size_t *size)
 Load a serialized engine plan file into memory. More...
 
void EnableLayerProfiler ()
 Manually enable layer profiling times. More...
 
void EnableDebug ()
 Manually enable debug messages and synchronization. More...
 
bool AllowGPUFallback () const
 Return true if GPU fallback is enabled. More...
 
deviceType GetDevice () const
 Retrieve the device being used for execution. More...
 
precisionType GetPrecision () const
 Retrieve the type of precision being used. More...
 
bool IsPrecision (precisionType type) const
 Check if a particular precision is being used. More...
 
cudaStream_t GetStream () const
 Retrieve the stream that the device is operating on. More...
 
cudaStream_t CreateStream (bool nonBlocking=true)
 Create and use a new stream for execution. More...
 
void SetStream (cudaStream_t stream)
 Set the stream that the device is operating on. More...
 
const char * GetPrototxtPath () const
 Retrieve the path to the network prototxt file. More...
 
const char * GetModelPath () const
 Retrieve the path to the network model file. More...
 
modelType GetModelType () const
 Retrieve the format of the network model. More...
 
bool IsModelType (modelType type) const
 Return true if the model is of the specified format. More...
 
uint32_t GetInputLayers () const
 Retrieve the number of input layers to the network. More...
 
uint32_t GetOutputLayers () const
 Retrieve the number of output layers to the network. More...
 
Dims3 GetInputDims (uint32_t layer=0) const
 Retrieve the dimensions of network input layer. More...
 
uint32_t GetInputWidth (uint32_t layer=0) const
 Retrieve the width of network input layer. More...
 
uint32_t GetInputHeight (uint32_t layer=0) const
 Retrieve the height of network input layer. More...
 
uint32_t GetInputSize (uint32_t layer=0) const
 Retrieve the size (in bytes) of network input layer. More...
 
Dims3 GetOutputDims (uint32_t layer=0) const
 Retrieve the dimensions of network output layer. More...
 
uint32_t GetOutputWidth (uint32_t layer=0) const
 Retrieve the width of network output layer. More...
 
uint32_t GetOutputHeight (uint32_t layer=0) const
 Retrieve the height of network output layer. More...
 
uint32_t GetOutputSize (uint32_t layer=0) const
 Retrieve the size (in bytes) of network output layer. More...
 
float GetNetworkFPS ()
 Retrieve the network frames per second (FPS). More...
 
float GetNetworkTime ()
 Retrieve the network runtime (in milliseconds). More...
 
float2 GetProfilerTime (profilerQuery query)
 Retrieve the profiler runtime (in milliseconds). More...
 
float GetProfilerTime (profilerQuery query, profilerDevice device)
 Retrieve the profiler runtime (in milliseconds). More...
 
void PrintProfilerTimes ()
 Print the profiler times (in milliseconds). More...
 

Static Public Member Functions

static NetworkType NetworkTypeFromStr (const char *model_name)
 Parse a string to one of the built-in pretrained models. More...
 
static const char * NetworkTypeToStr (NetworkType network)
 Convert a NetworkType enum to a string. More...
 
static imageNet * Create (NetworkType networkType=GOOGLENET, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)
 Load a new network instance. More...
 
static imageNet * Create (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_labels, const char *input=IMAGENET_DEFAULT_INPUT, const char *output=IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)
 Load a new network instance. More...
 
static imageNet * Create (int argc, char **argv)
 Load a new network instance by parsing the command line. More...
 
static imageNet * Create (const commandLine &cmdLine)
 Load a new network instance by parsing the command line. More...
 
static const char * Usage ()
 Usage string for command line arguments to Create() More...
 
static bool LoadClassInfo (const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1)
 Load class descriptions from a label file. More...
 
static bool LoadClassInfo (const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1)
 Load class descriptions and synset strings from a label file. More...
 
- Static Public Member Functions inherited from tensorNet
static precisionType SelectPrecision (precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true)
 Resolve a desired precision to a specific one that's available. More...
 
static precisionType FindFastestPrecision (deviceType device=DEVICE_GPU, bool allowInt8=true)
 Determine the fastest native precision on a device. More...
 
static std::vector< precisionType > DetectNativePrecisions (deviceType device=DEVICE_GPU)
 Detect the precisions supported natively on a device. More...
 
static bool DetectNativePrecision (const std::vector< precisionType > &nativeTypes, precisionType type)
 Detect if a particular precision is supported natively. More...
 
static bool DetectNativePrecision (precisionType precision, deviceType device=DEVICE_GPU)
 Detect if a particular precision is supported natively. More...
 

Protected Member Functions

 imageNet ()
 
int Classify (float *confidence=NULL)
 
bool PreProcess (void *image, uint32_t width, uint32_t height, imageFormat format)
 
bool Process ()
 
bool init (NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)
 
bool init (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)
 
bool loadClassInfo (const char *filename, int expectedClasses=-1)
 
- Protected Member Functions inherited from tensorNet
 tensorNet ()
 Constructor. More...
 
bool ProcessNetwork (bool sync=true)
 Execute processing of the network. More...
 
bool ProfileModel (const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize)
 Create and output an optimized network model. More...
 
bool ConfigureBuilder (nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator)
 Configure builder options. More...
 
void PROFILER_BEGIN (profilerQuery query)
 Begin a profiling query, before network is run. More...
 
void PROFILER_END (profilerQuery query)
 End a profiling query, after the network is run. More...
 
bool PROFILER_QUERY (profilerQuery query)
 Query the CUDA part of a profiler query. More...
 

Protected Attributes

uint32_t mOutputClasses
 
std::vector< std::string > mClassSynset
 
std::vector< std::string > mClassDesc
 
std::string mClassPath
 
NetworkType mNetworkType
 
- Protected Attributes inherited from tensorNet
tensorNet::Logger gLogger
 
tensorNet::Profiler gProfiler
 
std::string mPrototxtPath
 
std::string mModelPath
 
std::string mMeanPath
 
std::string mCacheEnginePath
 
std::string mCacheCalibrationPath
 
deviceType mDevice
 
precisionType mPrecision
 
modelType mModelType
 
cudaStream_t mStream
 
cudaEvent_t mEventsGPU [PROFILER_TOTAL *2]
 
timespec mEventsCPU [PROFILER_TOTAL *2]
 
nvinfer1::IRuntime * mInfer
 
nvinfer1::ICudaEngine * mEngine
 
nvinfer1::IExecutionContext * mContext
 
float2 mProfilerTimes [PROFILER_TOTAL+1]
 
uint32_t mProfilerQueriesUsed
 
uint32_t mProfilerQueriesDone
 
uint32_t mWorkspaceSize
 
uint32_t mMaxBatchSize
 
bool mEnableProfiler
 
bool mEnableDebug
 
bool mAllowGPUFallback
 
void ** mBindings
 
std::vector< layerInfo > mInputs
 
std::vector< layerInfo > mOutputs
 

Detailed Description

Image recognition with classification networks, using TensorRT.

Member Enumeration Documentation

◆ NetworkType

Network choice enumeration.

Enumerator
CUSTOM 

Custom model provided by the user.

ALEXNET 

AlexNet trained on 1000-class ILSVRC12.

GOOGLENET 

GoogleNet trained on 1000-class ILSVRC12.

GOOGLENET_12 

GoogleNet trained on 12-class subset of ImageNet ILSVRC12 from the tutorial.

RESNET_18 

ResNet-18 trained on 1000-class ILSVRC15.

RESNET_50 

ResNet-50 trained on 1000-class ILSVRC15.

RESNET_101 

ResNet-101 trained on 1000-class ILSVRC15.

RESNET_152 

ResNet-152 trained on 1000-class ILSVRC15.

VGG_16 

VGG-16 trained on 1000-class ILSVRC14.

VGG_19 

VGG-19 trained on 1000-class ILSVRC14.

INCEPTION_V4 

Inception-v4 trained on 1000-class ILSVRC12.

Constructor & Destructor Documentation

◆ ~imageNet()

virtual imageNet::~imageNet ( )
virtual

Destroy.

◆ imageNet()

imageNet::imageNet ( )
protected

Member Function Documentation

◆ Classify() [1/4]

template<typename T >
int imageNet::Classify ( T *  image,
uint32_t  width,
uint32_t  height,
float *  confidence = NULL 
)
inline

Determine the maximum likelihood image class.

This function performs pre-processing to the image (apply mean-value subtraction and NCHW format),

See also
PreProcess()
Parameters
image	input image in CUDA device memory.
width	width of the input image in pixels.
height	height of the input image in pixels.
confidence	optional pointer to float filled with confidence value.
Returns
Index of the maximum class, or -1 on error.

◆ Classify() [2/4]

int imageNet::Classify ( void *  image,
uint32_t  width,
uint32_t  height,
imageFormat  format,
float *  confidence = NULL 
)

Determine the maximum likelihood image class.

This function performs pre-processing to the image (apply mean-value subtraction and NCHW format),

See also
PreProcess()
Parameters
image	input image in CUDA device memory.
width	width of the input image in pixels.
height	height of the input image in pixels.
confidence	optional pointer to float filled with confidence value.
Returns
Index of the maximum class, or -1 on error.

◆ Classify() [3/4]

int imageNet::Classify ( float *  rgba,
uint32_t  width,
uint32_t  height,
float *  confidence = NULL,
imageFormat  format = IMAGE_RGBA32F 
)

Determine the maximum likelihood image class.

This function performs pre-processing to the image (apply mean-value subtraction and NCHW format),

See also
PreProcess()
Deprecated:
this overload of Classify() provides legacy compatibility with float* type (RGBA32F).
Parameters
rgba	float4 input image in CUDA device memory.
width	width of the input image in pixels.
height	height of the input image in pixels.
confidence	optional pointer to float filled with confidence value.
Returns
Index of the maximum class, or -1 on error.

◆ Classify() [4/4]

int imageNet::Classify ( float *  confidence = NULL)
protected

◆ Create() [1/4]

static imageNet* imageNet::Create ( NetworkType  networkType = GOOGLENET,
uint32_t  maxBatchSize = DEFAULT_MAX_BATCH_SIZE,
precisionType  precision = TYPE_FASTEST,
deviceType  device = DEVICE_GPU,
bool  allowGPUFallback = true 
)
static

Load a new network instance.

◆ Create() [2/4]

static imageNet* imageNet::Create ( const char *  prototxt_path,
const char *  model_path,
const char *  mean_binary,
const char *  class_labels,
const char *  input = IMAGENET_DEFAULT_INPUT,
const char *  output = IMAGENET_DEFAULT_OUTPUT,
uint32_t  maxBatchSize = DEFAULT_MAX_BATCH_SIZE,
precisionType  precision = TYPE_FASTEST,
deviceType  device = DEVICE_GPU,
bool  allowGPUFallback = true 
)
static

Load a new network instance.

Parameters
prototxt_path	File path to the deployable network prototxt
model_path	File path to the caffemodel
mean_binary	File path to the mean value binary proto (can be NULL)
class_labels	File path to list of class name labels
input	Name of the input layer blob.
output	Name of the output layer blob.
maxBatchSize	The maximum batch size that the network will support and be optimized for.

◆ Create() [3/4]

static imageNet* imageNet::Create ( int  argc,
char **  argv 
)
static

Load a new network instance by parsing the command line.

◆ Create() [4/4]

static imageNet* imageNet::Create ( const commandLine &  cmdLine)
static

Load a new network instance by parsing the command line.

◆ GetClassDesc()

const char* imageNet::GetClassDesc ( uint32_t  index) const
inline

Retrieve the description of a particular class.

◆ GetClassPath()

const char* imageNet::GetClassPath ( ) const
inline

Retrieve the path to the file containing the class descriptions.

◆ GetClassSynset()

const char* imageNet::GetClassSynset ( uint32_t  index) const
inline

Retrieve the class synset category of a particular class.

◆ GetNetworkName()

const char* imageNet::GetNetworkName ( ) const
inline

Retrieve a string describing the network name.

◆ GetNetworkType()

NetworkType imageNet::GetNetworkType ( ) const
inline

Retrieve the network type (alexnet or googlenet)

◆ GetNumClasses()

uint32_t imageNet::GetNumClasses ( ) const
inline

Retrieve the number of image recognition classes (typically 1000)

◆ init() [1/2]

bool imageNet::init ( NetworkType  networkType,
uint32_t  maxBatchSize,
precisionType  precision,
deviceType  device,
bool  allowGPUFallback 
)
protected

◆ init() [2/2]

bool imageNet::init ( const char *  prototxt_path,
const char *  model_path,
const char *  mean_binary,
const char *  class_path,
const char *  input,
const char *  output,
uint32_t  maxBatchSize,
precisionType  precision,
deviceType  device,
bool  allowGPUFallback 
)
protected

◆ LoadClassInfo() [1/2]

static bool imageNet::LoadClassInfo ( const char *  filename,
std::vector< std::string > &  descriptions,
int  expectedClasses = -1 
)
static

Load class descriptions from a label file.

◆ LoadClassInfo() [2/2]

static bool imageNet::LoadClassInfo ( const char *  filename,
std::vector< std::string > &  descriptions,
std::vector< std::string > &  synsets,
int  expectedClasses = -1 
)
static

Load class descriptions and synset strings from a label file.

◆ loadClassInfo()

bool imageNet::loadClassInfo ( const char *  filename,
int  expectedClasses = -1 
)
protected

◆ NetworkTypeFromStr()

static NetworkType imageNet::NetworkTypeFromStr ( const char *  model_name)
static

Parse a string to one of the built-in pretrained models.

Valid names are "alexnet", "googlenet", "googlenet-12", or "googlenet_12", etc.

Returns
one of the imageNet::NetworkType enums, or imageNet::CUSTOM on invalid string.

◆ NetworkTypeToStr()

static const char* imageNet::NetworkTypeToStr ( NetworkType  network)
static

Convert a NetworkType enum to a string.

◆ PreProcess()

bool imageNet::PreProcess ( void *  image,
uint32_t  width,
uint32_t  height,
imageFormat  format 
)
protected

◆ Process()

bool imageNet::Process ( )
protected

◆ Usage()

static const char* imageNet::Usage ( )
inlinestatic

Usage string for command line arguments to Create()

Member Data Documentation

◆ mClassDesc

std::vector<std::string> imageNet::mClassDesc
protected

◆ mClassPath

std::string imageNet::mClassPath
protected

◆ mClassSynset

std::vector<std::string> imageNet::mClassSynset
protected

◆ mNetworkType

NetworkType imageNet::mNetworkType
protected

◆ mOutputClasses

uint32_t imageNet::mOutputClasses
protected

The documentation for this class was generated from the following file: