Jetson Inference
DNN Vision Library

Image recognition with classification networks, using TensorRT. More...

#include <imageNet.h>

Inheritance diagram for imageNet:
tensorNet

Public Types

enum  NetworkType {
  CUSTOM, ALEXNET, GOOGLENET, GOOGLENET_12,
  RESNET_18, RESNET_50, RESNET_101, RESNET_152,
  VGG_16, VGG_19, INCEPTION_V4
}
 Network choice enumeration. More...
 

Public Member Functions

virtual ~imageNet ()
 Destroy. More...
 
int Classify (float *rgba, uint32_t width, uint32_t height, float *confidence=NULL)
 Determine the maximum likelihood image class. More...
 
int Classify (float *confidence=NULL)
 Determine the maximum likelihood image class. More...
 
bool PreProcess (float *rgba, uint32_t width, uint32_t height)
 Perform pre-processing on the image to apply mean-value subtraction and to organize the data into NCHW format and BGR colorspace that the networks expect. More...
 
bool Process ()
 Process the network, without determining the classification argmax. More...
 
uint32_t GetNumClasses () const
 Retrieve the number of image recognition classes (typically 1000) More...
 
const char * GetClassDesc (uint32_t index) const
 Retrieve the description of a particular class. More...
 
const char * GetClassSynset (uint32_t index) const
 Retrieve the class synset category of a particular class. More...
 
const char * GetClassPath () const
 Retrieve the path to the file containing the class descriptions. More...
 
NetworkType GetNetworkType () const
 Retrieve the network type (alexnet or googlenet) More...
 
const char * GetNetworkName () const
 Retrieve a string describing the network name. More...
 
- Public Member Functions inherited from tensorNet
virtual ~tensorNet ()
 Destroy. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple output layers. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance (this variant is used for UFF models) More...
 
void EnableLayerProfiler ()
 Manually enable layer profiling times. More...
 
void EnableDebug ()
 Manually enable debug messages and synchronization. More...
 
bool AllowGPUFallback () const
 Return true if GPU fallback is enabled. More...
 
deviceType GetDevice () const
 Retrieve the device being used for execution. More...
 
precisionType GetPrecision () const
 Retrieve the type of precision being used. More...
 
bool IsPrecision (precisionType type) const
 Check if a particular precision is being used. More...
 
cudaStream_t GetStream () const
 Retrieve the stream that the device is operating on. More...
 
cudaStream_t CreateStream (bool nonBlocking=true)
 Create and use a new stream for execution. More...
 
void SetStream (cudaStream_t stream)
 Set the stream that the device is operating on. More...
 
const char * GetPrototxtPath () const
 Retrieve the path to the network prototxt file. More...
 
const char * GetModelPath () const
 Retrieve the path to the network model file. More...
 
modelType GetModelType () const
 Retrieve the format of the network model. More...
 
bool IsModelType (modelType type) const
 Return true if the model is of the specified format. More...
 
float GetNetworkTime ()
 Retrieve the network runtime (in milliseconds). More...
 
float2 GetProfilerTime (profilerQuery query)
 Retrieve the profiler runtime (in milliseconds). More...
 
float GetProfilerTime (profilerQuery query, profilerDevice device)
 Retrieve the profiler runtime (in milliseconds). More...
 
void PrintProfilerTimes ()
 Print the profiler times (in milliseconds). More...
 

Static Public Member Functions

static NetworkType NetworkTypeFromStr (const char *model_name)
 Parse a string to one of the built-in pretrained models. More...
 
static const char * NetworkTypeToStr (NetworkType network)
 Convert a NetworkType enum to a string. More...
 
static imageNet * Create (NetworkType networkType=GOOGLENET, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)
 Load a new network instance. More...
 
static imageNet * Create (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_labels, const char *input=IMAGENET_DEFAULT_INPUT, const char *output=IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)
 Load a new network instance. More...
 
static imageNet * Create (int argc, char **argv)
 Load a new network instance by parsing the command line. More...
 
static const char * Usage ()
 Usage string for command line arguments to Create() More...
 
- Static Public Member Functions inherited from tensorNet
static precisionType FindFastestPrecision (deviceType device=DEVICE_GPU, bool allowInt8=true)
 Determine the fastest native precision on a device. More...
 
static std::vector< precisionType > DetectNativePrecisions (deviceType device=DEVICE_GPU)
 Detect the precisions supported natively on a device. More...
 
static bool DetectNativePrecision (const std::vector< precisionType > &nativeTypes, precisionType type)
 Detect if a particular precision is supported natively. More...
 
static bool DetectNativePrecision (precisionType precision, deviceType device=DEVICE_GPU)
 Detect if a particular precision is supported natively. More...
 

Protected Member Functions

 imageNet ()
 
bool init (NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)
 
bool init (const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)
 
bool loadClassInfo (const char *filename)
 
- Protected Member Functions inherited from tensorNet
 tensorNet ()
 Constructor. More...
 
bool ProfileModel (const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream)
 Create and output an optimized network model. More...
 
void PROFILER_BEGIN (profilerQuery query)
 Begin a profiling query, before network is run. More...
 
void PROFILER_END (profilerQuery query)
 End a profiling query, after the network is run. More...
 
bool PROFILER_QUERY (profilerQuery query)
 Query the CUDA part of a profiler query. More...
 

Protected Attributes

uint32_t mCustomClasses
 
uint32_t mOutputClasses
 
std::vector< std::string > mClassSynset
 
std::vector< std::string > mClassDesc
 
std::string mClassPath
 
NetworkType mNetworkType
 
- Protected Attributes inherited from tensorNet
tensorNet::Logger gLogger
 
tensorNet::Profiler gProfiler
 
std::string mPrototxtPath
 
std::string mModelPath
 
std::string mMeanPath
 
std::string mInputBlobName
 
std::string mCacheEnginePath
 
std::string mCacheCalibrationPath
 
deviceType mDevice
 
precisionType mPrecision
 
modelType mModelType
 
cudaStream_t mStream
 
cudaEvent_t mEventsGPU [PROFILER_TOTAL *2]
 
timespec mEventsCPU [PROFILER_TOTAL *2]
 
nvinfer1::IRuntime * mInfer
 
nvinfer1::ICudaEngine * mEngine
 
nvinfer1::IExecutionContext * mContext
 
uint32_t mWidth
 
uint32_t mHeight
 
uint32_t mInputSize
 
float * mInputCPU
 
float * mInputCUDA
 
float2 mProfilerTimes [PROFILER_TOTAL+1]
 
uint32_t mProfilerQueriesUsed
 
uint32_t mProfilerQueriesDone
 
uint32_t mMaxBatchSize
 
bool mEnableProfiler
 
bool mEnableDebug
 
bool mAllowGPUFallback
 
Dims3 mInputDims
 
std::vector< outputLayer > mOutputs
 

Detailed Description

Image recognition with classification networks, using TensorRT.

Member Enumeration Documentation

◆ NetworkType

Network choice enumeration.

Enumerator
CUSTOM 

Custom model provided by the user.

ALEXNET 

AlexNet trained on 1000-class ILSVRC12.

GOOGLENET 

GoogleNet trained on 1000-class ILSVRC12.

GOOGLENET_12 

GoogleNet trained on 12-class subset of ImageNet ILSVRC12 from the tutorial.

RESNET_18 

ResNet-18 trained on 1000-class ILSVRC15.

RESNET_50 

ResNet-50 trained on 1000-class ILSVRC15.

RESNET_101 

ResNet-101 trained on 1000-class ILSVRC15.

RESNET_152 

ResNet-152 trained on 1000-class ILSVRC15.

VGG_16 

VGG-16 trained on 1000-class ILSVRC14.

VGG_19 

VGG-19 trained on 1000-class ILSVRC14.

INCEPTION_V4 

Inception-v4 trained on 1000-class ILSVRC12.

Constructor & Destructor Documentation

◆ ~imageNet()

virtual imageNet::~imageNet ( )
virtual

Destroy.

◆ imageNet()

imageNet::imageNet ( )
protected

Member Function Documentation

◆ Classify() [1/2]

int imageNet::Classify ( float *  rgba,
uint32_t  width,
uint32_t  height,
float *  confidence = NULL 
)

Determine the maximum likelihood image class.

This function performs pre-processing on the image (mean-value subtraction and conversion to NCHW format) before running the network.

See also
PreProcess()
Parameters
rgba         float4 input image in CUDA device memory.
width        width of the input image in pixels.
height       height of the input image in pixels.
confidence   optional pointer to float filled with confidence value.
Returns
Index of the maximum class, or -1 on error.
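
For example, a minimal sketch of this overload. The image pointer, its dimensions, and how the image is loaded into CUDA memory are assumed to be handled elsewhere in the application; they are placeholders here.

#include <imageNet.h>
#include <cstdio>

int main()
{
    // load a built-in GoogleNet instance with default options
    imageNet* net = imageNet::Create(imageNet::GOOGLENET);

    if( !net )
        return 1;

    // imgCUDA/width/height are assumed to describe a float4 RGBA image
    // already resident in CUDA device memory (loaded elsewhere)
    float*   imgCUDA = NULL;
    uint32_t width   = 0;
    uint32_t height  = 0;

    float confidence = 0.0f;
    const int classIndex = net->Classify(imgCUDA, width, height, &confidence);

    if( classIndex >= 0 )
        printf("class #%i (%s), confidence %.4f\n", classIndex, net->GetClassDesc(classIndex), confidence);

    delete net;
    return 0;
}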

◆ Classify() [2/2]

int imageNet::Classify ( float *  confidence = NULL)

Determine the maximum likelihood image class.

Note
before calling this function, you must call PreProcess() with the image.
Parameters
confidence   optional pointer to float filled with confidence value.
Returns
Index of the maximum class, or -1 on error.
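
A sketch of the two-step path this note describes, assuming net is a valid imageNet instance and that the image pointer and dimensions come from elsewhere:

// imgCUDA/width/height describe a float4 RGBA image in CUDA device memory
if( net->PreProcess(imgCUDA, width, height) )
{
    float confidence = 0.0f;
    const int classIndex = net->Classify(&confidence);   // no image arguments needed after PreProcess()

    if( classIndex >= 0 )
        printf("%s (%.2f%%)\n", net->GetClassDesc(classIndex), confidence * 100.0f);
}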

◆ Create() [1/3]

static imageNet* imageNet::Create ( NetworkType  networkType = GOOGLENET,
uint32_t  maxBatchSize = DEFAULT_MAX_BATCH_SIZE,
precisionType  precision = TYPE_FASTEST,
deviceType  device = DEVICE_GPU,
bool  allowGPUFallback = true 
)
static

Load a new network instance.
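
For example, a built-in model can be requested with an explicit precision and device; the choice of ResNet-18 and FP16 below is only illustrative (the precisionType and deviceType enums come from tensorNet):

// load ResNet-18 optimized for FP16 on the GPU, with fallback enabled
// for any layers that cannot run natively
imageNet* net = imageNet::Create(imageNet::RESNET_18,
                                 DEFAULT_MAX_BATCH_SIZE,
                                 TYPE_FP16, DEVICE_GPU,
                                 true /*allowGPUFallback*/);
if( !net )
    printf("failed to load ResNet-18\n");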

◆ Create() [2/3]

static imageNet* imageNet::Create ( const char *  prototxt_path,
const char *  model_path,
const char *  mean_binary,
const char *  class_labels,
const char *  input = IMAGENET_DEFAULT_INPUT,
const char *  output = IMAGENET_DEFAULT_OUTPUT,
uint32_t  maxBatchSize = DEFAULT_MAX_BATCH_SIZE,
precisionType  precision = TYPE_FASTEST,
deviceType  device = DEVICE_GPU,
bool  allowGPUFallback = true 
)
static

Load a new network instance.

Parameters
prototxt_path   File path to the deployable network prototxt
model_path      File path to the caffemodel
mean_binary     File path to the mean value binary proto (can be NULL)
class_labels    File path to list of class name labels
input           Name of the input layer blob.
output          Name of the output layer blob.
maxBatchSize    The maximum batch size that the network will support and be optimized for.
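
A sketch of loading a custom caffe classification model with this overload; every file path below is a placeholder for the user's own files:

// the paths are hypothetical; substitute your own prototxt, caffemodel, and label file
imageNet* net = imageNet::Create("networks/my_model/deploy.prototxt",
                                 "networks/my_model/snapshot.caffemodel",
                                 NULL,                        // mean value binary proto (can be NULL)
                                 "networks/my_model/labels.txt",
                                 IMAGENET_DEFAULT_INPUT,      // default input layer blob name
                                 IMAGENET_DEFAULT_OUTPUT);    // default output layer blob name

if( !net )
    printf("failed to load custom model\n");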

◆ Create() [3/3]

static imageNet* imageNet::Create ( int  argc,
char **  argv 
)
static

Load a new network instance by parsing the command line.
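
A minimal sketch of command-line based creation; when loading fails, the Usage() string can be printed to show the supported arguments:

#include <imageNet.h>
#include <cstdio>

int main( int argc, char** argv )
{
    // parse the network selection and model paths from the command line
    imageNet* net = imageNet::Create(argc, argv);

    if( !net )
    {
        printf("failed to load network\n\n%s\n", imageNet::Usage());
        return 1;
    }

    /* ... run classification here ... */

    delete net;
    return 0;
}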

◆ GetClassDesc()

const char* imageNet::GetClassDesc ( uint32_t  index) const
inline

Retrieve the description of a particular class.

◆ GetClassPath()

const char* imageNet::GetClassPath ( ) const
inline

Retrieve the path to the file containing the class descriptions.

◆ GetClassSynset()

const char* imageNet::GetClassSynset ( uint32_t  index) const
inline

Retrieve the class synset category of a particular class.

◆ GetNetworkName()

const char* imageNet::GetNetworkName ( ) const
inline

Retrieve a string describing the network name.

◆ GetNetworkType()

NetworkType imageNet::GetNetworkType ( ) const
inline

Retrieve the network type (alexnet or googlenet)

◆ GetNumClasses()

uint32_t imageNet::GetNumClasses ( ) const
inline

Retrieve the number of image recognition classes (typically 1000)
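
For example, the class accessors can be combined to list every label the loaded model can predict (assuming net is a valid imageNet instance):

// list every class index with its synset ID and human-readable description
const uint32_t numClasses = net->GetNumClasses();

for( uint32_t n = 0; n < numClasses; n++ )
    printf("class %u   %s   %s\n", n, net->GetClassSynset(n), net->GetClassDesc(n));

printf("class labels loaded from %s\n", net->GetClassPath());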

◆ init() [1/2]

bool imageNet::init ( NetworkType  networkType,
uint32_t  maxBatchSize,
precisionType  precision,
deviceType  device,
bool  allowGPUFallback 
)
protected

◆ init() [2/2]

bool imageNet::init ( const char *  prototxt_path,
const char *  model_path,
const char *  mean_binary,
const char *  class_path,
const char *  input,
const char *  output,
uint32_t  maxBatchSize,
precisionType  precision,
deviceType  device,
bool  allowGPUFallback 
)
protected

◆ loadClassInfo()

bool imageNet::loadClassInfo ( const char *  filename)
protected

◆ NetworkTypeFromStr()

static NetworkType imageNet::NetworkTypeFromStr ( const char *  model_name)
static

Parse a string to one of the built-in pretrained models.

Valid names are "alexnet", "googlenet", "googlenet-12", "googlenet_12", etc.

Returns
one of the imageNet::NetworkType enums, or imageNet::CUSTOM on invalid string.
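
A sketch of how the parser might be used, treating imageNet::CUSTOM as the signal to fall back to the explicit-path Create() overload (the model name string here is illustrative):

// modelName is assumed to come from user input, e.g. a command-line argument
const char* modelName = "googlenet-12";

const imageNet::NetworkType type = imageNet::NetworkTypeFromStr(modelName);

if( type == imageNet::CUSTOM )
    printf("'%s' is not a built-in model; use the explicit-path Create() instead\n", modelName);
else
    printf("selected built-in network: %s\n", imageNet::NetworkTypeToStr(type));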

◆ NetworkTypeToStr()

static const char* imageNet::NetworkTypeToStr ( NetworkType  network)
static

Convert a NetworkType enum to a string.

◆ PreProcess()

bool imageNet::PreProcess ( float *  rgba,
uint32_t  width,
uint32_t  height 
)

Perform pre-processing on the image to apply mean-value subtraction and to organize the data into NCHW format and BGR colorspace that the networks expect.

After calling PreProcess(), you can call Classify() without supplying all the parameters.

◆ Process()

bool imageNet::Process ( )

Process the network, without determining the classification argmax.

To perform the actual classification via post-processing, Classify() should be used instead.
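
One possible use of Process() on its own is benchmarking the raw network runtime after a single PreProcess() call, without the classification post-processing; the timing accessors used below are inherited from tensorNet:

// PreProcess() is assumed to have already been called with a valid image;
// run the network repeatedly to observe steady-state inference time
for( int i = 0; i < 10; i++ )
{
    if( !net->Process() )
        break;

    printf("run %i: %.2f ms\n", i, net->GetNetworkTime());
}

net->PrintProfilerTimes();   // print the per-stage profiler times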

◆ Usage()

static const char* imageNet::Usage ( )
inlinestatic

Usage string for command line arguments to Create()

Member Data Documentation

◆ mClassDesc

std::vector<std::string> imageNet::mClassDesc
protected

◆ mClassPath

std::string imageNet::mClassPath
protected

◆ mClassSynset

std::vector<std::string> imageNet::mClassSynset
protected

◆ mCustomClasses

uint32_t imageNet::mCustomClasses
protected

◆ mNetworkType

NetworkType imageNet::mNetworkType
protected

◆ mOutputClasses

uint32_t imageNet::mOutputClasses
protected

The documentation for this class was generated from the following file:

imageNet.h