Jetson Inference
DNN Vision Library
imageNet Member List

This is the complete list of members for imageNet, including all inherited members.

ALEXNET enum value  (imageNet)
AllowGPUFallback() const  (tensorNet, inline)
Classify(float *rgba, uint32_t width, uint32_t height, float *confidence=NULL)  (imageNet)
Classify(float *confidence=NULL)  (imageNet)
Create(NetworkType networkType=GOOGLENET, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)  (imageNet, static)
Create(const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_labels, const char *input=IMAGENET_DEFAULT_INPUT, const char *output=IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)  (imageNet, static)
Create(int argc, char **argv)  (imageNet, static)
CreateStream(bool nonBlocking=true)  (tensorNet)
CUSTOM enum value  (imageNet)
DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type)  (tensorNet, static)
DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU)  (tensorNet, static)
DetectNativePrecisions(deviceType device=DEVICE_GPU)  (tensorNet, static)
EnableDebug()  (tensorNet)
EnableLayerProfiler()  (tensorNet)
FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true)  (tensorNet, static)
GetClassDesc(uint32_t index) const  (imageNet, inline)
GetClassPath() const  (imageNet, inline)
GetClassSynset(uint32_t index) const  (imageNet, inline)
GetDevice() const  (tensorNet, inline)
GetModelPath() const  (tensorNet, inline)
GetModelType() const  (tensorNet, inline)
GetNetworkName() const  (imageNet, inline)
GetNetworkTime()  (tensorNet, inline)
GetNetworkType() const  (imageNet, inline)
GetNumClasses() const  (imageNet, inline)
GetPrecision() const  (tensorNet, inline)
GetProfilerTime(profilerQuery query)  (tensorNet, inline)
GetProfilerTime(profilerQuery query, profilerDevice device)  (tensorNet, inline)
GetPrototxtPath() const  (tensorNet, inline)
GetStream() const  (tensorNet, inline)
gLogger  (tensorNet, protected)
GOOGLENET enum value  (imageNet)
GOOGLENET_12 enum value  (imageNet)
gProfiler  (tensorNet, protected)
imageNet()  (imageNet, protected)
INCEPTION_V4 enum value  (imageNet)
init(NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)  (imageNet, protected)
init(const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)  (imageNet, protected)
IsModelType(modelType type) const  (tensorNet, inline)
IsPrecision(precisionType type) const  (tensorNet, inline)
loadClassInfo(const char *filename)  (imageNet, protected)
LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  (tensorNet)
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  (tensorNet)
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  (tensorNet)
mAllowGPUFallback  (tensorNet, protected)
mCacheCalibrationPath  (tensorNet, protected)
mCacheEnginePath  (tensorNet, protected)
mClassDesc  (imageNet, protected)
mClassPath  (imageNet, protected)
mClassSynset  (imageNet, protected)
mContext  (tensorNet, protected)
mCustomClasses  (imageNet, protected)
mDevice  (tensorNet, protected)
mEnableDebug  (tensorNet, protected)
mEnableProfiler  (tensorNet, protected)
mEngine  (tensorNet, protected)
mEventsCPU  (tensorNet, protected)
mEventsGPU  (tensorNet, protected)
mHeight  (tensorNet, protected)
mInfer  (tensorNet, protected)
mInputBlobName  (tensorNet, protected)
mInputCPU  (tensorNet, protected)
mInputCUDA  (tensorNet, protected)
mInputDims  (tensorNet, protected)
mInputSize  (tensorNet, protected)
mMaxBatchSize  (tensorNet, protected)
mMeanPath  (tensorNet, protected)
mModelPath  (tensorNet, protected)
mModelType  (tensorNet, protected)
mNetworkType  (imageNet, protected)
mOutputClasses  (imageNet, protected)
mOutputs  (tensorNet, protected)
mPrecision  (tensorNet, protected)
mProfilerQueriesDone  (tensorNet, protected)
mProfilerQueriesUsed  (tensorNet, protected)
mProfilerTimes  (tensorNet, protected)
mPrototxtPath  (tensorNet, protected)
mStream  (tensorNet, protected)
mWidth  (tensorNet, protected)
NetworkType enum name  (imageNet)
NetworkTypeFromStr(const char *model_name)  (imageNet, static)
NetworkTypeToStr(NetworkType network)  (imageNet, static)
PreProcess(float *rgba, uint32_t width, uint32_t height)  (imageNet)
PrintProfilerTimes()  (tensorNet, inline)
Process()  (imageNet)
ProfileModel(const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream)  (tensorNet, protected)
PROFILER_BEGIN(profilerQuery query)  (tensorNet, inline, protected)
PROFILER_END(profilerQuery query)  (tensorNet, inline, protected)
PROFILER_QUERY(profilerQuery query)  (tensorNet, inline, protected)
RESNET_101 enum value  (imageNet)
RESNET_152 enum value  (imageNet)
RESNET_18 enum value  (imageNet)
RESNET_50 enum value  (imageNet)
SetStream(cudaStream_t stream)  (tensorNet)
tensorNet()  (tensorNet, protected)
Usage()  (imageNet, inline, static)
VGG_16 enum value  (imageNet)
VGG_19 enum value  (imageNet)
~imageNet()  (imageNet, virtual)
~tensorNet()  (tensorNet, virtual)
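
The members above combine into a short classification workflow: the static Create() factory builds the network (precision, device, and engine loading come from the inherited tensorNet machinery), Classify() runs inference on an RGBA image and returns a class index plus confidence, and GetClassDesc()/GetNumClasses() map that index back to a label. The sketch below illustrates that flow under stated assumptions: the loadImageRGBA() helper and its header path are assumed from the jetson-utils companion library of this era and may differ in your checkout, and "my_image.jpg" is a hypothetical input file.

#include "imageNet.h"
#include "loadImage.h"   // assumed jetson-utils helper; header path/signature may vary
#include <stdio.h>

int main( int argc, char** argv )
{
	// create the network from a built-in model (GoogleNet here; see the NetworkType enum values above)
	imageNet* net = imageNet::Create(imageNet::GOOGLENET);

	if( !net )
		return 1;

	// load the input image into CPU/GPU memory
	// (loadImageRGBA() is an assumed helper, not part of the member list above)
	float* imgCPU  = NULL;
	float* imgCUDA = NULL;
	int imgWidth   = 0;
	int imgHeight  = 0;

	if( !loadImageRGBA("my_image.jpg", (float4**)&imgCPU, (float4**)&imgCUDA, &imgWidth, &imgHeight) )
	{
		delete net;
		return 1;
	}

	// Classify() returns the index of the top class, or a negative value on error
	float confidence = 0.0f;
	const int imgClass = net->Classify(imgCUDA, imgWidth, imgHeight, &confidence);

	if( imgClass >= 0 )
		printf("classified as '%s' (class %i of %u) with %.2f%% confidence\n",
		       net->GetClassDesc(imgClass), imgClass, net->GetNumClasses(), confidence * 100.0f);

	delete net;
	return 0;
}

Create(int argc, char **argv) offers the same construction driven by command-line flags (see Usage() for the accepted arguments), and the Create() overload taking prototxt/model/label paths serves custom models.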