# Jetson Inference
**DNN Vision Library**
This is the complete list of members for `imageNet`, including all inherited members.
| Member | Defined in | Attributes |
|--------|------------|------------|
| `ALEXNET` enum value | `imageNet` | |
| `AllowGPUFallback() const` | `tensorNet` | inline |
| `Classify(float *rgba, uint32_t width, uint32_t height, float *confidence=NULL)` | `imageNet` | |
| `Classify(float *confidence=NULL)` | `imageNet` | |
| `Create(NetworkType networkType=GOOGLENET, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)` | `imageNet` | static |
| `Create(const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_labels, const char *input=IMAGENET_DEFAULT_INPUT, const char *output=IMAGENET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)` | `imageNet` | static |
| `Create(int argc, char **argv)` | `imageNet` | static |
| `CreateStream(bool nonBlocking=true)` | `tensorNet` | |
| `CUSTOM` enum value | `imageNet` | |
| `DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type)` | `tensorNet` | static |
| `DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU)` | `tensorNet` | static |
| `DetectNativePrecisions(deviceType device=DEVICE_GPU)` | `tensorNet` | static |
| `EnableDebug()` | `tensorNet` | |
| `EnableLayerProfiler()` | `tensorNet` | |
| `FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true)` | `tensorNet` | static |
| `GetClassDesc(uint32_t index) const` | `imageNet` | inline |
| `GetClassPath() const` | `imageNet` | inline |
| `GetClassSynset(uint32_t index) const` | `imageNet` | inline |
| `GetDevice() const` | `tensorNet` | inline |
| `GetModelPath() const` | `tensorNet` | inline |
| `GetModelType() const` | `tensorNet` | inline |
| `GetNetworkName() const` | `imageNet` | inline |
| `GetNetworkTime()` | `tensorNet` | inline |
| `GetNetworkType() const` | `imageNet` | inline |
| `GetNumClasses() const` | `imageNet` | inline |
| `GetPrecision() const` | `tensorNet` | inline |
| `GetProfilerTime(profilerQuery query)` | `tensorNet` | inline |
| `GetProfilerTime(profilerQuery query, profilerDevice device)` | `tensorNet` | inline |
| `GetPrototxtPath() const` | `tensorNet` | inline |
| `GetStream() const` | `tensorNet` | inline |
| `gLogger` | `tensorNet` | protected |
| `GOOGLENET` enum value | `imageNet` | |
| `GOOGLENET_12` enum value | `imageNet` | |
| `gProfiler` | `tensorNet` | protected |
| `imageNet()` | `imageNet` | protected |
| `INCEPTION_V4` enum value | `imageNet` | |
| `init(NetworkType networkType, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)` | `imageNet` | protected |
| `init(const char *prototxt_path, const char *model_path, const char *mean_binary, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)` | `imageNet` | protected |
| `IsModelType(modelType type) const` | `tensorNet` | inline |
| `IsPrecision(precisionType type) const` | `tensorNet` | inline |
| `loadClassInfo(const char *filename)` | `imageNet` | protected |
| `LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)` | `tensorNet` | |
| `LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)` | `tensorNet` | |
| `LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)` | `tensorNet` | |
| `mAllowGPUFallback` | `tensorNet` | protected |
| `mCacheCalibrationPath` | `tensorNet` | protected |
| `mCacheEnginePath` | `tensorNet` | protected |
| `mClassDesc` | `imageNet` | protected |
| `mClassPath` | `imageNet` | protected |
| `mClassSynset` | `imageNet` | protected |
| `mContext` | `tensorNet` | protected |
| `mCustomClasses` | `imageNet` | protected |
| `mDevice` | `tensorNet` | protected |
| `mEnableDebug` | `tensorNet` | protected |
| `mEnableProfiler` | `tensorNet` | protected |
| `mEngine` | `tensorNet` | protected |
| `mEventsCPU` | `tensorNet` | protected |
| `mEventsGPU` | `tensorNet` | protected |
| `mHeight` | `tensorNet` | protected |
| `mInfer` | `tensorNet` | protected |
| `mInputBlobName` | `tensorNet` | protected |
| `mInputCPU` | `tensorNet` | protected |
| `mInputCUDA` | `tensorNet` | protected |
| `mInputDims` | `tensorNet` | protected |
| `mInputSize` | `tensorNet` | protected |
| `mMaxBatchSize` | `tensorNet` | protected |
| `mMeanPath` | `tensorNet` | protected |
| `mModelPath` | `tensorNet` | protected |
| `mModelType` | `tensorNet` | protected |
| `mNetworkType` | `imageNet` | protected |
| `mOutputClasses` | `imageNet` | protected |
| `mOutputs` | `tensorNet` | protected |
| `mPrecision` | `tensorNet` | protected |
| `mProfilerQueriesDone` | `tensorNet` | protected |
| `mProfilerQueriesUsed` | `tensorNet` | protected |
| `mProfilerTimes` | `tensorNet` | protected |
| `mPrototxtPath` | `tensorNet` | protected |
| `mStream` | `tensorNet` | protected |
| `mWidth` | `tensorNet` | protected |
| `NetworkType` enum name | `imageNet` | |
| `NetworkTypeFromStr(const char *model_name)` | `imageNet` | static |
| `NetworkTypeToStr(NetworkType network)` | `imageNet` | static |
| `PreProcess(float *rgba, uint32_t width, uint32_t height)` | `imageNet` | |
| `PrintProfilerTimes()` | `tensorNet` | inline |
| `Process()` | `imageNet` | |
| `ProfileModel(const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream)` | `tensorNet` | protected |
| `PROFILER_BEGIN(profilerQuery query)` | `tensorNet` | inline, protected |
| `PROFILER_END(profilerQuery query)` | `tensorNet` | inline, protected |
| `PROFILER_QUERY(profilerQuery query)` | `tensorNet` | inline, protected |
| `RESNET_101` enum value | `imageNet` | |
| `RESNET_152` enum value | `imageNet` | |
| `RESNET_18` enum value | `imageNet` | |
| `RESNET_50` enum value | `imageNet` | |
| `SetStream(cudaStream_t stream)` | `tensorNet` | |
| `tensorNet()` | `tensorNet` | protected |
| `Usage()` | `imageNet` | inline, static |
| `VGG_16` enum value | `imageNet` | |
| `VGG_19` enum value | `imageNet` | |
| `~imageNet()` | `imageNet` | virtual |
| `~tensorNet()` | `tensorNet` | virtual |
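
As a point of reference, the members above are typically used by constructing the network with the static `Create()` factory, running `Classify()` on an image, and looking up the label with `GetClassDesc()`. The sketch below is a minimal illustration, not code taken from the library's samples: the include path and the `imgRGBA`/`imgWidth`/`imgHeight` arguments are assumptions, and the image is assumed to already be a `float` RGBA buffer in CUDA-mapped (zero-copy) memory.

```cpp
// Minimal sketch of using imageNet for single-image classification.
// Assumption: imgRGBA is a float RGBA image (imgWidth x imgHeight) in
// CUDA-mapped memory, loaded elsewhere (e.g. with the jetson-utils helpers).
#include <jetson-inference/imageNet.h>   // assumed installed header path
#include <cstdio>

int classifyExample(float* imgRGBA, uint32_t imgWidth, uint32_t imgHeight)
{
    // Load a built-in network (GoogleNet here); Create() returns NULL on failure.
    imageNet* net = imageNet::Create(imageNet::GOOGLENET);

    if (!net)
        return -1;

    // Run inference; Classify() returns the class index, or a negative value on error.
    float confidence = 0.0f;
    const int classIndex = net->Classify(imgRGBA, imgWidth, imgHeight, &confidence);

    if (classIndex >= 0)
        printf("classified as '%s' (class #%i) with %.2f%% confidence\n",
               net->GetClassDesc(classIndex), classIndex, confidence * 100.0f);

    // Optionally print the timing collected by the inherited tensorNet profiler.
    net->PrintProfilerTimes();

    delete net;   // ~imageNet() / ~tensorNet() are virtual, so this releases the engine
    return classIndex;
}
```

For command-line tools, the overload `Create(int argc, char **argv)` can be used instead, which selects the network from program arguments (see `Usage()` for the recognized flags).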