Jetson Inference
DNN Vision Library
tensorNet Member List

This is the complete list of members for tensorNet, including all inherited members.
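Illustrative usage sketches for the network-loading, precision-detection, and stream/profiler members follow the list.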

AllowGPUFallback() const  [tensorNet, inline]
CreateStream(bool nonBlocking=true)  [tensorNet]
DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type)  [tensorNet, static]
DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU)  [tensorNet, static]
DetectNativePrecisions(deviceType device=DEVICE_GPU)  [tensorNet, static]
EnableDebug()  [tensorNet]
EnableLayerProfiler()  [tensorNet]
FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true)  [tensorNet, static]
GetDevice() const  [tensorNet, inline]
GetModelPath() const  [tensorNet, inline]
GetModelType() const  [tensorNet, inline]
GetNetworkTime()  [tensorNet, inline]
GetPrecision() const  [tensorNet, inline]
GetProfilerTime(profilerQuery query)  [tensorNet, inline]
GetProfilerTime(profilerQuery query, profilerDevice device)  [tensorNet, inline]
GetPrototxtPath() const  [tensorNet, inline]
GetStream() const  [tensorNet, inline]
gLogger  [tensorNet, protected]
gProfiler  [tensorNet, protected]
IsModelType(modelType type) const  [tensorNet, inline]
IsPrecision(precisionType type) const  [tensorNet, inline]
LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  [tensorNet]
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  [tensorNet]
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  [tensorNet]
mAllowGPUFallback  [tensorNet, protected]
mCacheCalibrationPath  [tensorNet, protected]
mCacheEnginePath  [tensorNet, protected]
mContext  [tensorNet, protected]
mDevice  [tensorNet, protected]
mEnableDebug  [tensorNet, protected]
mEnableProfiler  [tensorNet, protected]
mEngine  [tensorNet, protected]
mEventsCPU  [tensorNet, protected]
mEventsGPU  [tensorNet, protected]
mHeight  [tensorNet, protected]
mInfer  [tensorNet, protected]
mInputBlobName  [tensorNet, protected]
mInputCPU  [tensorNet, protected]
mInputCUDA  [tensorNet, protected]
mInputDims  [tensorNet, protected]
mInputSize  [tensorNet, protected]
mMaxBatchSize  [tensorNet, protected]
mMeanPath  [tensorNet, protected]
mModelPath  [tensorNet, protected]
mModelType  [tensorNet, protected]
mOutputs  [tensorNet, protected]
mPrecision  [tensorNet, protected]
mProfilerQueriesDone  [tensorNet, protected]
mProfilerQueriesUsed  [tensorNet, protected]
mProfilerTimes  [tensorNet, protected]
mPrototxtPath  [tensorNet, protected]
mStream  [tensorNet, protected]
mWidth  [tensorNet, protected]
PrintProfilerTimes()  [tensorNet, inline]
ProfileModel(const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream)  [tensorNet, protected]
PROFILER_BEGIN(profilerQuery query)  [tensorNet, inline, protected]
PROFILER_END(profilerQuery query)  [tensorNet, inline, protected]
PROFILER_QUERY(profilerQuery query)  [tensorNet, inline, protected]
SetStream(cudaStream_t stream)  [tensorNet]
tensorNet()  [tensorNet, protected]
~tensorNet()  [tensorNet, virtual]
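
The LoadNetwork() overloads are the entry point for building or deserializing the underlying TensorRT engine. Below is a minimal sketch, not a definitive usage: the file paths and blob names are placeholders, the demoNet subclass exists only because tensorNet() is protected (applications normally use a derived class such as imageNet or detectNet), and the installed header path plus the precisionTypeToStr()/deviceTypeToStr() string helpers are assumed from tensorNet.h.

#include <jetson-inference/tensorNet.h>
#include <cstdio>

// Hypothetical subclass: exposes the protected tensorNet() constructor
// so LoadNetwork() can be called directly for illustration.
class demoNet : public tensorNet
{
public:
	demoNet() { }
};

int main()
{
	demoNet net;

	// Load a caffe prototxt/caffemodel pair (placeholder paths).
	// The defaults ask for TYPE_FASTEST precision on DEVICE_GPU with
	// allowGPUFallback=true, so TensorRT picks the quickest precision
	// the device natively supports.
	if( !net.LoadNetwork("deploy.prototxt", "model.caffemodel",
	                     NULL, "data", "prob") )
	{
		printf("failed to load network\n");
		return 1;
	}

	printf("loaded %s (%s precision on %s)\n",
	       net.GetModelPath(),
	       precisionTypeToStr(net.GetPrecision()),
	       deviceTypeToStr(net.GetDevice()));
	return 0;
}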
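The static members DetectNativePrecisions(), DetectNativePrecision(), and FindFastestPrecision() can be called without an instance to probe what the hardware supports before loading anything. A sketch under the assumption that the precisionType/deviceType enum values used below (TYPE_FP16, TYPE_INT8, DEVICE_GPU) match those declared in tensorNet.h:

#include <jetson-inference/tensorNet.h>
#include <cstdio>
#include <vector>

int main()
{
	// Enumerate the precisions this device supports natively.
	const std::vector<precisionType> native =
		tensorNet::DetectNativePrecisions(DEVICE_GPU);

	// Check one precision against that list...
	const bool hasFP16 = tensorNet::DetectNativePrecision(native, TYPE_FP16);

	// ...or query a single precision/device pair directly.
	const bool hasINT8 = tensorNet::DetectNativePrecision(TYPE_INT8, DEVICE_GPU);

	// Pick the fastest supported precision, excluding INT8 here
	// (e.g. when no INT8 calibrator is available).
	const precisionType fastest =
		tensorNet::FindFastestPrecision(DEVICE_GPU, /*allowInt8=*/false);

	printf("FP16 native: %d, INT8 native: %d, fastest: %s\n",
	       (int)hasFP16, (int)hasINT8, precisionTypeToStr(fastest));
	return 0;
}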
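CreateStream(), SetStream(), and GetStream() manage the CUDA stream that inference is issued on, while EnableLayerProfiler(), GetNetworkTime(), GetProfilerTime(), and PrintProfilerTimes() expose the built-in timing hooks. A sketch of both, assuming PROFILER_NETWORK and PROFILER_CUDA are among the profilerQuery/profilerDevice enum values in tensorNet.h, and that CreateStream() also attaches the new stream it returns:

#include <jetson-inference/tensorNet.h>
#include <cstdio>

// `net` is any tensorNet-derived instance with a network already loaded,
// e.g. the demoNet from the first sketch above.
void profileRun( tensorNet& net )
{
	net.EnableLayerProfiler();   // turn on per-layer timing output

	// Use a dedicated non-blocking CUDA stream instead of the default one.
	cudaStream_t stream = net.CreateStream(true);
	if( !stream )
		stream = net.GetStream();   // fall back to the attached stream

	// ... run inference through the derived class here ...

	// Report the aggregate timings collected by the profiler queries.
	printf("network time %.2f ms (CUDA)\n",
	       net.GetProfilerTime(PROFILER_NETWORK, PROFILER_CUDA));
	net.PrintProfilerTimes();
}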