Jetson Inference
DNN Vision Library
tensorNet Member List

This is the complete list of members for tensorNet, including all inherited members. Each entry shows the member, the class that defines it, and any qualifiers (inline, static, protected, virtual). Brief usage sketches for the network-loading, precision-selection, stream, and profiling members follow the list.

AllowGPUFallback() const -- tensorNet [inline]
ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) -- tensorNet [protected]
CreateStream(bool nonBlocking=true) -- tensorNet
DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) -- tensorNet [static]
DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) -- tensorNet [static]
DetectNativePrecisions(deviceType device=DEVICE_GPU) -- tensorNet [static]
EnableDebug() -- tensorNet
EnableLayerProfiler() -- tensorNet
FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) -- tensorNet [static]
GetDevice() const -- tensorNet [inline]
GetInputDims(uint32_t layer=0) const -- tensorNet [inline]
GetInputHeight(uint32_t layer=0) const -- tensorNet [inline]
GetInputLayers() const -- tensorNet [inline]
GetInputSize(uint32_t layer=0) const -- tensorNet [inline]
GetInputWidth(uint32_t layer=0) const -- tensorNet [inline]
GetModelPath() const -- tensorNet [inline]
GetModelType() const -- tensorNet [inline]
GetNetworkFPS() -- tensorNet [inline]
GetNetworkTime() -- tensorNet [inline]
GetOutputDims(uint32_t layer=0) const -- tensorNet [inline]
GetOutputHeight(uint32_t layer=0) const -- tensorNet [inline]
GetOutputLayers() const -- tensorNet [inline]
GetOutputSize(uint32_t layer=0) const -- tensorNet [inline]
GetOutputWidth(uint32_t layer=0) const -- tensorNet [inline]
GetPrecision() const -- tensorNet [inline]
GetProfilerTime(profilerQuery query) -- tensorNet [inline]
GetProfilerTime(profilerQuery query, profilerDevice device) -- tensorNet [inline]
GetPrototxtPath() const -- tensorNet [inline]
GetStream() const -- tensorNet [inline]
gLogger -- tensorNet [protected]
gProfiler -- tensorNet [protected]
IsModelType(modelType type) const -- tensorNet [inline]
IsPrecision(precisionType type) const -- tensorNet [inline]
LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) -- tensorNet
LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) -- tensorNet
LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) -- tensorNet
LoadEngine(const char *filename, char **stream, size_t *size) -- tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) -- tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) -- tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) -- tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) -- tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) -- tensorNet
mAllowGPUFallback -- tensorNet [protected]
mBindings -- tensorNet [protected]
mCacheCalibrationPath -- tensorNet [protected]
mCacheEnginePath -- tensorNet [protected]
mContext -- tensorNet [protected]
mDevice -- tensorNet [protected]
mEnableDebug -- tensorNet [protected]
mEnableProfiler -- tensorNet [protected]
mEngine -- tensorNet [protected]
mEventsCPU -- tensorNet [protected]
mEventsGPU -- tensorNet [protected]
mInfer -- tensorNet [protected]
mInputs -- tensorNet [protected]
mMaxBatchSize -- tensorNet [protected]
mMeanPath -- tensorNet [protected]
mModelPath -- tensorNet [protected]
mModelType -- tensorNet [protected]
mOutputs -- tensorNet [protected]
mPrecision -- tensorNet [protected]
mProfilerQueriesDone -- tensorNet [protected]
mProfilerQueriesUsed -- tensorNet [protected]
mProfilerTimes -- tensorNet [protected]
mPrototxtPath -- tensorNet [protected]
mStream -- tensorNet [protected]
mWorkspaceSize -- tensorNet [protected]
PrintProfilerTimes() -- tensorNet [inline]
ProcessNetwork(bool sync=true) -- tensorNet [protected]
ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) -- tensorNet [protected]
PROFILER_BEGIN(profilerQuery query) -- tensorNet [inline, protected]
PROFILER_END(profilerQuery query) -- tensorNet [inline, protected]
PROFILER_QUERY(profilerQuery query) -- tensorNet [inline, protected]
SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) -- tensorNet [static]
SetStream(cudaStream_t stream) -- tensorNet
tensorNet() -- tensorNet [protected]
~tensorNet() -- tensorNet [virtual]
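
Loading a network. The LoadNetwork() overloads are the usual entry point. Because the tensorNet() constructor is protected, user code normally reaches them through a shipped subclass such as imageNet or detectNet; the sketch below uses a trivial subclass and placeholder model paths purely to stay self-contained.

    #include <jetson-inference/tensorNet.h>
    #include <stdio.h>

    // tensorNet's constructor is protected, so a minimal subclass is used
    // here only to make the public LoadNetwork() overloads reachable.
    class myNet : public tensorNet
    {
    public:
        myNet() : tensorNet() {}
    };

    int main()
    {
        myNet net;

        // "deploy.prototxt" / "model.caffemodel" are placeholder paths.
        // mean=NULL, input blob "data", output blob "prob" come from the
        // defaults in the LoadNetwork() signature listed above.
        if( !net.LoadNetwork("deploy.prototxt", "model.caffemodel") )
        {
            printf("failed to load network\n");
            return 1;
        }

        printf("loaded %s (input %ux%u)\n", net.GetModelPath(),
               net.GetInputWidth(), net.GetInputHeight());
        return 0;
    }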
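
Precision selection. The static precision helpers can be called without an instance. A sketch, assuming the precisionType and deviceType enumerants declared in tensorNet.h (TYPE_FASTEST, TYPE_FP16, TYPE_INT8, DEVICE_GPU, ...):

    #include <jetson-inference/tensorNet.h>
    #include <stdio.h>
    #include <vector>

    int main()
    {
        // Enumerate the precisions TensorRT supports natively on the GPU.
        std::vector<precisionType> native = tensorNet::DetectNativePrecisions(DEVICE_GPU);
        printf("%zu native precisions detected\n", native.size());

        // Check a specific precision against that list...
        const bool hasFP16 = tensorNet::DetectNativePrecision(native, TYPE_FP16);

        // ...or query the device directly.
        const bool hasInt8 = tensorNet::DetectNativePrecision(TYPE_INT8, DEVICE_GPU);

        // Pick the fastest precision; INT8 disabled here since no calibrator is supplied.
        const precisionType fastest = tensorNet::FindFastestPrecision(DEVICE_GPU, false);

        // SelectPrecision() resolves a request (e.g. TYPE_FASTEST) to a concrete type.
        const precisionType chosen = tensorNet::SelectPrecision(TYPE_FASTEST, DEVICE_GPU, false);

        printf("FP16=%d INT8=%d fastest=%d chosen=%d\n",
               (int)hasFP16, (int)hasInt8, (int)fastest, (int)chosen);
        return 0;
    }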
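
Streams. CreateStream(), SetStream(), and GetStream() let the network run on a caller-managed CUDA stream. A small sketch, where net is any tensorNet subclass instance:

    #include <jetson-inference/tensorNet.h>
    #include <cuda_runtime.h>

    void useCustomStream( tensorNet& net )
    {
        // Option 1: let the network create its own non-blocking stream.
        // Assumption: CreateStream() also makes the new stream active.
        cudaStream_t stream = net.CreateStream(true);

        // Option 2: hand it an existing stream instead.
        // net.SetStream(myExistingStream);

        // Synchronize against whatever stream the network is using.
        cudaStreamSynchronize(net.GetStream());
        (void)stream;
    }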
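
Profiling. ProcessNetwork() itself is protected, so timings are normally read after calling a subclass's public inference method (imageNet::Classify(), detectNet::Detect(), and so on); call EnableLayerProfiler() beforehand to additionally log per-layer times from TensorRT. A sketch of the accessors, assuming the profilerQuery and profilerDevice enumerants from tensorNet.h (PROFILER_NETWORK, PROFILER_CUDA, ...) and that GetProfilerTime(query) packs CPU/CUDA milliseconds into a float2:

    #include <jetson-inference/tensorNet.h>
    #include <stdio.h>

    // Print timing for a network that has already processed a frame.
    void reportTimings( tensorNet& net )
    {
        // Aggregate network time in milliseconds, plus the derived FPS.
        printf("network: %.2f ms (%.1f FPS)\n",
               net.GetNetworkTime(), net.GetNetworkFPS());

        // Assumption: float2.x = CPU time (ms), float2.y = CUDA time (ms).
        const float2 t = net.GetProfilerTime(PROFILER_NETWORK);
        printf("network stage: CPU %.2f ms / CUDA %.2f ms\n", t.x, t.y);

        // Or query one device at a time.
        const float cuda = net.GetProfilerTime(PROFILER_NETWORK, PROFILER_CUDA);
        printf("CUDA only: %.2f ms\n", cuda);

        // Dump every recorded stage at once.
        net.PrintProfilerTimes();
    }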