Jetson Inference
DNN Vision Library
tensorNet Member List

This is the complete list of members for tensorNet, including all inherited members. Each entry shows the member signature, the class that declares it, and any attributes in brackets (e.g. [inline], [static], [protected]).

AllowGPUFallback() const  tensorNet [inline]
ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator)  tensorNet [protected]
CreateStream(bool nonBlocking=true)  tensorNet
DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type)  tensorNet [static]
DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU)  tensorNet [static]
DetectNativePrecisions(deviceType device=DEVICE_GPU)  tensorNet [static]
EnableDebug()  tensorNet
EnableLayerProfiler()  tensorNet
FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true)  tensorNet [static]
GenerateColor(uint32_t classID, float alpha=255.0f)  tensorNet [static]
GetDevice() const  tensorNet [inline]
GetInputDims(uint32_t layer=0) const  tensorNet [inline]
GetInputHeight(uint32_t layer=0) const  tensorNet [inline]
GetInputLayers() const  tensorNet [inline]
GetInputPtr(uint32_t layer=0) const  tensorNet [inline]
GetInputSize(uint32_t layer=0) const  tensorNet [inline]
GetInputWidth(uint32_t layer=0) const  tensorNet [inline]
GetModelFilename() const  tensorNet [inline]
GetModelPath() const  tensorNet [inline]
GetModelType() const  tensorNet [inline]
GetNetworkFPS()  tensorNet [inline]
GetNetworkName() const  tensorNet [inline]
GetNetworkTime()  tensorNet [inline]
GetOutputDims(uint32_t layer=0) const  tensorNet [inline]
GetOutputHeight(uint32_t layer=0) const  tensorNet [inline]
GetOutputLayers() const  tensorNet [inline]
GetOutputPtr(uint32_t layer=0) const  tensorNet [inline]
GetOutputSize(uint32_t layer=0) const  tensorNet [inline]
GetOutputWidth(uint32_t layer=0) const  tensorNet [inline]
GetPrecision() const  tensorNet [inline]
GetProfilerTime(profilerQuery query)  tensorNet [inline]
GetProfilerTime(profilerQuery query, profilerDevice device)  tensorNet [inline]
GetPrototxtPath() const  tensorNet [inline]
GetStream() const  tensorNet [inline]
gLogger  tensorNet [protected]
gProfiler  tensorNet [protected]
IsModelType(modelType type) const  tensorNet [inline]
IsPrecision(precisionType type) const  tensorNet [inline]
LoadClassColors(const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f)  tensorNet [static]
LoadClassColors(const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f)  tensorNet [static]
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1)  tensorNet [static]
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1)  tensorNet [static]
LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)  tensorNet
LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)  tensorNet
LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)  tensorNet
LoadEngine(const char *filename, char **stream, size_t *size)  tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)  tensorNet
mAllowGPUFallback  tensorNet [protected]
mBindings  tensorNet [protected]
mCacheCalibrationPath  tensorNet [protected]
mCacheEnginePath  tensorNet [protected]
mChecksumPath  tensorNet [protected]
mContext  tensorNet [protected]
mDevice  tensorNet [protected]
mEnableDebug  tensorNet [protected]
mEnableProfiler  tensorNet [protected]
mEngine  tensorNet [protected]
mEventsCPU  tensorNet [protected]
mEventsGPU  tensorNet [protected]
mInfer  tensorNet [protected]
mInputs  tensorNet [protected]
mMaxBatchSize  tensorNet [protected]
mMeanPath  tensorNet [protected]
mModelFile  tensorNet [protected]
mModelPath  tensorNet [protected]
mModelType  tensorNet [protected]
mOutputs  tensorNet [protected]
mPrecision  tensorNet [protected]
mProfilerQueriesDone  tensorNet [protected]
mProfilerQueriesUsed  tensorNet [protected]
mProfilerTimes  tensorNet [protected]
mPrototxtPath  tensorNet [protected]
mStream  tensorNet [protected]
mWorkspaceSize  tensorNet [protected]
PrintProfilerTimes()  tensorNet [inline]
ProcessNetwork(bool sync=true)  tensorNet [protected]
ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize)  tensorNet [protected]
PROFILER_BEGIN(profilerQuery query)  tensorNet [inline, protected]
PROFILER_END(profilerQuery query)  tensorNet [inline, protected]
PROFILER_QUERY(profilerQuery query)  tensorNet [inline, protected]
SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true)  tensorNet [static]
SetStream(cudaStream_t stream)  tensorNet
tensorNet()  tensorNet [protected]
ValidateEngine(const char *model_path, const char *cache_path, const char *checksum_path)  tensorNet [protected]
~tensorNet()  tensorNet [virtual]
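
Because tensorNet() is protected, applications normally use tensorNet through a derived network class (in jetson-inference, classes such as imageNet derive from tensorNet and call LoadNetwork() internally). The static helpers, however, can be called without an instance. Below is a minimal sketch; the member list omits return types, so this assumes the header installs as <jetson-inference/tensorNet.h>, that DetectNativePrecisions() returns std::vector<precisionType>, and that LoadClassLabels() returns a bool success flag. The label file path is hypothetical.

    // Minimal usage sketch of tensorNet's static helpers (assumed header
    // path and return types; see the note above).
    #include <jetson-inference/tensorNet.h>

    #include <cstdio>
    #include <string>
    #include <vector>

    int main()
    {
        // Probe which inference precisions the GPU supports natively.
        std::vector<precisionType> native = tensorNet::DetectNativePrecisions(DEVICE_GPU);
        printf("device supports %zu native precision(s)\n", native.size());

        // Select the fastest natively-supported precision, permitting INT8.
        const precisionType fastest = tensorNet::FindFastestPrecision(DEVICE_GPU, /*allowInt8=*/true);
        printf("fastest native precision enum: %d\n", (int)fastest);

        // Load class label strings from a text file (hypothetical path),
        // expecting 1000 classes; expectedClasses=-1 would skip the count check.
        std::vector<std::string> labels;
        if( !tensorNet::LoadClassLabels("labels.txt", labels, 1000) )
            printf("failed to load class labels\n");

        return 0;
    }

Instance members such as LoadNetwork(), ProcessNetwork(), GetNetworkFPS(), and PrintProfilerTimes() become available once a derived class has constructed and loaded the network.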