Jetson Inference
DNN Vision Library
This is the complete list of members for segNet, including all inherited members.
Member | Defined in | Attributes |
AllowGPUFallback() const | tensorNet | inline |
classify(const char *ignore_class) | segNet | protected |
ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) | tensorNet | protected |
Create(const char *network="fcn-resnet18-voc", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | segNet | static |
Create(const char *prototxt_path, const char *model_path, const char *class_labels, const char *class_colors=NULL, const char *input=SEGNET_DEFAULT_INPUT, const char *output=SEGNET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | segNet | static |
Create(int argc, char **argv) | segNet | static |
Create(const commandLine &cmdLine) | segNet | static |
CreateStream(bool nonBlocking=true) | tensorNet | |
DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) | tensorNet | static |
DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) | tensorNet | static |
DetectNativePrecisions(deviceType device=DEVICE_GPU) | tensorNet | static |
EnableDebug() | tensorNet | |
EnableLayerProfiler() | tensorNet | |
FILTER_LINEAR enum value | segNet | |
FILTER_POINT enum value | segNet | |
FilterMode enum name | segNet | |
FilterModeFromStr(const char *str, FilterMode default_value=FILTER_LINEAR) | segNet | static |
FindClassID(const char *label_name) | segNet | |
FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static |
GenerateColor(uint32_t classID, float alpha=255.0f) | tensorNet | static |
GetClassColor(uint32_t id) const | segNet | inline |
GetClassDesc(uint32_t id) const | segNet | inline |
GetClassLabel(uint32_t id) const | segNet | inline |
GetClassPath() const | segNet | inline |
GetDevice() const | tensorNet | inline |
GetGridHeight() const | segNet | inline |
GetGridWidth() const | segNet | inline |
GetInputDims(uint32_t layer=0) const | tensorNet | inline |
GetInputHeight(uint32_t layer=0) const | tensorNet | inline |
GetInputLayers() const | tensorNet | inline |
GetInputPtr(uint32_t layer=0) const | tensorNet | inline |
GetInputSize(uint32_t layer=0) const | tensorNet | inline |
GetInputWidth(uint32_t layer=0) const | tensorNet | inline |
GetModelFilename() const | tensorNet | inline |
GetModelPath() const | tensorNet | inline |
GetModelType() const | tensorNet | inline |
GetNetworkFPS() | tensorNet | inline |
GetNetworkName() const | tensorNet | inline |
GetNetworkTime() | tensorNet | inline |
GetNumClasses() const | segNet | inline |
GetOutputDims(uint32_t layer=0) const | tensorNet | inline |
GetOutputHeight(uint32_t layer=0) const | tensorNet | inline |
GetOutputLayers() const | tensorNet | inline |
GetOutputPtr(uint32_t layer=0) const | tensorNet | inline |
GetOutputSize(uint32_t layer=0) const | tensorNet | inline |
GetOutputWidth(uint32_t layer=0) const | tensorNet | inline |
GetOverlayAlpha() const | segNet | |
GetPrecision() const | tensorNet | inline |
GetProfilerTime(profilerQuery query) | tensorNet | inline |
GetProfilerTime(profilerQuery query, profilerDevice device) | tensorNet | inline |
GetPrototxtPath() const | tensorNet | inline |
GetStream() const | tensorNet | inline |
gLogger | tensorNet | protected |
gProfiler | tensorNet | protected |
IsModelType(modelType type) const | tensorNet | inline |
IsPrecision(precisionType type) const | tensorNet | inline |
LoadClassColors(const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static |
LoadClassColors(const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static |
loadClassColors(const char *filename) | segNet | protected |
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1) | tensorNet | static |
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1) | tensorNet | static |
loadClassLabels(const char *filename) | segNet | protected |
LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | |
LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | |
LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | |
LoadEngine(const char *filename, char **stream, size_t *size) | tensorNet | |
LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | |
mAllowGPUFallback | tensorNet | protected |
Mask(T *output, uint32_t width, uint32_t height, FilterMode filter=FILTER_LINEAR) | segNet | inline |
Mask(void *output, uint32_t width, uint32_t height, imageFormat format, FilterMode filter=FILTER_LINEAR) | segNet | |
Mask(float *output, uint32_t width, uint32_t height, FilterMode filter=FILTER_LINEAR) | segNet | |
Mask(uint8_t *output, uint32_t width, uint32_t height) | segNet | |
mBindings | tensorNet | protected |
mCacheCalibrationPath | tensorNet | protected |
mCacheEnginePath | tensorNet | protected |
mChecksumPath | tensorNet | protected |
mClassColors | segNet | protected |
mClassLabels | segNet | protected |
mClassMap | segNet | protected |
mClassPath | segNet | protected |
mColorsAlphaSet | segNet | protected |
mContext | tensorNet | protected |
mDevice | tensorNet | protected |
mEnableDebug | tensorNet | protected |
mEnableProfiler | tensorNet | protected |
mEngine | tensorNet | protected |
mEventsCPU | tensorNet | protected |
mEventsGPU | tensorNet | protected |
mInfer | tensorNet | protected |
mInputs | tensorNet | protected |
mLastInputFormat | segNet | protected |
mLastInputHeight | segNet | protected |
mLastInputImg | segNet | protected |
mLastInputWidth | segNet | protected |
mMaxBatchSize | tensorNet | protected |
mMeanPath | tensorNet | protected |
mModelFile | tensorNet | protected |
mModelPath | tensorNet | protected |
mModelType | tensorNet | protected |
mOutputs | tensorNet | protected |
mPrecision | tensorNet | protected |
mProfilerQueriesDone | tensorNet | protected |
mProfilerQueriesUsed | tensorNet | protected |
mProfilerTimes | tensorNet | protected |
mPrototxtPath | tensorNet | protected |
mStream | tensorNet | protected |
mWorkspaceSize | tensorNet | protected |
Overlay(T *output, uint32_t width, uint32_t height, FilterMode filter=FILTER_LINEAR) | segNet | inline |
Overlay(void *output, uint32_t width, uint32_t height, imageFormat format, FilterMode filter=FILTER_LINEAR) | segNet | |
Overlay(float *output, uint32_t width, uint32_t height, FilterMode filter=FILTER_LINEAR) | segNet | |
overlayLinear(void *input, uint32_t in_width, uint32_t in_height, imageFormat in_format, void *output, uint32_t out_width, uint32_t out_height, imageFormat out_format, bool mask_only) | segNet | protected |
overlayPoint(void *input, uint32_t in_width, uint32_t in_height, imageFormat in_format, void *output, uint32_t out_width, uint32_t out_height, imageFormat out_format, bool mask_only) | segNet | protected |
PrintProfilerTimes() | tensorNet | inline |
Process(T *input, uint32_t width, uint32_t height, const char *ignore_class="void") | segNet | inline |
Process(void *input, uint32_t width, uint32_t height, imageFormat format, const char *ignore_class="void") | segNet | |
Process(float *input, uint32_t width, uint32_t height, const char *ignore_class="void") | segNet | |
ProcessNetwork(bool sync=true) | tensorNet | protected |
ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) | tensorNet | protected |
PROFILER_BEGIN(profilerQuery query) | tensorNet | inline protected |
PROFILER_END(profilerQuery query) | tensorNet | inline protected |
PROFILER_QUERY(profilerQuery query) | tensorNet | inline protected |
saveClassLegend(const char *filename) | segNet | protected |
segNet() | segNet | protected |
SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static |
SetClassColor(uint32_t classIndex, const float4 &color) | segNet | |
SetClassColor(uint32_t classIndex, float r, float g, float b, float a=255.0f) | segNet | |
SetOverlayAlpha(float alpha, bool explicit_exempt=true) | segNet | |
SetStream(cudaStream_t stream) | tensorNet | |
tensorNet() | tensorNet | protected |
Usage() | segNet | inline static |
ValidateEngine(const char *model_path, const char *cache_path, const char *checksum_path) | tensorNet | protected |
VisualizationFlags enum name | segNet | |
VisualizationFlagsFromStr(const char *str, uint32_t default_value=VISUALIZE_OVERLAY) | segNet | static |
VISUALIZE_MASK enum value | segNet | |
VISUALIZE_OVERLAY enum value | segNet | |
~segNet() | segNet | virtual |
~tensorNet() | tensorNet | virtual |
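
A minimal usage sketch of the typical segNet workflow (Create, Process, Overlay/Mask). It assumes the loadImage()/saveImage() helpers from jetson-utils and a placeholder input path; it is an illustrative example, not part of this reference page.

```cpp
// build against the jetson-inference / jetson-utils headers and libraries
#include <jetson-inference/segNet.h>
#include <jetson-utils/loadImage.h>

int main( int argc, char** argv )
{
	// load the input image into shared CPU/GPU memory
	// ("input.jpg" is a placeholder path, not part of the API)
	uchar3* image = NULL;
	int width = 0, height = 0;

	if( !loadImage("input.jpg", &image, &width, &height) )
		return 1;

	// create the segmentation network from a built-in model
	// (the default network is "fcn-resnet18-voc")
	segNet* net = segNet::Create("fcn-resnet18-voc");

	if( !net )
		return 1;

	// run inference, then visualize the results in-place
	if( net->Process(image, width, height) )
	{
		net->Overlay(image, width, height, segNet::FILTER_LINEAR);  // blend class colors over the image
		// net->Mask(image, width, height);                         // or draw the solid class mask instead
	}

	saveImage("output.jpg", image, width, height);  // write the visualization back out

	delete net;	// ~segNet() is virtual, so this releases the TensorRT resources
	return 0;
}
```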
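The class-metadata accessors (GetNumClasses, GetClassDesc, GetClassColor, FindClassID, SetClassColor) can be combined to inspect or remap the color table. The sketch below assumes GetClassColor() returns a float4 and FindClassID() returns a signed class index (or a negative value when not found), as in recent versions of the library.

```cpp
#include <jetson-inference/segNet.h>
#include <cstdio>

void printClasses( segNet* net )
{
	const uint32_t numClasses = net->GetNumClasses();
	printf("model '%s' has %u classes\n", net->GetNetworkName(), numClasses);

	for( uint32_t n = 0; n < numClasses; n++ )
	{
		// assumed: GetClassColor() returns the RGBA color as a float4
		const float4 color = net->GetClassColor(n);

		printf("class %02u  %-20s  (%.0f, %.0f, %.0f, %.0f)\n",
		       n, net->GetClassDesc(n), color.x, color.y, color.z, color.w);
	}

	// remap a class color, e.g. make 'person' bright red with full alpha
	const int personID = net->FindClassID("person");

	if( personID >= 0 )
		net->SetClassColor(personID, 255.0f, 0.0f, 0.0f, 255.0f);
}
```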