Jetson Inference
DNN Vision Library

Foreground/background segmentation and removal DNN. More...

Classes

class  backgroundNet
 Background subtraction/removal with DNNs, using TensorRT. More...
 

Macros

#define BACKGROUNDNET_DEFAULT_INPUT   "input_0"
 Name of default input layer for backgroundNet model. More...
 
#define BACKGROUNDNET_DEFAULT_OUTPUT   "output_0"
 Name of default output layer for backgroundNet model. More...
 
#define BACKGROUNDNET_MODEL_TYPE   "background"
 The model type for backgroundNet in data/networks/models.json. More...
 
#define BACKGROUNDNET_USAGE_STRING
 Standard command-line options able to be passed to backgroundNet::Create() More...
 

Detailed Description

Foreground/background segmentation and removal DNN.


Class Documentation

◆ backgroundNet

class backgroundNet

Background subtraction/removal with DNNs, using TensorRT.

Inheritance diagram for backgroundNet:
tensorNet

Public Member Functions

virtual ~backgroundNet ()
 Destroy. More...
 
template<typename T >
bool Process (T *image, uint32_t width, uint32_t height, cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true)
 Perform background subtraction/removal on the image (in-place). More...
 
template<typename T >
bool Process (T *input, T *output, uint32_t width, uint32_t height, cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true)
 Perform background subtraction/removal on the image. More...
 
bool Process (void *image, uint32_t width, uint32_t height, imageFormat format, cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true)
 Perform background subtraction/removal on the image (in-place). More...
 
bool Process (void *input, void *output, uint32_t width, uint32_t height, imageFormat format, cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true)
 Perform background subtraction/removal on the image. More...
 
- Public Member Functions inherited from tensorNet
virtual ~tensorNet ()
 Destroy. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple output layers. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple input layers. More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance (this variant is used for UFF models) More...
 
bool LoadNetwork (const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL)
 Load a new network instance with multiple input layers (used for UFF models) More...
 
bool LoadEngine (const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)
 Load a network instance from a serialized engine plan file. More...
 
bool LoadEngine (char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)
 Load a network instance from a serialized engine plan file. More...
 
bool LoadEngine (nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL)
 Load network resources from an existing TensorRT engine instance. More...
 
bool LoadEngine (const char *filename, char **stream, size_t *size)
 Load a serialized engine plan file into memory. More...
 
void EnableLayerProfiler ()
 Manually enable layer profiling times. More...
 
void EnableDebug ()
 Manually enable debug messages and synchronization. More...
 
bool AllowGPUFallback () const
 Return true if GPU fallback is enabled. More...
 
deviceType GetDevice () const
 Retrieve the device being used for execution. More...
 
precisionType GetPrecision () const
 Retrieve the type of precision being used. More...
 
bool IsPrecision (precisionType type) const
 Check if a particular precision is being used. More...
 
cudaStream_t GetStream () const
 Retrieve the stream that the device is operating on. More...
 
cudaStream_t CreateStream (bool nonBlocking=true)
 Create and use a new stream for execution. More...
 
void SetStream (cudaStream_t stream)
 Set the stream that the device is operating on. More...
 
const char * GetPrototxtPath () const
 Retrieve the path to the network prototxt file. More...
 
const char * GetModelPath () const
 Retrieve the full path to model file, including the filename. More...
 
const char * GetModelFilename () const
 Retrieve the filename of the file, excluding the directory. More...
 
modelType GetModelType () const
 Retrieve the format of the network model. More...
 
bool IsModelType (modelType type) const
 Return true if the model is of the specified format. More...
 
uint32_t GetInputLayers () const
 Retrieve the number of input layers to the network. More...
 
uint32_t GetOutputLayers () const
 Retrieve the number of output layers to the network. More...
 
Dims3 GetInputDims (uint32_t layer=0) const
 Retrieve the dimensions of network input layer. More...
 
uint32_t GetInputWidth (uint32_t layer=0) const
 Retrieve the width of network input layer. More...
 
uint32_t GetInputHeight (uint32_t layer=0) const
 Retrieve the height of network input layer. More...
 
uint32_t GetInputSize (uint32_t layer=0) const
 Retrieve the size (in bytes) of network input layer. More...
 
float * GetInputPtr (uint32_t layer=0) const
 Get the CUDA pointer to the input layer's memory. More...
 
Dims3 GetOutputDims (uint32_t layer=0) const
 Retrieve the dimensions of network output layer. More...
 
uint32_t GetOutputWidth (uint32_t layer=0) const
 Retrieve the width of network output layer. More...
 
uint32_t GetOutputHeight (uint32_t layer=0) const
 Retrieve the height of network output layer. More...
 
uint32_t GetOutputSize (uint32_t layer=0) const
 Retrieve the size (in bytes) of network output layer. More...
 
float * GetOutputPtr (uint32_t layer=0) const
 Get the CUDA pointer to the output memory. More...
 
float GetNetworkFPS ()
 Retrieve the network frames per second (FPS). More...
 
float GetNetworkTime ()
 Retrieve the network runtime (in milliseconds). More...
 
const char * GetNetworkName () const
 Retrieve the network name (its filename). More...
 
float2 GetProfilerTime (profilerQuery query)
 Retrieve the profiler runtime (in milliseconds). More...
 
float GetProfilerTime (profilerQuery query, profilerDevice device)
 Retrieve the profiler runtime (in milliseconds). More...
 
void PrintProfilerTimes ()
 Print the profiler times (in milliseconds). More...
 

Static Public Member Functions

static backgroundNet * Create (const char *network="u2net", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)
 Load a pre-trained model. More...
 
static backgroundNet * Create (const char *model_path, const char *input=BACKGROUNDNET_DEFAULT_INPUT, const char *output=BACKGROUNDNET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)
 Load a new network instance. More...
 
static backgroundNet * Create (int argc, char **argv)
 Load a new network instance by parsing the command line. More...
 
static backgroundNet * Create (const commandLine &cmdLine)
 Load a new network instance by parsing the command line. More...
 
static const char * Usage ()
 Usage string for command line arguments to Create() More...
 
- Static Public Member Functions inherited from tensorNet
static bool LoadClassLabels (const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1)
 Load class descriptions from a label file. More...
 
static bool LoadClassLabels (const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1)
 Load class descriptions and synset strings from a label file. More...
 
static bool LoadClassColors (const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f)
 Load class colors from a text file. More...
 
static bool LoadClassColors (const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f)
 Load class colors from a text file. More...
 
static float4 GenerateColor (uint32_t classID, float alpha=255.0f)
 Procedurally generate a color for a given class index with the specified alpha value. More...
 
static precisionType SelectPrecision (precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true)
 Resolve a desired precision to a specific one that's available. More...
 
static precisionType FindFastestPrecision (deviceType device=DEVICE_GPU, bool allowInt8=true)
 Determine the fastest native precision on a device. More...
 
static std::vector< precisionType > DetectNativePrecisions (deviceType device=DEVICE_GPU)
 Detect the precisions supported natively on a device. More...
 
static bool DetectNativePrecision (const std::vector< precisionType > &nativeTypes, precisionType type)
 Detect if a particular precision is supported natively. More...
 
static bool DetectNativePrecision (precisionType precision, deviceType device=DEVICE_GPU)
 Detect if a particular precision is supported natively. More...
 

Protected Member Functions

 backgroundNet ()
 
bool init (const char *model_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)
 
- Protected Member Functions inherited from tensorNet
 tensorNet ()
 Constructor. More...
 
bool ProcessNetwork (bool sync=true)
 Execute processing of the network. More...
 
bool ProfileModel (const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize)
 Create and output an optimized network model. More...
 
bool ConfigureBuilder (nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator)
 Configure builder options. More...
 
bool ValidateEngine (const char *model_path, const char *cache_path, const char *checksum_path)
 Validate that the model already has a built TensorRT engine that exists and doesn't need updating. More...
 
void PROFILER_BEGIN (profilerQuery query)
 Begin a profiling query, before network is run. More...
 
void PROFILER_END (profilerQuery query)
 End a profiling query, after the network is run. More...
 
bool PROFILER_QUERY (profilerQuery query)
 Query the CUDA part of a profiler query. More...
 

Additional Inherited Members

- Protected Attributes inherited from tensorNet
tensorNet::Logger gLogger
 
tensorNet::Profiler gProfiler
 
std::string mPrototxtPath
 
std::string mModelPath
 
std::string mModelFile
 
std::string mMeanPath
 
std::string mCacheEnginePath
 
std::string mCacheCalibrationPath
 
std::string mChecksumPath
 
deviceType mDevice
 
precisionType mPrecision
 
modelType mModelType
 
cudaStream_t mStream
 
cudaEvent_t mEventsGPU [PROFILER_TOTAL *2]
 
timespec mEventsCPU [PROFILER_TOTAL *2]
 
nvinfer1::IRuntime * mInfer
 
nvinfer1::ICudaEngine * mEngine
 
nvinfer1::IExecutionContext * mContext
 
float2 mProfilerTimes [PROFILER_TOTAL+1]
 
uint32_t mProfilerQueriesUsed
 
uint32_t mProfilerQueriesDone
 
uint32_t mWorkspaceSize
 
uint32_t mMaxBatchSize
 
bool mEnableProfiler
 
bool mEnableDebug
 
bool mAllowGPUFallback
 
void ** mBindings
 
std::vector< layerInfo > mInputs
 
std::vector< layerInfo > mOutputs
 

Constructor & Destructor Documentation

◆ ~backgroundNet()

virtual backgroundNet::~backgroundNet ( )
virtual

Destroy.

◆ backgroundNet()

backgroundNet::backgroundNet ( )
protected

Member Function Documentation

◆ Create() [1/4]

static backgroundNet* backgroundNet::Create ( const char *  model_path,
const char *  input = BACKGROUNDNET_DEFAULT_INPUT,
const char *  output = BACKGROUNDNET_DEFAULT_OUTPUT,
uint32_t  maxBatchSize = DEFAULT_MAX_BATCH_SIZE,
precisionType  precision = TYPE_FASTEST,
deviceType  device = DEVICE_GPU,
bool  allowGPUFallback = true 
)
static

Load a new network instance.

Parameters
model_path — File path to the caffemodel
input — Name of the input layer blob.
output — Name of the output layer blob.
maxBatchSize — The maximum batch size that the network will support and be optimized for.

◆ Create() [2/4]

static backgroundNet* backgroundNet::Create ( const char *  network = "u2net",
uint32_t  maxBatchSize = DEFAULT_MAX_BATCH_SIZE,
precisionType  precision = TYPE_FASTEST,
deviceType  device = DEVICE_GPU,
bool  allowGPUFallback = true 
)
static

Load a pre-trained model.

◆ Create() [3/4]

static backgroundNet* backgroundNet::Create ( const commandLine &  cmdLine)
static

Load a new network instance by parsing the command line.

◆ Create() [4/4]

static backgroundNet* backgroundNet::Create ( int  argc,
char **  argv 
)
static

Load a new network instance by parsing the command line.

◆ init()

bool backgroundNet::init ( const char *  model_path,
const char *  input,
const char *  output,
uint32_t  maxBatchSize,
precisionType  precision,
deviceType  device,
bool  allowGPUFallback 
)
protected

◆ Process() [1/4]

template<typename T >
bool backgroundNet::Process ( T *  image,
uint32_t  width,
uint32_t  height,
cudaFilterMode  filter = FILTER_LINEAR,
bool  maskAlpha = true 
)
inline

Perform background subtraction/removal on the image (in-place).

Parameters
image — input/output image in CUDA device memory.
width — width of the image in pixels.
height — height of the image in pixels.
filter — the upsampling mode used to resize the DNN mask (FILTER_LINEAR or FILTER_POINT)
maskAlpha — if true (default), the mask will be applied to the alpha channel in addition to the color channels.
Returns
true on success and false if an error occurred.

◆ Process() [2/4]

template<typename T >
bool backgroundNet::Process ( T *  input,
T *  output,
uint32_t  width,
uint32_t  height,
cudaFilterMode  filter = FILTER_LINEAR,
bool  maskAlpha = true 
)
inline

Perform background subtraction/removal on the image.

Parameters
input — input image in CUDA device memory.
output — output image in CUDA device memory.
width — width of the image in pixels.
height — height of the output image in pixels.
filter — the upsampling mode used to resize the DNN mask (FILTER_LINEAR or FILTER_POINT)
maskAlpha — if true (default), the mask will be applied to the alpha channel in addition to the color channels.
Returns
true on success and false if an error occurred.

◆ Process() [3/4]

bool backgroundNet::Process ( void *  image,
uint32_t  width,
uint32_t  height,
imageFormat  format,
cudaFilterMode  filter = FILTER_LINEAR,
bool  maskAlpha = true 
)
inline

Perform background subtraction/removal on the image (in-place).

Parameters
image — input/output image in CUDA device memory.
width — width of the image in pixels.
height — height of the image in pixels.
filter — the upsampling mode used to resize the DNN mask (FILTER_LINEAR or FILTER_POINT)
maskAlpha — if true (default), the mask will be applied to the alpha channel as well.
Returns
true on success and false if an error occurred.

◆ Process() [4/4]

bool backgroundNet::Process ( void *  input,
void *  output,
uint32_t  width,
uint32_t  height,
imageFormat  format,
cudaFilterMode  filter = FILTER_LINEAR,
bool  maskAlpha = true 
)

Perform background subtraction/removal on the image.

Parameters
input — input image in CUDA device memory.
output — output image in CUDA device memory.
width — width of the image in pixels.
height — height of the output image in pixels.
filter — the upsampling mode used to resize the DNN mask (FILTER_LINEAR or FILTER_POINT)
maskAlpha — if true (default), the mask will be applied to the alpha channel as well.
Returns
true on success and false if an error occurred.

◆ Usage()

static const char* backgroundNet::Usage ( )
inlinestatic

Usage string for command line arguments to Create()

Macro Definition Documentation

◆ BACKGROUNDNET_DEFAULT_INPUT

#define BACKGROUNDNET_DEFAULT_INPUT   "input_0"

Name of default input layer for backgroundNet model.

◆ BACKGROUNDNET_DEFAULT_OUTPUT

#define BACKGROUNDNET_DEFAULT_OUTPUT   "output_0"

Name of default output layer for backgroundNet model.

◆ BACKGROUNDNET_MODEL_TYPE

#define BACKGROUNDNET_MODEL_TYPE   "background"

The model type for backgroundNet in data/networks/models.json.

◆ BACKGROUNDNET_USAGE_STRING

#define BACKGROUNDNET_USAGE_STRING
Value:
"backgroundNet arguments: \n" \
" --network=NETWORK pre-trained model to load, one of the following:\n" \
" * u2net (default)\n" \
" --model=MODEL path to custom model to load (caffemodel, uff, or onnx)\n" \
" --input-blob=INPUT name of the input layer (default is '" BACKGROUNDNET_DEFAULT_INPUT "')\n" \
" --output-blob=OUTPUT name of the output layer (default is '" BACKGROUNDNET_DEFAULT_OUTPUT "')\n" \
" --profile enable layer profiling in TensorRT\n\n"

Standard command-line options able to be passed to backgroundNet::Create()

BACKGROUNDNET_DEFAULT_OUTPUT
#define BACKGROUNDNET_DEFAULT_OUTPUT
Name of default output layer for backgroundNet model.
Definition: backgroundNet.h:41
BACKGROUNDNET_DEFAULT_INPUT
#define BACKGROUNDNET_DEFAULT_INPUT
Name of default input layer for backgroundNet model.
Definition: backgroundNet.h:35