Jetson Inference
DNN Vision Library
backgroundNet.h
/*
 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef __BACKGROUND_NET_H__
#define __BACKGROUND_NET_H__


#include "tensorNet.h"
#include "cudaFilterMode.h"

/**
 * Name of default input layer for backgroundNet model.
 */
#define BACKGROUNDNET_DEFAULT_INPUT   "input_0"

/**
 * Name of default output layer for backgroundNet model.
 */
#define BACKGROUNDNET_DEFAULT_OUTPUT  "output_0"

#define BACKGROUNDNET_MODEL_TYPE "background"

/**
 * Standard command-line options able to be passed to backgroundNet::Create()
 */
#define BACKGROUNDNET_USAGE_STRING  "backgroundNet arguments: \n" \
        "  --network=NETWORK    pre-trained model to load, one of the following:\n" \
        "                           * u2net (default)\n" \
        "  --model=MODEL        path to custom model to load (caffemodel, uff, or onnx)\n" \
        "  --input-blob=INPUT   name of the input layer (default is '" BACKGROUNDNET_DEFAULT_INPUT "')\n" \
        "  --output-blob=OUTPUT name of the output layer (default is '" BACKGROUNDNET_DEFAULT_OUTPUT "')\n" \
        "  --profile            enable layer profiling in TensorRT\n\n"

/**
 * Background subtraction/removal with DNNs, using TensorRT.
 */
class backgroundNet : public tensorNet
{
public:
    /**
     * Load a pre-trained model.
     */
    static backgroundNet* Create( const char* network="u2net", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE,
                                  precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true );

    /**
     * Load a custom network instance from a model file.
     */
    static backgroundNet* Create( const char* model_path,
                                  const char* input=BACKGROUNDNET_DEFAULT_INPUT,
                                  const char* output=BACKGROUNDNET_DEFAULT_OUTPUT,
                                  uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE,
                                  precisionType precision=TYPE_FASTEST,
                                  deviceType device=DEVICE_GPU, bool allowGPUFallback=true );

    /**
     * Load a new network instance by parsing the command line.
     */
    static backgroundNet* Create( int argc, char** argv );

    /**
     * Load a new network instance from a parsed commandLine object.
     */
    static backgroundNet* Create( const commandLine& cmdLine );

    /**
     * Usage string for command line arguments to Create()
     */
    static inline const char* Usage()    { return BACKGROUNDNET_USAGE_STRING; }

    /**
     * Destroy.
     */
    virtual ~backgroundNet();

    /**
     * Perform background subtraction/removal on the image (in-place).
     */
    template<typename T> int Process( T* image, uint32_t width, uint32_t height,
                                      cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true )    { return Process((void*)image, width, height, imageFormatFromType<T>(), filter, maskAlpha); }

    /**
     * Perform background subtraction/removal on the image.
     */
    template<typename T> int Process( T* input, T* output, uint32_t width, uint32_t height,
                                      cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true )    { return Process((void*)input, (void*)output, width, height, imageFormatFromType<T>(), filter, maskAlpha); }

    /**
     * Perform background subtraction/removal on the image (in-place).
     */
    inline bool Process( void* image, uint32_t width, uint32_t height, imageFormat format,
                         cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true )    { return Process(image, image, width, height, format, filter, maskAlpha); }

    /**
     * Perform background subtraction/removal on the image.
     */
    bool Process( void* input, void* output, uint32_t width, uint32_t height, imageFormat format,
                  cudaFilterMode filter=FILTER_LINEAR, bool maskAlpha=true );

protected:
    backgroundNet();

    bool init( const char* model_path, const char* input, const char* output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback );
};


#endif
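
Below is a minimal usage sketch of the API declared above, showing Create() followed by an in-place Process() call. It is illustrative only and not part of the header: the include paths assume an installed jetson-inference/jetson-utils layout, the loadImage()/saveImage() helpers and the CUDA_FREE_HOST() macro come from jetson-utils, and the file names input.jpg/output.png are placeholders.

#include <jetson-inference/backgroundNet.h>
#include <jetson-utils/imageIO.h>            // loadImage() / saveImage()
#include <jetson-utils/cudaMappedMemory.h>   // CUDA_FREE_HOST()

#include <cstdio>

int main( int argc, char** argv )
{
    // create the network from the command line (e.g. --network=u2net);
    // with no arguments, the default u2net model is loaded
    backgroundNet* net = backgroundNet::Create(argc, argv);

    if( !net )
    {
        printf("failed to load backgroundNet\n\n%s", backgroundNet::Usage());
        return 1;
    }

    // load an RGBA image so the foreground mask can be written to the alpha channel
    uchar4* image = NULL;
    int width  = 0;
    int height = 0;

    if( !loadImage("input.jpg", &image, &width, &height) )   // placeholder filename
        return 1;

    // remove the background in-place, using the defaults from the header
    // (filter=FILTER_LINEAR, maskAlpha=true)
    if( !net->Process(image, (uint32_t)width, (uint32_t)height) )
        printf("backgroundNet::Process() failed\n");

    saveImage("output.png", image, width, height);   // placeholder filename

    CUDA_FREE_HOST(image);
    delete net;
    return 0;
}

Since maskAlpha defaults to true, the mask ends up in the processed image's alpha channel; the two-image Process() overloads can be used instead when the input buffer should be left untouched.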