|
| ~superResNet () |
| Destroy. More...
|
|
bool | UpscaleRGBA (float *input, uint32_t inputWidth, uint32_t inputHeight, float *output, uint32_t outputWidth, uint32_t outputHeight, float maxPixelValue=255.0f) |
| Upscale a 4-channel RGBA image. More...
|
|
bool | UpscaleRGBA (float *input, float *output, float maxPixelValue=255.0f) |
| Upscale a 4-channel RGBA image. More...
|
|
uint32_t | GetInputWidth () const |
| Retrieve the width of the input image, in pixels. More...
|
|
uint32_t | GetInputHeight () const |
| Retrieve the height of the input image, in pixels. More...
|
|
uint32_t | GetOutputWidth () const |
| Retrieve the width of the output image, in pixels. More...
|
|
uint32_t | GetOutputHeight () const |
| Retrieve the height of the output image, in pixels. More...
|
|
uint32_t | GetScaleFactor () const |
| Retrieve the scale factor between the input and output. More...
|
|
virtual | ~tensorNet () |
| Destroy. More...
|
|
bool | LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
| Load a new network instance. More...
|
|
bool | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
| Load a new network instance with multiple output layers. More...
|
|
bool | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) |
| Load a new network instance (this variant is used for UFF models). More...
|
|
void | EnableLayerProfiler () |
| Manually enable layer profiling times. More...
|
|
void | EnableDebug () |
| Manually enable debug messages and synchronization. More...
|
|
bool | AllowGPUFallback () const |
| Return true if GPU fallback is enabled. More...
|
|
deviceType | GetDevice () const |
| Retrieve the device being used for execution. More...
|
|
precisionType | GetPrecision () const |
| Retrieve the type of precision being used. More...
|
|
bool | IsPrecision (precisionType type) const |
| Check if a particular precision is being used. More...
|
|
cudaStream_t | GetStream () const |
| Retrieve the stream that the device is operating on. More...
|
|
cudaStream_t | CreateStream (bool nonBlocking=true) |
| Create and use a new stream for execution. More...
|
|
void | SetStream (cudaStream_t stream) |
| Set the stream that the device is operating on. More...
|
|
const char * | GetPrototxtPath () const |
| Retrieve the path to the network prototxt file. More...
|
|
const char * | GetModelPath () const |
| Retrieve the path to the network model file. More...
|
|
modelType | GetModelType () const |
| Retrieve the format of the network model. More...
|
|
bool | IsModelType (modelType type) const |
| Return true if the model is of the specified format. More...
|
|
float | GetNetworkTime () |
| Retrieve the network runtime (in milliseconds). More...
|
|
float2 | GetProfilerTime (profilerQuery query) |
| Retrieve the profiler runtime (in milliseconds). More...
|
|
float | GetProfilerTime (profilerQuery query, profilerDevice device) |
| Retrieve the profiler runtime (in milliseconds). More...
|
|
void | PrintProfilerTimes () |
| Print the profiler times (in milliseconds). More...
|
|
|
| superResNet () |
|
| tensorNet () |
| Constructor. More...
|
|
bool | ProfileModel (const std::string &deployFile, const std::string &modelFile, const char *input, const Dims3 &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, std::ostream &modelStream) |
| Create and output an optimized network model. More...
|
|
void | PROFILER_BEGIN (profilerQuery query) |
| Begin a profiling query, before network is run. More...
|
|
void | PROFILER_END (profilerQuery query) |
| End a profiling query, after the network is run. More...
|
|
bool | PROFILER_QUERY (profilerQuery query) |
| Query the CUDA part of a profiler query. More...
|
|
Super Resolution Network.
- Note
- superResNet is only supported with TensorRT 5.0 and newer, as it uses ONNX models and requires ONNX import support in TensorRT.