| 
| virtual  | ~poseNet () | 
|   | Destroy.  More...
  | 
|   | 
| template<typename T >  | 
| bool  | Process (T *image, uint32_t width, uint32_t height, std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | 
|   | Perform pose estimation on the given image, returning object poses, and overlay the results.  More...
  | 
|   | 
| bool  | Process (void *image, uint32_t width, uint32_t height, imageFormat format, std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | 
|   | Perform pose estimation on the given image, and overlay the results.  More...
  | 
|   | 
| template<typename T >  | 
| bool  | Process (T *image, uint32_t width, uint32_t height, uint32_t overlay=OVERLAY_DEFAULT) | 
|   | Perform pose estimation on the given image, and overlay the results.  More...
  | 
|   | 
| bool  | Process (void *image, uint32_t width, uint32_t height, imageFormat format, uint32_t overlay=OVERLAY_DEFAULT) | 
|   | Perform pose estimation on the given image, and overlay the results.  More...
  | 
|   | 
| template<typename T >  | 
| bool  | Overlay (T *input, T *output, uint32_t width, uint32_t height, const std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | 
|   | Overlay the results on the image.  More...
  | 
|   | 
| bool  | Overlay (void *input, void *output, uint32_t width, uint32_t height, imageFormat format, const std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | 
|   | Overlay the results on the image.  More...
  | 
|   | 
| float  | GetThreshold () const | 
|   | Retrieve the minimum confidence threshold.  More...
  | 
|   | 
| void  | SetThreshold (float threshold) | 
|   | Set the minimum confidence threshold.  More...
  | 
|   | 
| const char *  | GetCategory () const | 
|   | Get the category of objects that are detected (e.g.  More...
  | 
|   | 
| uint32_t  | GetNumKeypoints () const | 
|   | Get the number of keypoints in the topology.  More...
  | 
|   | 
| const char *  | GetKeypointName (uint32_t index) const | 
|   | Get the name of a keypoint in the topology by its ID.  More...
  | 
|   | 
| int  | FindKeypointID (const char *name) const | 
|   | Find the ID of a keypoint by name, or return -1 if not found.  More...
  | 
|   | 
| float4  | GetKeypointColor (uint32_t index) const | 
|   | Get the overlay color of a keypoint.  More...
  | 
|   | 
| void  | SetKeypointColor (uint32_t index, const float4 &color) | 
|   | Set the overlay color for a keypoint.  More...
  | 
|   | 
| void  | SetKeypointAlpha (uint32_t index, float alpha) | 
|   | Set the alpha channel for a keypoint color (between 0-255).  More...
  | 
|   | 
| void  | SetKeypointAlpha (float alpha) | 
|   | Set the alpha channel for all keypoint colors used during overlay.  More...
  | 
|   | 
| float  | GetKeypointScale () const | 
|   | Get the scale used to calculate the radius of keypoints relative to input image dimensions.  More...
  | 
|   | 
| void  | SetKeypointScale (float scale) | 
|   | Set the scale used to calculate the radius of keypoint circles.  More...
  | 
|   | 
| float  | GetLinkScale () const | 
|   | Get the scale used to calculate the width of link lines relative to input image dimensions.  More...
  | 
|   | 
| void  | SetLinkScale (float scale) | 
|   | Set the scale used to calculate the width of link lines.  More...
  | 
|   | 
| virtual  | ~tensorNet () | 
|   | Destroy.  More...
  | 
|   | 
| bool  | LoadNetwork (const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | 
|   | Load a new network instance.  More...
  | 
|   | 
| bool  | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | 
|   | Load a new network instance with multiple output layers.  More...
  | 
|   | 
| bool  | LoadNetwork (const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | 
|   | Load a new network instance with multiple input layers.  More...
  | 
|   | 
| bool  | LoadNetwork (const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | 
|   | Load a new network instance (this variant is used for UFF models)  More...
  | 
|   | 
| bool  | LoadNetwork (const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | 
|   | Load a new network instance with multiple input layers (used for UFF models)  More...
  | 
|   | 
| bool  | LoadEngine (const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | 
|   | Load a network instance from a serialized engine plan file.  More...
  | 
|   | 
| bool  | LoadEngine (char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | 
|   | Load a network instance from a serialized engine plan file.  More...
  | 
|   | 
| bool  | LoadEngine (nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | 
|   | Load network resources from an existing TensorRT engine instance.  More...
  | 
|   | 
| bool  | LoadEngine (const char *filename, char **stream, size_t *size) | 
|   | Load a serialized engine plan file into memory.  More...
  | 
|   | 
| void  | EnableLayerProfiler () | 
|   | Manually enable layer profiling times.  More...
  | 
|   | 
| void  | EnableDebug () | 
|   | Manually enable debug messages and synchronization.  More...
  | 
|   | 
| bool  | AllowGPUFallback () const | 
|   | Return true if GPU fallback is enabled.  More...
  | 
|   | 
| deviceType  | GetDevice () const | 
|   | Retrieve the device being used for execution.  More...
  | 
|   | 
| precisionType  | GetPrecision () const | 
|   | Retrieve the type of precision being used.  More...
  | 
|   | 
| bool  | IsPrecision (precisionType type) const | 
|   | Check if a particular precision is being used.  More...
  | 
|   | 
| cudaStream_t  | GetStream () const | 
|   | Retrieve the stream that the device is operating on.  More...
  | 
|   | 
| cudaStream_t  | CreateStream (bool nonBlocking=true) | 
|   | Create and use a new stream for execution.  More...
  | 
|   | 
| void  | SetStream (cudaStream_t stream) | 
|   | Set the stream that the device is operating on.  More...
  | 
|   | 
| const char *  | GetPrototxtPath () const | 
|   | Retrieve the path to the network prototxt file.  More...
  | 
|   | 
| const char *  | GetModelPath () const | 
|   | Retrieve the full path to model file, including the filename.  More...
  | 
|   | 
| const char *  | GetModelFilename () const | 
|   | Retrieve the filename of the file, excluding the directory.  More...
  | 
|   | 
| modelType  | GetModelType () const | 
|   | Retrieve the format of the network model.  More...
  | 
|   | 
| bool  | IsModelType (modelType type) const | 
|   | Return true if the model is of the specified format.  More...
  | 
|   | 
| uint32_t  | GetInputLayers () const | 
|   | Retrieve the number of input layers to the network.  More...
  | 
|   | 
| uint32_t  | GetOutputLayers () const | 
|   | Retrieve the number of output layers to the network.  More...
  | 
|   | 
| Dims3  | GetInputDims (uint32_t layer=0) const | 
|   | Retrieve the dimensions of network input layer.  More...
  | 
|   | 
| uint32_t  | GetInputWidth (uint32_t layer=0) const | 
|   | Retrieve the width of network input layer.  More...
  | 
|   | 
| uint32_t  | GetInputHeight (uint32_t layer=0) const | 
|   | Retrieve the height of network input layer.  More...
  | 
|   | 
| uint32_t  | GetInputSize (uint32_t layer=0) const | 
|   | Retrieve the size (in bytes) of network input layer.  More...
  | 
|   | 
| float *  | GetInputPtr (uint32_t layer=0) const | 
|   | Get the CUDA pointer to the input layer's memory.  More...
  | 
|   | 
| Dims3  | GetOutputDims (uint32_t layer=0) const | 
|   | Retrieve the dimensions of network output layer.  More...
  | 
|   | 
| uint32_t  | GetOutputWidth (uint32_t layer=0) const | 
|   | Retrieve the width of network output layer.  More...
  | 
|   | 
| uint32_t  | GetOutputHeight (uint32_t layer=0) const | 
|   | Retrieve the height of network output layer.  More...
  | 
|   | 
| uint32_t  | GetOutputSize (uint32_t layer=0) const | 
|   | Retrieve the size (in bytes) of network output layer.  More...
  | 
|   | 
| float *  | GetOutputPtr (uint32_t layer=0) const | 
|   | Get the CUDA pointer to the output memory.  More...
  | 
|   | 
| float  | GetNetworkFPS () | 
|   | Retrieve the network frames per second (FPS).  More...
  | 
|   | 
| float  | GetNetworkTime () | 
|   | Retrieve the network runtime (in milliseconds).  More...
  | 
|   | 
| const char *  | GetNetworkName () const | 
|   | Retrieve the network name (its filename).  More...
  | 
|   | 
| float2  | GetProfilerTime (profilerQuery query) | 
|   | Retrieve the profiler runtime (in milliseconds).  More...
  | 
|   | 
| float  | GetProfilerTime (profilerQuery query, profilerDevice device) | 
|   | Retrieve the profiler runtime (in milliseconds).  More...
  | 
|   | 
| void  | PrintProfilerTimes () | 
|   | Print the profiler times (in milliseconds).  More...
  | 
|   | 
 | 
| static uint32_t  | OverlayFlagsFromStr (const char *flags) | 
|   | Parse a string sequence into OverlayFlags enum.  More...
  | 
|   | 
| static poseNet *  | Create (const char *network="resnet18-body", float threshold=POSENET_DEFAULT_THRESHOLD, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | 
|   | Load a pre-trained model.  More...
  | 
|   | 
| static poseNet *  | Create (const char *model_path, const char *topology, const char *colors, float threshold=POSENET_DEFAULT_THRESHOLD, const char *input=POSENET_DEFAULT_INPUT, const char *cmap=POSENET_DEFAULT_CMAP, const char *paf=POSENET_DEFAULT_PAF, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | 
|   | Load a custom network instance.  More...
  | 
|   | 
| static poseNet *  | Create (int argc, char **argv) | 
|   | Load a new network instance by parsing the command line.  More...
  | 
|   | 
| static poseNet *  | Create (const commandLine &cmdLine) | 
|   | Load a new network instance by parsing the command line.  More...
  | 
|   | 
| static const char *  | Usage () | 
|   | Usage string for command line arguments to Create()  More...
  | 
|   | 
| static bool  | LoadClassLabels (const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1) | 
|   | Load class descriptions from a label file.  More...
  | 
|   | 
| static bool  | LoadClassLabels (const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1) | 
|   | Load class descriptions and synset strings from a label file.  More...
  | 
|   | 
| static bool  | LoadClassColors (const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f) | 
|   | Load class colors from a text file.  More...
  | 
|   | 
| static bool  | LoadClassColors (const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f) | 
|   | Load class colors from a text file.  More...
  | 
|   | 
| static float4  | GenerateColor (uint32_t classID, float alpha=255.0f) | 
|   | Procedurally generate a color for a given class index with the specified alpha value.  More...
  | 
|   | 
| static precisionType  | SelectPrecision (precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) | 
|   | Resolve a desired precision to a specific one that's available.  More...
  | 
|   | 
| static precisionType  | FindFastestPrecision (deviceType device=DEVICE_GPU, bool allowInt8=true) | 
|   | Determine the fastest native precision on a device.  More...
  | 
|   | 
| static std::vector< precisionType >  | DetectNativePrecisions (deviceType device=DEVICE_GPU) | 
|   | Detect the precisions supported natively on a device.  More...
  | 
|   | 
| static bool  | DetectNativePrecision (const std::vector< precisionType > &nativeTypes, precisionType type) | 
|   | Detect if a particular precision is supported natively.  More...
  | 
|   | 
| static bool  | DetectNativePrecision (precisionType precision, deviceType device=DEVICE_GPU) | 
|   | Detect if a particular precision is supported natively.  More...
  | 
|   | 
 | 
|   | poseNet () | 
|   | 
| bool  | init (const char *model_path, const char *topology, const char *colors, float threshold, const char *input, const char *cmap, const char *paf, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) | 
|   | 
| bool  | postProcess (std::vector< ObjectPose > &poses, uint32_t width, uint32_t height) | 
|   | 
| bool  | loadTopology (const char *json_path, Topology *topology) | 
|   | 
| bool  | loadKeypointColors (const char *filename) | 
|   | 
|   | tensorNet () | 
|   | Constructor.  More...
  | 
|   | 
| bool  | ProcessNetwork (bool sync=true) | 
|   | Execute processing of the network.  More...
  | 
|   | 
| bool  | ProfileModel (const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) | 
|   | Create and output an optimized network model.  More...
  | 
|   | 
| bool  | ConfigureBuilder (nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) | 
|   | Configure builder options.  More...
  | 
|   | 
| bool  | ValidateEngine (const char *model_path, const char *cache_path, const char *checksum_path) | 
|   | Validate that the model already has a built TensorRT engine that exists and doesn't need updating.  More...
  | 
|   | 
| void  | PROFILER_BEGIN (profilerQuery query) | 
|   | Begin a profiling query, before network is run.  More...
  | 
|   | 
| void  | PROFILER_END (profilerQuery query) | 
|   | End a profiling query, after the network is run.  More...
  | 
|   | 
| bool  | PROFILER_QUERY (profilerQuery query) | 
|   | Query the CUDA part of a profiler query.  More...
  | 
|   | 
Pose estimation models with TensorRT support.