Jetson Inference
DNN Vision Library
poseNet Member List

This is the complete list of members for poseNet, including all inherited members.

Member | Defined in
AllowGPUFallback() const | tensorNet [inline]
CMAP_WINDOW_SIZE | poseNet [protected, static]
ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) | tensorNet [protected]
Create(const char *network="resnet18-body", float threshold=POSENET_DEFAULT_THRESHOLD, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | poseNet [static]
Create(const char *model_path, const char *topology, const char *colors, float threshold=POSENET_DEFAULT_THRESHOLD, const char *input=POSENET_DEFAULT_INPUT, const char *cmap=POSENET_DEFAULT_CMAP, const char *paf=POSENET_DEFAULT_PAF, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | poseNet [static]
Create(int argc, char **argv) | poseNet [static]
Create(const commandLine &cmdLine) | poseNet [static]
CreateStream(bool nonBlocking=true) | tensorNet
DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) | tensorNet [static]
DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) | tensorNet [static]
DetectNativePrecisions(deviceType device=DEVICE_GPU) | tensorNet [static]
EnableDebug() | tensorNet
EnableLayerProfiler() | tensorNet
FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet [static]
FindKeypointID(const char *name) const | poseNet [inline]
GenerateColor(uint32_t classID, float alpha=255.0f) | tensorNet [static]
GetCategory() const | poseNet [inline]
GetDevice() const | tensorNet [inline]
GetInputDims(uint32_t layer=0) const | tensorNet [inline]
GetInputHeight(uint32_t layer=0) const | tensorNet [inline]
GetInputLayers() const | tensorNet [inline]
GetInputPtr(uint32_t layer=0) const | tensorNet [inline]
GetInputSize(uint32_t layer=0) const | tensorNet [inline]
GetInputWidth(uint32_t layer=0) const | tensorNet [inline]
GetKeypointColor(uint32_t index) const | poseNet [inline]
GetKeypointName(uint32_t index) const | poseNet [inline]
GetKeypointScale() const | poseNet [inline]
GetLinkScale() const | poseNet [inline]
GetModelFilename() const | tensorNet [inline]
GetModelPath() const | tensorNet [inline]
GetModelType() const | tensorNet [inline]
GetNetworkFPS() | tensorNet [inline]
GetNetworkName() const | tensorNet [inline]
GetNetworkTime() | tensorNet [inline]
GetNumKeypoints() const | poseNet [inline]
GetOutputDims(uint32_t layer=0) const | tensorNet [inline]
GetOutputHeight(uint32_t layer=0) const | tensorNet [inline]
GetOutputLayers() const | tensorNet [inline]
GetOutputPtr(uint32_t layer=0) const | tensorNet [inline]
GetOutputSize(uint32_t layer=0) const | tensorNet [inline]
GetOutputWidth(uint32_t layer=0) const | tensorNet [inline]
GetPrecision() const | tensorNet [inline]
GetProfilerTime(profilerQuery query) | tensorNet [inline]
GetProfilerTime(profilerQuery query, profilerDevice device) | tensorNet [inline]
GetPrototxtPath() const | tensorNet [inline]
GetStream() const | tensorNet [inline]
GetThreshold() const | poseNet [inline]
gLogger | tensorNet [protected]
gProfiler | tensorNet [protected]
init(const char *model_path, const char *topology, const char *colors, float threshold, const char *input, const char *cmap, const char *paf, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) | poseNet [protected]
IsModelType(modelType type) const | tensorNet [inline]
IsPrecision(precisionType type) const | tensorNet [inline]
LoadClassColors(const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet [static]
LoadClassColors(const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet [static]
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1) | tensorNet [static]
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1) | tensorNet [static]
LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet
LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet
LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet
LoadEngine(const char *filename, char **stream, size_t *size) | tensorNet
loadKeypointColors(const char *filename) | poseNet [protected]
LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet
LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet
loadTopology(const char *json_path, Topology *topology) | poseNet [protected]
mAllowGPUFallback | tensorNet [protected]
mAssignmentWorkspace | poseNet [protected]
MAX_LINKS | poseNet [protected, static]
MAX_OBJECTS | poseNet [protected, static]
mBindings | tensorNet [protected]
mCacheCalibrationPath | tensorNet [protected]
mCacheEnginePath | tensorNet [protected]
mChecksumPath | tensorNet [protected]
mConnections | poseNet [protected]
mConnectionWorkspace | poseNet [protected]
mContext | tensorNet [protected]
mDevice | tensorNet [protected]
mEnableDebug | tensorNet [protected]
mEnableProfiler | tensorNet [protected]
mEngine | tensorNet [protected]
mEventsCPU | tensorNet [protected]
mEventsGPU | tensorNet [protected]
mInfer | tensorNet [protected]
mInputs | tensorNet [protected]
mKeypointColors | poseNet [protected]
mKeypointScale | poseNet [protected]
mLinkScale | poseNet [protected]
mMaxBatchSize | tensorNet [protected]
mMeanPath | tensorNet [protected]
mModelFile | tensorNet [protected]
mModelPath | tensorNet [protected]
mModelType | tensorNet [protected]
mNumObjects | poseNet [protected]
mObjects | poseNet [protected]
mOutputs | tensorNet [protected]
mPeakCounts | poseNet [protected]
mPeaks | poseNet [protected]
mPrecision | tensorNet [protected]
mProfilerQueriesDone | tensorNet [protected]
mProfilerQueriesUsed | tensorNet [protected]
mProfilerTimes | tensorNet [protected]
mPrototxtPath | tensorNet [protected]
mRefinedPeaks | poseNet [protected]
mScoreGraph | poseNet [protected]
mStream | tensorNet [protected]
mThreshold | poseNet [protected]
mTopology | poseNet [protected]
mWorkspaceSize | tensorNet [protected]
Overlay(T *input, T *output, uint32_t width, uint32_t height, const std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet [inline]
Overlay(void *input, void *output, uint32_t width, uint32_t height, imageFormat format, const std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet
OVERLAY_BOX enum value | poseNet
OVERLAY_DEFAULT enum value | poseNet
OVERLAY_KEYPOINTS enum value | poseNet
OVERLAY_LINKS enum value | poseNet
OVERLAY_NONE enum value | poseNet
OverlayFlags enum name | poseNet
OverlayFlagsFromStr(const char *flags) | poseNet [static]
PAF_INTEGRAL_SAMPLES | poseNet [protected, static]
poseNet() | poseNet [protected]
postProcess(std::vector< ObjectPose > &poses, uint32_t width, uint32_t height) | poseNet [protected]
PrintProfilerTimes() | tensorNet [inline]
Process(T *image, uint32_t width, uint32_t height, std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet [inline]
Process(void *image, uint32_t width, uint32_t height, imageFormat format, std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet
Process(T *image, uint32_t width, uint32_t height, uint32_t overlay=OVERLAY_DEFAULT) | poseNet [inline]
Process(void *image, uint32_t width, uint32_t height, imageFormat format, uint32_t overlay=OVERLAY_DEFAULT) | poseNet
ProcessNetwork(bool sync=true) | tensorNet [protected]
ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) | tensorNet [protected]
PROFILER_BEGIN(profilerQuery query) | tensorNet [inline, protected]
PROFILER_END(profilerQuery query) | tensorNet [inline, protected]
PROFILER_QUERY(profilerQuery query) | tensorNet [inline, protected]
SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet [static]
SetKeypointAlpha(uint32_t index, float alpha) | poseNet [inline]
SetKeypointAlpha(float alpha) | poseNet [inline]
SetKeypointColor(uint32_t index, const float4 &color) | poseNet [inline]
SetKeypointScale(float scale) | poseNet [inline]
SetLinkScale(float scale) | poseNet [inline]
SetStream(cudaStream_t stream) | tensorNet
SetThreshold(float threshold) | poseNet [inline]
tensorNet() | tensorNet [protected]
Usage() | poseNet [inline, static]
ValidateEngine(const char *model_path, const char *cache_path, const char *checksum_path) | tensorNet [protected]
~poseNet() | poseNet [virtual]
~tensorNet() | tensorNet [virtual]
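
For orientation, below is a minimal usage sketch assembled only from members listed above (Create(), SetThreshold(), Process(), OverlayFlagsFromStr(), GetNumKeypoints(), GetKeypointName()). The include paths and the loadImage() helper from jetson-utils are assumptions about the surrounding project rather than part of this member list, and error handling is abbreviated.

```cpp
// Minimal poseNet usage sketch, based on the member list above.
// Assumptions: installed headers under <jetson-inference/> and <jetson-utils/>,
// and the jetson-utils loadImage() helper for reading the input image.
#include <jetson-inference/poseNet.h>
#include <jetson-utils/loadImage.h>

#include <cstdint>
#include <cstdio>
#include <vector>

int main( int argc, char** argv )
{
    if( argc < 2 )
    {
        printf("usage: posenet-example <image-file>\n");
        return 0;
    }

    // create the network from a built-in model name (see poseNet::Create() above)
    poseNet* net = poseNet::Create("resnet18-body");

    if( !net )
        return 1;

    net->SetThreshold(0.15f);   // minimum confidence for detected keypoints

    // load the input image (jetson-utils helper, assumed available)
    uchar3* image  = NULL;
    int     width  = 0;
    int     height = 0;

    if( !loadImage(argv[1], &image, &width, &height) )
        return 1;

    // run pose estimation and overlay the results on the image in-place;
    // the flag string tokens are assumed to map to OVERLAY_KEYPOINTS | OVERLAY_LINKS
    std::vector<poseNet::ObjectPose> poses;

    if( !net->Process(image, width, height, poses,
                      poseNet::OverlayFlagsFromStr("keypoints,links")) )
        return 1;

    printf("detected %zu poses using a %u-keypoint topology\n",
           poses.size(), net->GetNumKeypoints());

    for( uint32_t k=0; k < net->GetNumKeypoints(); k++ )
        printf("  keypoint %u = %s\n", k, net->GetKeypointName(k));

    delete net;   // ~poseNet() is virtual, so deletion through a base pointer is also safe
    return 0;
}
```

When the image format is only known at runtime, the untyped Process(void*, uint32_t, uint32_t, imageFormat, ...) overload can be used in place of the templated overload shown here.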