Back to Jetson Inference

Jetson Inference: Member List

docs/html/classposeNet-members.html

latest — 23.5 KB
Original Source

Jetson Inference — DNN Vision Library

poseNet Member List

This is the complete list of members for poseNet, including all inherited members.

| AllowGPUFallback() const | tensorNet | inline | | CMAP_WINDOW_SIZE | poseNet | protectedstatic | | ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) | tensorNet | protected | | Create(const char *network="resnet18-body", float threshold=POSENET_DEFAULT_THRESHOLD, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | poseNet | static | | Create(const char *model_path, const char *topology, const char *colors, float threshold=POSENET_DEFAULT_THRESHOLD, const char *input=POSENET_DEFAULT_INPUT, const char *cmap=POSENET_DEFAULT_CMAP, const char *paf=POSENET_DEFAULT_PAF, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | poseNet | static | | Create(int argc, char **argv) | poseNet | static | | Create(const commandLine &cmdLine) | poseNet | static | | CreateStream(bool nonBlocking=true) | tensorNet | | | DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) | tensorNet | static | | DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) | tensorNet | static | | DetectNativePrecisions(deviceType device=DEVICE_GPU) | tensorNet | static | | EnableDebug() | tensorNet | | | EnableLayerProfiler() | tensorNet | | | FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static | | FindKeypointID(const char *name) const | poseNet | inline | | GenerateColor(uint32_t classID, float alpha=255.0f) | tensorNet | static | | GetCategory() const | poseNet | inline | | GetDevice() const | tensorNet | inline | | GetInputDims(uint32_t layer=0) const | tensorNet | inline | | GetInputHeight(uint32_t layer=0) const | tensorNet | inline | | GetInputLayers() const | tensorNet | 
inline | | GetInputPtr(uint32_t layer=0) const | tensorNet | inline | | GetInputSize(uint32_t layer=0) const | tensorNet | inline | | GetInputWidth(uint32_t layer=0) const | tensorNet | inline | | GetKeypointColor(uint32_t index) const | poseNet | inline | | GetKeypointName(uint32_t index) const | poseNet | inline | | GetKeypointScale() const | poseNet | inline | | GetLinkScale() const | poseNet | inline | | GetModelFilename() const | tensorNet | inline | | GetModelPath() const | tensorNet | inline | | GetModelType() const | tensorNet | inline | | GetNetworkFPS() | tensorNet | inline | | GetNetworkName() const | tensorNet | inline | | GetNetworkTime() | tensorNet | inline | | GetNumKeypoints() const | poseNet | inline | | GetOutputDims(uint32_t layer=0) const | tensorNet | inline | | GetOutputHeight(uint32_t layer=0) const | tensorNet | inline | | GetOutputLayers() const | tensorNet | inline | | GetOutputPtr(uint32_t layer=0) const | tensorNet | inline | | GetOutputSize(uint32_t layer=0) const | tensorNet | inline | | GetOutputWidth(uint32_t layer=0) const | tensorNet | inline | | GetPrecision() const | tensorNet | inline | | GetProfilerTime(profilerQuery query) | tensorNet | inline | | GetProfilerTime(profilerQuery query, profilerDevice device) | tensorNet | inline | | GetPrototxtPath() const | tensorNet | inline | | GetStream() const | tensorNet | inline | | GetThreshold() const | poseNet | inline | | gLogger | tensorNet | protected | | gProfiler | tensorNet | protected | | init(const char *model_path, const char *topology, const char *colors, float threshold, const char *input, const char *cmap, const char *paf, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback) | poseNet | protected | | IsModelType(modelType type) const | tensorNet | inline | | IsPrecision(precisionType type) const | tensorNet | inline | | LoadClassColors(const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | 
static | | LoadClassColors(const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static | | LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1) | tensorNet | static | | LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1) | tensorNet | static | | LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | | | LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | | | LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | | | LoadEngine(const char *filename, char **stream, size_t *size) | tensorNet | | | loadKeypointColors(const char *filename) | poseNet | protected | | LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator 
*calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | loadTopology(const char *json_path, Topology *topology) | poseNet | protected | | mAllowGPUFallback | tensorNet | protected | | mAssignmentWorkspace | poseNet | protected | | MAX_LINKS | poseNet | protectedstatic | | MAX_OBJECTS | poseNet | protectedstatic | | mBindings | tensorNet | protected | | mCacheCalibrationPath | tensorNet | protected | | mCacheEnginePath | tensorNet | protected | | mChecksumPath | tensorNet | protected | | mConnections | poseNet | protected | | mConnectionWorkspace | poseNet | protected | | mContext | tensorNet | protected | | mDevice | tensorNet | protected | | mEnableDebug | tensorNet | protected | | mEnableProfiler | tensorNet | protected | | mEngine | tensorNet | 
protected | | mEventsCPU | tensorNet | protected | | mEventsGPU | tensorNet | protected | | mInfer | tensorNet | protected | | mInputs | tensorNet | protected | | mKeypointColors | poseNet | protected | | mKeypointScale | poseNet | protected | | mLinkScale | poseNet | protected | | mMaxBatchSize | tensorNet | protected | | mMeanPath | tensorNet | protected | | mModelFile | tensorNet | protected | | mModelPath | tensorNet | protected | | mModelType | tensorNet | protected | | mNumObjects | poseNet | protected | | mObjects | poseNet | protected | | mOutputs | tensorNet | protected | | mPeakCounts | poseNet | protected | | mPeaks | poseNet | protected | | mPrecision | tensorNet | protected | | mProfilerQueriesDone | tensorNet | protected | | mProfilerQueriesUsed | tensorNet | protected | | mProfilerTimes | tensorNet | protected | | mPrototxtPath | tensorNet | protected | | mRefinedPeaks | poseNet | protected | | mScoreGraph | poseNet | protected | | mStream | tensorNet | protected | | mThreshold | poseNet | protected | | mTopology | poseNet | protected | | mWorkspaceSize | tensorNet | protected | | Overlay(T *input, T *output, uint32_t width, uint32_t height, const std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet | inline | | Overlay(void *input, void *output, uint32_t width, uint32_t height, imageFormat format, const std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet | | | OVERLAY_BOX enum value | poseNet | | | OVERLAY_DEFAULT enum value | poseNet | | | OVERLAY_KEYPOINTS enum value | poseNet | | | OVERLAY_LINKS enum value | poseNet | | | OVERLAY_NONE enum value | poseNet | | | OverlayFlags enum name | poseNet | | | OverlayFlagsFromStr(const char *flags) | poseNet | static | | PAF_INTEGRAL_SAMPLES | poseNet | protectedstatic | | poseNet() | poseNet | protected | | postProcess(std::vector< ObjectPose > &poses, uint32_t width, uint32_t height) | poseNet | protected | | PrintProfilerTimes() | tensorNet | inline | | 
Process(T *image, uint32_t width, uint32_t height, std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet | inline | | Process(void *image, uint32_t width, uint32_t height, imageFormat format, std::vector< ObjectPose > &poses, uint32_t overlay=OVERLAY_DEFAULT) | poseNet | | | Process(T *image, uint32_t width, uint32_t height, uint32_t overlay=OVERLAY_DEFAULT) | poseNet | inline | | Process(void *image, uint32_t width, uint32_t height, imageFormat format, uint32_t overlay=OVERLAY_DEFAULT) | poseNet | | | ProcessNetwork(bool sync=true) | tensorNet | protected | | ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) | tensorNet | protected | | PROFILER_BEGIN(profilerQuery query) | tensorNet | inlineprotected | | PROFILER_END(profilerQuery query) | tensorNet | inlineprotected | | PROFILER_QUERY(profilerQuery query) | tensorNet | inlineprotected | | SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static | | SetKeypointAlpha(uint32_t index, float alpha) | poseNet | inline | | SetKeypointAlpha(float alpha) | poseNet | inline | | SetKeypointColor(uint32_t index, const float4 &color) | poseNet | inline | | SetKeypointScale(float scale) | poseNet | inline | | SetLinkScale(float scale) | poseNet | inline | | SetStream(cudaStream_t stream) | tensorNet | | | SetThreshold(float threshold) | poseNet | inline | | tensorNet() | tensorNet | protected | | Usage() | poseNet | inlinestatic | | ValidateEngine(const char *model_path, const char *cache_path, const char *checksum_path) | tensorNet | protected | | ~poseNet() | poseNet | virtual | | ~tensorNet() | tensorNet | virtual |

  • Generated on Fri Mar 17 2023 14:29:30 for Jetson Inference by Doxygen 1.8.17