Back to Jetson Inference

Jetson Inference: jetson

docs/html/actionNet_8h_source.html

latest — 17.8 KB
Original Source

| | Jetson Inference

DNN Vision Library |

actionNet.h

Go to the documentation of this file.

1 /*

2 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

3 *

4 * Permission is hereby granted, free of charge, to any person obtaining a

5 * copy of this software and associated documentation files (the "Software"),

6 * to deal in the Software without restriction, including without limitation

7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,

8 * and/or sell copies of the Software, and to permit persons to whom the

9 * Software is furnished to do so, subject to the following conditions:

10 *

11 * The above copyright notice and this permission notice shall be included in

12 * all copies or substantial portions of the Software.

13 *

14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL

17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING

19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER

20 * DEALINGS IN THE SOFTWARE.

21 */

22

23 #ifndef __ACTION_NET_H__

24 #define __ACTION_NET_H__

25

26

27 #include "tensorNet.h"

28

29

/**
 * Name of default input blob for actionNet model.
 * @ingroup actionNet
 */
#define ACTIONNET_DEFAULT_INPUT   "input"

/**
 * Name of default output confidence values for actionNet model.
 * @ingroup actionNet
 */
#define ACTIONNET_DEFAULT_OUTPUT  "output"

/**
 * Model type identifier string for actionNet.
 * @ingroup actionNet
 */
#define ACTIONNET_MODEL_TYPE "action"

/**
 * Standard command-line options able to be passed to actionNet::Create()
 * @ingroup actionNet
 */
#define ACTIONNET_USAGE_STRING "actionNet arguments: \n" \
		  " --network=NETWORK pre-trained model to load, one of the following:\n" \
		  " * resnet-18 (default)\n" \
		  " * resnet-34\n" \
		  " --model=MODEL path to custom model to load (.onnx)\n" \
		  " --labels=LABELS path to text file containing the labels for each class\n" \
		  " --input-blob=INPUT name of the input layer (default is '" ACTIONNET_DEFAULT_INPUT "')\n" \
		  " --output-blob=OUTPUT name of the output layer (default is '" ACTIONNET_DEFAULT_OUTPUT "')\n" \
		  " --threshold=CONF minimum confidence threshold for classification (default is 0.01)\n" \
		  " --skip-frames=SKIP how many frames to skip between classifications (default is 1)\n" \
		  " --profile enable layer profiling in TensorRT\n\n"

63

64

69 class actionNet : public tensorNet

70 {

71 public:

75static actionNet* Create( const char* network="resnet-18", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE,

76precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU,

77bool allowGPUFallback=true );

78

89static actionNet* Create( const char* model_path, const char* class_labels,

90const char* input=ACTIONNET_DEFAULT_INPUT,

91const char* output=ACTIONNET_DEFAULT_OUTPUT,

92 uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE,

93precisionType precision=TYPE_FASTEST,

94deviceType device=DEVICE_GPU, bool allowGPUFallback=true );

95

99static actionNet* Create( int argc, char** argv );

100

104static actionNet* Create( const commandLine& cmdLine );

105

109static inline const char* Usage() { return ACTIONNET_USAGE_STRING; }

110

114virtual ~actionNet();

115

130template<typename T> int Classify( T* image, uint32_t width, uint32_t height, float* confidence=NULL ) { return Classify((void*)image, width, height, imageFormatFromType<T>(), confidence); }

131

146int Classify( void* image, uint32_t width, uint32_t height, imageFormat format, float* confidence=NULL );

147

151inline uint32_t GetNumClasses() const { return mNumClasses; }

152

156inline const char* GetClassLabel( int index ) const { return GetClassDesc(index); }

157

161inline const char* GetClassDesc( int index ) const { return index >= 0 ? mClassDesc[index].c_str() : "none"; }

162

166inline const char* GetClassPath() const { return mClassPath.c_str(); }

167

171inline float GetThreshold() const { return mThreshold; }

172

178inline void SetThreshold( float threshold ) { mThreshold = threshold; }

179

184inline uint32_t GetSkipFrames() const { return mSkipFrames; }

185

195inline void SetSkipFrames( uint32_t frames ) { mSkipFrames = frames; }

196

197 protected:

198actionNet();

199

200bool init( const char* model_path, const char* class_path, const char* input, const char* output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback );

201bool preProcess( void* image, uint32_t width, uint32_t height, imageFormat format );

202

203float* mInputBuffers[2];

204

205 uint32_t mNumClasses;

206 uint32_t mNumFrames; // number of frames fed into the model

207 uint32_t mSkipFrames; // number of frames to skip when processing

208 uint32_t mFramesSkipped; // frame skip counter

209

210 uint32_t mCurrentInputBuffer;

211 uint32_t mCurrentFrameIndex;

212

213float mThreshold;

214float mLastConfidence;

215intmLastClassification;

216

217 std::vector<std::string> mClassDesc;

218

219 std::string mClassPath;

220 };

221

222

223 #endif

actionNet::GetClassLabel

const char * GetClassLabel(int index) const

Retrieve the description of a particular class.

Definition: actionNet.h:156

actionNet::preProcess

bool preProcess(void *image, uint32_t width, uint32_t height, imageFormat format)

actionNet::GetSkipFrames

uint32_t GetSkipFrames() const

Return the number of frames that are skipped in between classifications.

Definition: actionNet.h:184

actionNet::mClassPath

std::string mClassPath

Definition: actionNet.h:219

actionNet::mInputBuffers

float * mInputBuffers[2]

Definition: actionNet.h:203

actionNet::mLastClassification

int mLastClassification

Definition: actionNet.h:215

actionNet::GetThreshold

float GetThreshold() const

Return the confidence threshold used for classification.

Definition: actionNet.h:171

actionNet::mCurrentInputBuffer

uint32_t mCurrentInputBuffer

Definition: actionNet.h:210

ACTIONNET_DEFAULT_INPUT

#define ACTIONNET_DEFAULT_INPUT

Name of default input blob for actionNet model.

Definition: actionNet.h:34

actionNet::mCurrentFrameIndex

uint32_t mCurrentFrameIndex

Definition: actionNet.h:211

actionNet::mThreshold

float mThreshold

Definition: actionNet.h:213

actionNet

Action/activity classification on a sequence of images or video, using TensorRT.

Definition: actionNet.h:69

actionNet::mLastConfidence

float mLastConfidence

Definition: actionNet.h:214

actionNet::SetThreshold

void SetThreshold(float threshold)

Set the confidence threshold used for classification.

Definition: actionNet.h:178

ACTIONNET_USAGE_STRING

#define ACTIONNET_USAGE_STRING

Standard command-line options able to be passed to actionNet::Create()

Definition: actionNet.h:52

ACTIONNET_DEFAULT_OUTPUT

#define ACTIONNET_DEFAULT_OUTPUT

Name of default output confidence values for actionNet model.

Definition: actionNet.h:40

DEVICE_GPU

@ DEVICE_GPU

GPU (if multiple GPUs are present, a specific GPU can be selected with cudaSetDevice()).

Definition: tensorNet.h:131

actionNet::~actionNet

virtual ~actionNet()

Destroy.

actionNet::mNumClasses

uint32_t mNumClasses

Definition: actionNet.h:205

deviceType

deviceType

Enumeration for indicating the desired device that the network should run on, if available in hardwar...

Definition: tensorNet.h:129

actionNet::GetClassDesc

const char * GetClassDesc(int index) const

Retrieve the description of a particular class.

Definition: actionNet.h:161

tensorNet.h

actionNet::Usage

static const char * Usage()

Usage string for command line arguments to Create()

Definition: actionNet.h:109

TYPE_FASTEST

@ TYPE_FASTEST

The fastest detected precision should be used (i.e.

Definition: tensorNet.h:105

actionNet::mNumFrames

uint32_t mNumFrames

Definition: actionNet.h:206

actionNet::init

bool init(const char *model_path, const char *class_path, const char *input, const char *output, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback)

actionNet::GetClassPath

const char * GetClassPath() const

Retrieve the path to the file containing the class descriptions.

Definition: actionNet.h:166

actionNet::actionNet

actionNet()

precisionType

precisionType

Enumeration for indicating the desired precision that the network should run in, if available in hard...

Definition: tensorNet.h:102

actionNet::Classify

int Classify(T *image, uint32_t width, uint32_t height, float *confidence=NULL)

Append an image to the sequence and classify the action, returning the index of the top class.

Definition: actionNet.h:130

actionNet::Create

static actionNet * Create(const char *network="resnet-18", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true)

Load a pre-trained model, either "resnet-18" or "resnet-34".

actionNet::mFramesSkipped

uint32_t mFramesSkipped

Definition: actionNet.h:208

tensorNet

Abstract class for loading a tensor network with TensorRT.

Definition: tensorNet.h:218

actionNet::mClassDesc

std::vector< std::string > mClassDesc

Definition: actionNet.h:217

DEFAULT_MAX_BATCH_SIZE

#define DEFAULT_MAX_BATCH_SIZE

Default maximum batch size.

Definition: tensorNet.h:88

actionNet::GetNumClasses

uint32_t GetNumClasses() const

Retrieve the number of image recognition classes.

Definition: actionNet.h:151

commandLine

Command line parser for extracting flags, values, and strings.

Definition: commandLine.h:35

actionNet::SetSkipFrames

void SetSkipFrames(uint32_t frames)

Set the number of frames that are skipped in between classifications.

Definition: actionNet.h:195

imageFormat

imageFormat

The imageFormat enum is used to identify the pixel format and colorspace of an image.

Definition: imageFormat.h:49

actionNet::mSkipFrames

uint32_t mSkipFrames

Definition: actionNet.h:207