Back to Jetson Inference

Jetson Inference: Member List

docs/html/classdepthNet-members.html

latest — 21.8 KB
Original Source

Jetson Inference

DNN Vision Library

depthNet Member List

This is the complete list of members for depthNet, including all inherited members.

| allocHistogramBuffers() | depthNet | protected | | AllowGPUFallback() const | tensorNet | inline | | ConfigureBuilder(nvinfer1::IBuilder *builder, uint32_t maxBatchSize, uint32_t workspaceSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator) | tensorNet | protected | | Create(const char *network="fcn-mobilenet", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | depthNet | static | | Create(const char *model_path, const char *input=DEPTHNET_DEFAULT_INPUT, const char *output=DEPTHNET_DEFAULT_OUTPUT, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | depthNet | static | | Create(const char *model_path, const char *input, const Dims3 &inputDims, const char *output, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true) | depthNet | static | | Create(int argc, char **argv) | depthNet | static | | Create(const commandLine &cmdLine) | depthNet | static | | CreateStream(bool nonBlocking=true) | tensorNet | | | depthNet() | depthNet | protected | | DetectNativePrecision(const std::vector< precisionType > &nativeTypes, precisionType type) | tensorNet | static | | DetectNativePrecision(precisionType precision, deviceType device=DEVICE_GPU) | tensorNet | static | | DetectNativePrecisions(deviceType device=DEVICE_GPU) | tensorNet | static | | EnableDebug() | tensorNet | | | EnableLayerProfiler() | tensorNet | | | FindFastestPrecision(deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static | | GenerateColor(uint32_t classID, float alpha=255.0f) | tensorNet | static | | GetDepthField() const | depthNet | inline | | GetDepthFieldHeight() const | depthNet | inline | | GetDepthFieldWidth() const | depthNet | inline | | GetDevice() const | 
tensorNet | inline | | GetInputDims(uint32_t layer=0) const | tensorNet | inline | | GetInputHeight(uint32_t layer=0) const | tensorNet | inline | | GetInputLayers() const | tensorNet | inline | | GetInputPtr(uint32_t layer=0) const | tensorNet | inline | | GetInputSize(uint32_t layer=0) const | tensorNet | inline | | GetInputWidth(uint32_t layer=0) const | tensorNet | inline | | GetModelFilename() const | tensorNet | inline | | GetModelPath() const | tensorNet | inline | | GetModelType() const | tensorNet | inline | | GetNetworkFPS() | tensorNet | inline | | GetNetworkName() const | tensorNet | inline | | GetNetworkTime() | tensorNet | inline | | GetOutputDims(uint32_t layer=0) const | tensorNet | inline | | GetOutputHeight(uint32_t layer=0) const | tensorNet | inline | | GetOutputLayers() const | tensorNet | inline | | GetOutputPtr(uint32_t layer=0) const | tensorNet | inline | | GetOutputSize(uint32_t layer=0) const | tensorNet | inline | | GetOutputWidth(uint32_t layer=0) const | tensorNet | inline | | GetPrecision() const | tensorNet | inline | | GetProfilerTime(profilerQuery query) | tensorNet | inline | | GetProfilerTime(profilerQuery query, profilerDevice device) | tensorNet | inline | | GetPrototxtPath() const | tensorNet | inline | | GetStream() const | tensorNet | inline | | gLogger | tensorNet | protected | | gProfiler | tensorNet | protected | | histogramEqualization() | depthNet | protected | | histogramEqualizationCUDA() | depthNet | protected | | IsModelType(modelType type) const | tensorNet | inline | | IsPrecision(precisionType type) const | tensorNet | inline | | LoadClassColors(const char *filename, float4 *colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static | | LoadClassColors(const char *filename, float4 **colors, int expectedClasses, float defaultAlpha=255.0f) | tensorNet | static | | LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, int expectedClasses=-1) | tensorNet | static | | 
LoadClassLabels(const char *filename, std::vector< std::string > &descriptions, std::vector< std::string > &synsets, int expectedClasses=-1) | tensorNet | static | | LoadEngine(const char *engine_filename, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | | | LoadEngine(char *engine_stream, size_t engine_size, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, nvinfer1::IPluginFactory *pluginFactory=NULL, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | | | LoadEngine(nvinfer1::ICudaEngine *engine, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, deviceType device=DEVICE_GPU, cudaStream_t stream=NULL) | tensorNet | | | LoadEngine(const char *filename, char **stream, size_t *size) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean=NULL, const char *input_blob="data", const char *output_blob="prob", uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType 
device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const char *input_blob, const Dims3 &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | LoadNetwork(const char *prototxt, const char *model, const char *mean, const std::vector< std::string > &input_blobs, const std::vector< Dims3 > &input_dims, const std::vector< std::string > &output_blobs, uint32_t maxBatchSize=DEFAULT_MAX_BATCH_SIZE, precisionType precision=TYPE_FASTEST, deviceType device=DEVICE_GPU, bool allowGPUFallback=true, nvinfer1::IInt8Calibrator *calibrator=NULL, cudaStream_t stream=NULL) | tensorNet | | | mAllowGPUFallback | tensorNet | protected | | mBindings | tensorNet | protected | | mCacheCalibrationPath | tensorNet | protected | | mCacheEnginePath | tensorNet | protected | | mChecksumPath | tensorNet | protected | | mContext | tensorNet | protected | | mDepthEqualized | depthNet | protected | | mDepthRange | depthNet | protected | | mDevice | tensorNet | protected | | mEnableDebug | tensorNet | protected | | mEnableProfiler | tensorNet | protected | | mEngine | tensorNet | protected | | mEventsCPU | tensorNet | protected | | mEventsGPU | tensorNet | protected | | mHistogram | depthNet | protected | | mHistogramCDF | depthNet | protected | | mHistogramEDU | depthNet | protected | | mHistogramPDF | depthNet | protected | | mInfer | tensorNet | protected | | mInputs | tensorNet | protected | | mMaxBatchSize | tensorNet | protected | | mMeanPath | tensorNet | protected | | mModelFile | tensorNet | protected | | mModelPath | tensorNet | protected | | mModelType | tensorNet | protected | | mOutputs | tensorNet | 
protected | | mPrecision | tensorNet | protected | | mProfilerQueriesDone | tensorNet | protected | | mProfilerQueriesUsed | tensorNet | protected | | mProfilerTimes | tensorNet | protected | | mPrototxtPath | tensorNet | protected | | mStream | tensorNet | protected | | mWorkspaceSize | tensorNet | protected | | PrintProfilerTimes() | tensorNet | inline | | Process(T *image, uint32_t width, uint32_t height) | depthNet | inline | | Process(void *input, uint32_t width, uint32_t height, imageFormat format) | depthNet | | | Process(T1 *input, T2 *output, uint32_t width, uint32_t height, cudaColormapType colormap=COLORMAP_VIRIDIS_INVERTED, cudaFilterMode filter=FILTER_LINEAR) | depthNet | inline | | Process(void *input, imageFormat input_format, void *output, imageFormat output_format, uint32_t width, uint32_t height, cudaColormapType colormap=COLORMAP_VIRIDIS_INVERTED, cudaFilterMode filter=FILTER_LINEAR) | depthNet | | | Process(T1 *input, uint32_t input_width, uint32_t input_height, T2 *output, uint32_t output_width, uint32_t output_height, cudaColormapType colormap=COLORMAP_DEFAULT, cudaFilterMode filter=FILTER_LINEAR) | depthNet | inline | | Process(void *input, uint32_t input_width, uint32_t input_height, imageFormat input_format, void *output, uint32_t output_width, uint32_t output_height, imageFormat output_format, cudaColormapType colormap=COLORMAP_DEFAULT, cudaFilterMode filter=FILTER_LINEAR) | depthNet | | | ProcessNetwork(bool sync=true) | tensorNet | protected | | ProfileModel(const std::string &deployFile, const std::string &modelFile, const std::vector< std::string > &inputs, const std::vector< Dims3 > &inputDims, const std::vector< std::string > &outputs, uint32_t maxBatchSize, precisionType precision, deviceType device, bool allowGPUFallback, nvinfer1::IInt8Calibrator *calibrator, char **engineStream, size_t *engineSize) | tensorNet | protected | | PROFILER_BEGIN(profilerQuery query) | tensorNet | inlineprotected | | PROFILER_END(profilerQuery query) | 
tensorNet | inlineprotected | | PROFILER_QUERY(profilerQuery query) | tensorNet | inlineprotected | | SavePointCloud(const char *filename) | depthNet | | | SavePointCloud(const char *filename, float *rgba, uint32_t width, uint32_t height) | depthNet | | | SavePointCloud(const char *filename, float *rgba, uint32_t width, uint32_t height, const float2 &focalLength, const float2 &principalPoint) | depthNet | | | SavePointCloud(const char *filename, float *rgba, uint32_t width, uint32_t height, const float intrinsicCalibration[3][3]) | depthNet | | | SavePointCloud(const char *filename, float *rgba, uint32_t width, uint32_t height, const char *intrinsicCalibrationPath) | depthNet | | | SelectPrecision(precisionType precision, deviceType device=DEVICE_GPU, bool allowInt8=true) | tensorNet | static | | SetStream(cudaStream_t stream) | tensorNet | | | tensorNet() | tensorNet | protected | | Usage() | depthNet | inlinestatic | | ValidateEngine(const char *model_path, const char *cache_path, const char *checksum_path) | tensorNet | protected | | VisualizationFlags enum name | depthNet | | | VisualizationFlagsFromStr(const char *str, uint32_t default_value=VISUALIZE_INPUT|VISUALIZE_DEPTH) | depthNet | static | | Visualize(T *output, uint32_t width, uint32_t height, cudaColormapType colormap=COLORMAP_DEFAULT, cudaFilterMode filter=FILTER_LINEAR) | depthNet | inline | | Visualize(void *output, uint32_t width, uint32_t height, imageFormat format, cudaColormapType colormap=COLORMAP_DEFAULT, cudaFilterMode filter=FILTER_LINEAR) | depthNet | | | VISUALIZE_DEPTH enum value | depthNet | | | VISUALIZE_INPUT enum value | depthNet | | | ~depthNet() | depthNet | virtual | | ~tensorNet() | tensorNet | virtual |

  • Generated on Fri Mar 17 2023 14:29:30 for Jetson Inference by Doxygen 1.8.17