ORT_CPP add CUDA FP16 inference (#4320)

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
DennisJ 2023-08-12 23:27:52 +08:00 committed by GitHub
parent 02d4f5200d
commit 1c753cbce6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 57 additions and 24 deletions

View file

@@ -82,13 +82,15 @@ int read_coco_yaml(DCSP_CORE*& p)
int main()
{
DCSP_CORE* p1 = new DCSP_CORE;
DCSP_CORE* yoloDetector = new DCSP_CORE;
std::string model_path = "yolov8n.onnx";
read_coco_yaml(p1);
// GPU inference
read_coco_yaml(yoloDetector);
// GPU FP32 inference
DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 0.1, 0.5, true };
// GPU FP16 inference
// DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8_HALF, {640, 640}, 0.1, 0.5, true };
// CPU inference
// DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 0.1, 0.5, false };
p1->CreateSession(params);
file_iterator(p1);
yoloDetector->CreateSession(params);
file_iterator(yoloDetector);
}