The input format and output format do not match the ONNX model #22797

Open
Chriost opened this issue Nov 11, 2024 · 0 comments
Labels: ep:CUDA (issues related to the CUDA execution provider)

Comments


Chriost commented Nov 11, 2024

Describe the issue

```cpp
auto inputShapeInfo = session_.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
int ch = inputShapeInfo[1];
input_h = inputShapeInfo[2];
input_w = inputShapeInfo[3];
std::cout << "input format: " << ch << "x" << input_h << "x" << input_w << std::endl;
```


```
input_nodes_num:1
output_nodes_num:2
input format: 1x-1x-1
output format: -1x-1
output format: -1x256
```
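
For context, a minimal sketch under the assumption that the -1 entries returned by GetShape() are dynamic (free) dimensions rather than fixed sizes: in that case the static shape metadata cannot provide a concrete H/W, and the values have to be chosen at run time. The helper name and its parameters are illustrative, not from the original code.

```cpp
#include "onnxruntime_cxx_api.h"
#include <opencv2/opencv.hpp>
#include <iostream>

// Sketch: resolve the input H/W from the actual image whenever the model's
// shape metadata reports -1 (dynamic dimension). "ResolveInputHW" is a
// hypothetical helper, not part of the original report.
static void ResolveInputHW(Ort::Session& session, const cv::Mat& image,
                           int64_t& input_h, int64_t& input_w) {
	auto shape = session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
	// shape is {N, C, H, W}; any entry that is -1 is free, so pick it at run time.
	input_h = shape[2] > 0 ? shape[2] : image.rows;
	input_w = shape[3] > 0 ? shape[3] : image.cols;
	std::cout << "resolved input: " << shape[1] << "x" << input_h << "x" << input_w << std::endl;
}
```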

To reproduce

```cpp
#include "onnxruntime_cxx_api.h"
#include "cpu_provider_factory.h"
#include <opencv2/opencv.hpp>
#include <iostream>

int main() {

	// Test image
	cv::Mat image = cv::imread("-00000001.jpg");
	//cv::imshow("input image", image);
	cv::resize(image, image, cv::Size(image.cols / 4, image.rows / 4));
	// Initialize the ONNX Runtime environment
	Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "superpoint_v1");

	// Session options
	Ort::SessionOptions session_options;
	// Optimization level: basic graph optimizations
	session_options.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
	// Intra-op thread count: 1
	session_options.SetIntraOpNumThreads(1);
	// Prefer the GPU device, otherwise fall back to the CPU
	std::cout << "onnxruntime inference try to use GPU Device" << std::endl;
	//OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
	OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0);
	// Path to the trained ONNX model file
	std::string onnxpath = "LoadOnnx\\superpoint_v1.onnx";
	std::wstring modelPath = std::wstring(onnxpath.begin(), onnxpath.end());

	// Load the model and create a session
	Ort::Session session_(env, modelPath.c_str(), session_options);
	// Query the model's input/output information
	int input_nodes_num = session_.GetInputCount();			// number of input nodes
	std::cout << "input_nodes_num:" << input_nodes_num << std::endl;
	int output_nodes_num = session_.GetOutputCount();		// number of output nodes
	std::cout << "output_nodes_num:" << output_nodes_num << std::endl;
	std::vector<std::string> input_node_names;				// input node names
	std::vector<std::string> output_node_names;				// output node names
	Ort::AllocatorWithDefaultOptions allocator;
	// Input image size
	int input_h = 0;
	int input_w = 0;

	// Get the model's input information
	for (int i = 0; i < input_nodes_num; i++) {
		// Get and store the input node name
		auto input_name = session_.GetInputNameAllocated(i, allocator);
		input_node_names.push_back(input_name.get());
		// Print the input tensor shape
		auto inputShapeInfo = session_.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
		int ch = inputShapeInfo[1];
		input_h = inputShapeInfo[2];
		input_w = inputShapeInfo[3];
		std::cout << "input format: " << ch << "x" << input_h << "x" << input_w << std::endl;
	}

	// Get the model's output information
	int num = 0;
	int nc = 0;
	for (int i = 0; i < output_nodes_num; i++) {
		// Get and store the output node name
		auto output_name = session_.GetOutputNameAllocated(i, allocator);
		output_node_names.push_back(output_name.get());
		// Print the output tensor shape
		auto outShapeInfo = session_.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
		num = outShapeInfo[0];
		nc = outShapeInfo[1];
		std::cout << "output format: " << num << "x" << nc << std::endl;
	}

	// Preprocess the input data
	cv::Mat rgb, blob;
	// OpenCV loads images as BGR by default; convert to RGB
	cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
	// Resize the image to the model's input size
	cv::resize(rgb, blob, cv::Size(input_w, input_h));
	blob.convertTo(blob, CV_32F);
	// Standardize the image
	blob = blob / 255.0;	// scale to [0, 1]
	cv::subtract(blob, cv::Scalar(0.485, 0.456, 0.406), blob);	// subtract the mean
	cv::divide(blob, cv::Scalar(0.229, 0.224, 0.225), blob);	// divide by the standard deviation
	// HWC --> NCHW: blobFromImage adds the batch dimension
	cv::Mat timg = cv::dnn::blobFromImage(blob);
	std::cout << timg.size[0] << "x" << timg.size[1] << "x" << timg.size[2] << "x" << timg.size[3] << std::endl;
	// Total element count; the buffer size is elements * sizeof(data type)
	size_t tpixels = input_h * input_w * 3;
	std::array<int64_t, 4> input_shape_info{ 1, 3, input_h, input_w };

	// Prepare the input tensor
	auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
	Ort::Value input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info, timg.ptr<float>(), tpixels, input_shape_info.data(), input_shape_info.size());

	// Input/output names in the array-of-const-char* form that Run() expects
	const std::array<const char*, 1> inputNames = { input_node_names[0].c_str() };
	const std::array<const char*, 1> outNames = { output_node_names[0].c_str() };

	// Run inference
	std::vector<Ort::Value> ort_outputs;
	try {
		ort_outputs = session_.Run(Ort::RunOptions{ nullptr }, inputNames.data(), &input_tensor_, 1, outNames.data(), outNames.size());
	}
	catch (const std::exception& e) {
		std::cout << e.what() << std::endl;
	}
	// Wrap the output data in a cv::Mat for easier post-processing
	const float* pdata = ort_outputs[0].GetTensorMutableData<float>();
	cv::Mat prob(num, nc, CV_32F, (float*)pdata);

	// Post-process the inference result
	cv::Point maxL, minL;		// locations (indices) of the minimum and maximum classification scores
	double maxv, minv;			// minimum and maximum classification scores
	cv::minMaxLoc(prob, &minv, &maxv, &minL, &maxL);

	int max_index = maxL.x;		// index of the maximum value; with a single row the column coordinate is the index
	std::cout << "label id: " << max_index << std::endl;
	// Draw the predicted class label on the test image
	//cv::putText(image, labels[max_index], cv::Point(50, 50), cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0, 0, 255), 2, 8);
	cv::imshow("input image", image);
	cv::waitKey(0);

	// Release resources
	session_options.release();
	session_.release();
	return 0;
}
```
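
A related sketch, under the same assumption that the -1x-1 output dimensions are dynamic: the concrete output shape is only available from the tensor returned by Run(), so the cv::Mat wrapper can be built from that instead of from the num/nc values read as -1 before inference. The helper name below is illustrative, not from the report.

```cpp
#include "onnxruntime_cxx_api.h"
#include <opencv2/opencv.hpp>
#include <iostream>

// Sketch: read the concrete output shape from the Ort::Value returned by Run()
// (e.g. {N, 256} for the descriptor output) and wrap the buffer without copying.
// "WrapOutputAsMat" is a hypothetical helper; the returned Mat is only valid
// while `output` stays alive.
static cv::Mat WrapOutputAsMat(Ort::Value& output) {
	auto shape = output.GetTensorTypeAndShapeInfo().GetShape();
	std::cout << "actual output shape:";
	for (int64_t d : shape) std::cout << " " << d;
	std::cout << std::endl;
	return cv::Mat(static_cast<int>(shape[0]), static_cast<int>(shape[1]), CV_32F,
	               output.GetTensorMutableData<float>());
}
```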

Urgency

No response

Platform

Windows

OS Version

10

ONNX Runtime Installation

Released Package

ONNX Runtime Version or Commit ID

onnxruntime-win-x64-gpu-1.16.2

ONNX Runtime API

C++

Architecture

X64

Execution Provider

CUDA

Execution Provider Library Version

CUDA11.2

@fajin-corp added the ep:CUDA (issues related to the CUDA execution provider) label on Nov 12, 2024