import onnxruntime as ort
# define the priority order for the execution providers
# prefer CUDA Execution Provider over CPU Execution Provider
available_providers = ort.get_available_providers()
print(available_providers)
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
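# optional (a minimal hedged variant, not required): keep only the providers that
# are actually present on this machine; ONNX Runtime would otherwise log a
# warning and fall back to CPU on its own
providers = [p for p in providers if p in available_providers]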
# initialize an inference session with the ONNX model
sess = ort.InferenceSession(r"E:\official-model\yolov8\yolov8n.onnx", providers=providers)
# get the outputs metadata as a list of onnxruntime.NodeArg
outputs = sess.get_outputs()
output_name = outputs[0].name
print(output_name)
output_shape = outputs[0].shape
print(output_shape)
# get the inputs metadata as a list of onnxruntime.NodeArg
inputs = sess.get_inputs()
input_name = inputs[0].name
input_shape = inputs[0].shape
print(input_name)
print(input_shape)
Result:
['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']
output0
[1, 84, 8400]
images
[1, 3, 640, 640]
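To sanity-check the session end to end, here is a minimal sketch that runs inference on a random dummy tensor shaped like the reported input metadata. For a standard COCO-trained yolov8n export, the output shape [1, 84, 8400] is commonly read as 84 = 4 box coordinates + 80 class scores over 8400 candidate boxes; real use would need actual image preprocessing (resize to 640x640, BGR-to-RGB, normalize to [0, 1], CHW layout), which is omitted here:

import numpy as np
# dummy input matching the metadata above: float32, shape [1, 3, 640, 640]
dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)
# run the session, requesting the single output by the name read earlier
results = sess.run([output_name], {input_name: dummy})
print(results[0].shape)  # expected: (1, 84, 8400) per the output metadata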