Browse Source

Fix lint.

pull/237/head
Eric Mintun 2 years ago
parent
commit
02acff7859
  1. 2
      scripts/export_onnx_model.py

2
scripts/export_onnx_model.py

@@ -161,7 +161,7 @@ def run_export(
if onnxruntime_exists:
ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()}
# set cpu provider default
providers = ['CPUExecutionProvider']
providers = ["CPUExecutionProvider"]
ort_session = onnxruntime.InferenceSession(output, providers=providers)
_ = ort_session.run(None, ort_inputs)
print("Model has successfully been run with ONNXRuntime.")

Loading…
Cancel
Save