pytorch ValueError: Unsupported ONNX opset version: 13

f87krz0w · posted 2023-04-12

Goal: successfully run the Notebook on JupyterLab.

Section 2.1 throws a ValueError, which I believe is caused by the PyTorch version I'm using.

  • PyTorch 1.7.1
  • Kernel: conda_pytorch_latest_p36

A very similar SO post exists; its solution was to use the latest PyTorch version... which I am already using.
Code:

import logging
import torch
import onnxruntime

logger = logging.getLogger(__name__)

def export_onnx_model(args, model, tokenizer, onnx_model_path):
    with torch.no_grad():
        inputs = {'input_ids':      torch.ones(1, 128, dtype=torch.int64),
                  'attention_mask': torch.ones(1, 128, dtype=torch.int64),
                  'token_type_ids': torch.ones(1, 128, dtype=torch.int64)}
        outputs = model(**inputs)

        symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}
        torch.onnx.export(model,                                  # model being run
                          (inputs['input_ids'],                   # model inputs (tuple for multiple inputs)
                           inputs['attention_mask'],
                           inputs['token_type_ids']),
                          onnx_model_path,                        # where to save the model (file or file-like object)
                          opset_version=13,                       # the ONNX opset version to export the model to
                          do_constant_folding=True,
                          input_names=['input_ids',               # the model's input names
                                       'input_mask',
                                       'segment_ids'],
                          output_names=['output'],                # the model's output names
                          dynamic_axes={'input_ids': symbolic_names,   # variable-length axes
                                        'input_mask': symbolic_names,
                                        'segment_ids': symbolic_names})
        logger.info("ONNX Model exported to {0}".format(onnx_model_path))

export_onnx_model(configs, model, tokenizer, "bert.onnx")
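
(Aside, an assumed check that is not part of the original notebook: opset 13 export appears to have been added only around PyTorch 1.8, so one defensive option is to derive the opset from the installed version instead of hard-coding 13.)

import torch

# Assumed sanity check: fall back to opset 12 on PyTorch < 1.8,
# where exporting with opset 13 raises the ValueError below.
major, minor = (int(x) for x in torch.__version__.split('.')[:2])
opset = 13 if (major, minor) >= (1, 8) else 12
print(torch.__version__, '-> exporting with opset', opset)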

Traceback:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-7-7aaa4c5455a0> in <module>
     25         logger.info("ONNX Model exported to {0}".format(onnx_model_path))
     26 
---> 27 export_onnx_model(configs, model, tokenizer, "bert.onnx")

<ipython-input-7-7aaa4c5455a0> in export_onnx_model(args, model, tokenizer, onnx_model_path)
     22                     dynamic_axes={'input_ids': symbolic_names,        # variable length axes
     23                                 'input_mask' : symbolic_names,
---> 24                                 'segment_ids' : symbolic_names})
     25         logger.info("ONNX Model exported to {0}".format(onnx_model_path))
     26 

~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/torch/onnx/__init__.py in export(model, args, f, export_params, verbose, training, input_names, output_names, aten, export_raw_ir, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, custom_opsets, enable_onnx_checker, use_external_data_format)
    228                         do_constant_folding, example_outputs,
    229                         strip_doc_string, dynamic_axes, keep_initializers_as_inputs,
--> 230                         custom_opsets, enable_onnx_checker, use_external_data_format)
    231 
    232 

~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, aten, export_raw_ir, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, custom_opsets, enable_onnx_checker, use_external_data_format)
     89             dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs,
     90             custom_opsets=custom_opsets, enable_onnx_checker=enable_onnx_checker,
---> 91             use_external_data_format=use_external_data_format)
     92 
     93 

~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, example_outputs, opset_version, _retain_param_name, do_constant_folding, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size, custom_opsets, add_node_names, enable_onnx_checker, use_external_data_format, onnx_shape_inference, use_new_jit_passes)
    614         # training=TrainingMode.TRAINING or training=TrainingMode.PRESERVE,
    615         # (to preserve whatever the original training mode was.)
--> 616         _set_opset_version(opset_version)
    617         _set_operator_export_type(operator_export_type)
    618         with select_model_mode_for_export(model, training):

~/anaconda3/envs/pytorch_latest_p36/lib/python3.6/site-packages/torch/onnx/symbolic_helper.py in _set_opset_version(opset_version)
    506         _export_onnx_opset_version = opset_version
    507         return
--> 508     raise ValueError("Unsupported ONNX opset version: " + str(opset_version))
    509 
    510 _operator_export_type = None

ValueError: Unsupported ONNX opset version: 13

Please let me know if there is anything else I can add to the post.


mutmk8jj 1#

ValueError: Unsupported ONNX opset version N -> install the latest PyTorch.
Credit goes to the Git Issue by 天雷屋.
Per the Notebook's first cell:

# Install or upgrade PyTorch 1.8.0 and OnnxRuntime 1.7.0 for CPU-only.

I inserted a new cell right after it:

pip install torch==1.10.0  # latest
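
(A hedged follow-up, not part of the original answer: after the pip install, restart the kernel and confirm that the upgraded build is actually the one being imported before re-running the export cell.)

import torch
# Assumed verification step: opset 13 export needs PyTorch >= 1.8.
print(torch.__version__)   # expect 1.8.0 or newer after the upgrade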

7gs2gvoe 2#

I ran into a similar error, "'aten::unflatten' to ONNX opset version 13 not supported", and tried installing the specific versions shown below:
(venv)$pip3 install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117
Hopefully this approach works for your case as well.
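
(A minimal sketch under assumptions, not from the original answer: if pinning an exact PyTorch build is not an option, another route is to try a few opset versions and keep the first one the installed export path accepts. export_with_fallback and its arguments are hypothetical names.)

import torch

def export_with_fallback(model, sample_inputs, path, opsets=(14, 13, 12)):
    # Hypothetical helper: attempt torch.onnx.export with each candidate
    # opset and return the first one that succeeds; re-raise the last
    # error if none of them work.
    last_err = None
    for opset in opsets:
        try:
            torch.onnx.export(model, sample_inputs, path, opset_version=opset)
            print("Exported with opset", opset)
            return opset
        except Exception as err:   # unsupported opset / operator, etc.
            last_err = err
    raise last_err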
