
Attempting to export FullSubNet-plus to ONNX: Failed to export an ONNX attribute 'onnx::Gather', since it's not constant

PINTO
python3 -m speech_enhance.tools.inference \
-C config/inference.toml \
-M weights/best_model.tar \
-I blind_testset_bothtracks/enrollment_speech \
-O output
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/xxxx/git/FullSubNet-plus/speech_enhance/tools/inference.py", line 78, in <module>
    main(configuration, checkpoint_path, output_dir)
  File "/home/xxxx/git/FullSubNet-plus/speech_enhance/tools/inference.py", line 46, in main
    torch.onnx.export(
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/__init__.py", line 316, in export
    return utils.export(model, args, f, export_params, verbose, training,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 107, in export
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 724, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 497, in _model_to_graph
    graph = _optimize_graph(graph, operator_export_type,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 216, in _optimize_graph
    graph = torch._C._jit_pass_onnx(graph, operator_export_type)
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/__init__.py", line 373, in _run_symbolic_function
    return utils._run_symbolic_function(*args, **kwargs)
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 1032, in _run_symbolic_function
    return symbolic_fn(g, *inputs, **attrs)
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py", line 166, in wrapper
    args = [_parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[assignment]
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py", line 166, in <listcomp>
    args = [_parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[assignment]
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py", line 83, in _parse_arg
    raise RuntimeError("Failed to export an ONNX attribute '" + v.node().kind() +
RuntimeError: Failed to export an ONNX attribute 'onnx::Gather', since it's not constant, please try to make things (e.g., kernel size) static if possible
PINTO
Added a debug print (print(v.node())) at line 82 of /home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py to dump each ListConstruct input before the constant check:
 79    elif value.node().kind() == "prim::ListConstruct":
 80         if desc == "is":
 81             for v in value.node().inputs():
 82                 print(v.node())
 83                 if v.node().kind() != "onnx::Constant":
 84                     raise RuntimeError("Failed to export an ONNX attribute '" + v.node().kind() +
 85                                        "', since it's not constant, please try to make "
 86                                        "things (e.g., kernel size) static if possible")
 87             return [int(v.node()["value"]) for v in value.node().inputs()]
 88         else:
 89             raise RuntimeError("ONNX symbolic doesn't know to interpret ListConstruct node")
PINTO
The print output shows that one input to the ListConstruct is an onnx::Constant (value 31), while another is an onnx::Gather that reads a dynamic tensor dimension, traced back to base_model.py line 28:

%2534 : Long(requires_grad=0, device=cpu) = onnx::Constant[value={31}]()

%2503 : Long(device=cpu) = onnx::Gather[axis=0](%2500, %2502) # /home/xxxx/git/FullSubNet-plus/speech_enhance/audio_zen/model/base_model.py:28:0

Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/xxxx/git/FullSubNet-plus/speech_enhance/tools/inference.py", line 78, in <module>
    main(configuration, checkpoint_path, output_dir)
  File "/home/xxxx/git/FullSubNet-plus/speech_enhance/tools/inference.py", line 46, in main
    torch.onnx.export(
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/__init__.py", line 316, in export
    return utils.export(model, args, f, export_params, verbose, training,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 107, in export
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 724, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 497, in _model_to_graph
    graph = _optimize_graph(graph, operator_export_type,
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 216, in _optimize_graph
    graph = torch._C._jit_pass_onnx(graph, operator_export_type)
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/__init__.py", line 373, in _run_symbolic_function
    return utils._run_symbolic_function(*args, **kwargs)
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/utils.py", line 1032, in _run_symbolic_function
    return symbolic_fn(g, *inputs, **attrs)
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py", line 167, in wrapper
    args = [_parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[assignment]
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py", line 167, in <listcomp>
    args = [_parse_arg(arg, arg_desc, arg_name, fn_name)  # type: ignore[assignment]
  File "/home/xxxx/.local/lib/python3.8/site-packages/torch/onnx/symbolic_helper.py", line 84, in _parse_arg
    raise RuntimeError("Failed to export an ONNX attribute '" + v.node().kind() +
RuntimeError: Failed to export an ONNX attribute 'onnx::Gather', since it's not constant, please try to make things (e.g., kernel size) static if possible
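
For reference, a minimal standalone sketch that reproduces this failure pattern and the int() cast workaround. This is not code from the FullSubNet-plus repo; the module name, num_neighbors value, and input shape are assumptions, and torch >= 1.10 is assumed.

import torch
import torch.nn.functional as F


class SubBandUnfold(torch.nn.Module):
    def __init__(self, num_neighbors: int = 9):
        super().__init__()
        self.num_neighbors = num_neighbors

    def forward(self, x):  # x: [B, C, F, T]
        # Dynamic sizes: values taken from x.size() are traced as shape lookups
        # (onnx::Gather) and reproduce "Failed to export an ONNX attribute 'onnx::Gather'":
        # batch_size, num_channels, num_freqs, num_frames = x.size()
        # Static sizes: int() bakes the traced values in as Python constants:
        batch_size, num_channels, num_freqs, num_frames = [int(s) for s in x.size()]
        x = F.pad(x, [0, 0, self.num_neighbors, self.num_neighbors], mode="reflect")
        sub_band_size = self.num_neighbors * 2 + 1
        # F.unfold's kernel_size must be a constant int list for the ONNX symbolic
        out = F.unfold(x, kernel_size=(sub_band_size, num_frames))
        return out.reshape(batch_size, num_channels, sub_band_size, num_frames, num_freqs)


if __name__ == "__main__":
    model = SubBandUnfold(num_neighbors=9)
    dummy = torch.randn(2, 1, 161, 200)  # [B, C, F, T]
    torch.onnx.export(model, dummy, "subband_unfold.onnx", opset_version=11)

With the int() casts, torch emits a TracerWarning noting that the converted values are treated as constants, and the exported graph only accepts the traced input shape.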
PINTO
Workaround in speech_enhance/audio_zen/model/base_model.py: cast each value returned by input.size() to a Python int so the sizes are traced as constants instead of dynamic onnx::Gather lookups (the trade-off is that the input shape is baked into the exported graph).
    @staticmethod
    def unfold(input, num_neighbor):
        """
        Along the frequency dim, split the spectrogram into overlapping sub-band units.

        Args:
            input: [B, C, F, T]
            num_neighbor:

        Returns:
            [B, N, C, F_s, T], where F_s is the frequency-axis size of each sub-band, e.g. [2, 161, 1, 19, 200]
        """
        assert input.dim() == 4, f"The dim of input is {input.dim()}. It should be four dim."
        # batch_size, num_channels, num_freqs, num_frames = input.size()
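        # Cast each dimension to a Python int so that, during ONNX export tracing,
        # the sizes are recorded as constants rather than onnx::Shape/onnx::Gather nodes.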
        x_shape = [int(s) for s in input.size()]
        batch_size, num_channels, num_freqs, num_frames = x_shape[0], x_shape[1], x_shape[2], x_shape[3]