Open2
An example workaround: rewrite the ONNX model's batch size to UNK (None), then regenerate the model with a different, explicitly specified batch size
- Download the sample model
docker run --gpus all -it --rm \
-v `pwd`:/home/user/workdir \
ghcr.io/pinto0309/openvino2tensorflow:latest
# https://github.com/GeekAlexis/FastMOT
gdown --id 1-kXZpA6y8pNbDMMD7N--IWIjwqqnAIGZ -O yolov4_crowdhuman.onnx
- Optimization
python3 -m onnxsim yolov4_crowdhuman.onnx yolov4_crowdhuman.onnx
- Create a script to initialize the batch size
batchsize_clear.py
import onnx
import os
import struct
from argparse import ArgumentParser
def rebatch(infile, outfile, batch_size):
    """Rewrite the leading (batch) dimension of an ONNX model.

    Loads the model from *infile*, replaces dim 0 of every graph input,
    output and value_info tensor with the symbolic *batch_size* string,
    forces the first element of every Reshape target shape to -1 so the
    batch dimension stays dynamic, and saves the result to *outfile*.
    """
    model = onnx.load(infile)
    graph = model.graph

    # Rewrite the leading dimension of every tensor the graph exposes.
    exposed = list(graph.input) + list(graph.value_info) + list(graph.output)
    for value in exposed:
        value.type.tensor_type.shape.dim[0].dim_param = batch_size

    # Make every Reshape's target shape batch-agnostic (-1 in slot 0).
    for node in (n for n in graph.node if n.op_type == 'Reshape'):
        shape_name = node.input[1]  # second input of Reshape holds the target shape
        for init in graph.initializer:
            if init.name != shape_name:
                continue
            if len(init.int64_data) > 0:
                # Shape stored as repeated int64 values.
                # This also touches bias-reshape shapes, which is harmless here.
                init.int64_data[0] = -1
            elif len(init.raw_data) > 0:
                # Shape stored as raw little-endian int64 bytes; patch in place.
                buf = bytearray(init.raw_data)
                struct.pack_into('q', buf, 0, -1)
                init.raw_data = bytes(buf)

    onnx.save(model, outfile)
if __name__ == '__main__':
    # Fix: the original passed the text positionally, which argparse
    # interprets as the program name (`prog`), not the description.
    parser = ArgumentParser(description="Replace batch size with 'N'")
    parser.add_argument('infile', help='path to the input ONNX model')
    parser.add_argument('outfile', help='path to write the rewritten model')
    args = parser.parse_args()
    # 'N' becomes a symbolic dim_param, i.e. a dynamic batch dimension.
    rebatch(args.infile, args.outfile, 'N')
- Run a script to initialize the batch size
python3 batchsize_clear.py yolov4_crowdhuman.onnx yolov4_crowdhuman.onnx
- Fix the batch size to any desired value
python3 -m onnxsim --input-shape="1,3,512,512" yolov4_crowdhuman.onnx yolov4_crowdhuman.onnx
- TensorRT conversion, from Float32 to Float16
onnx2trt yolov4_crowdhuman.onnx -o yolov4_crowdhuman.trt -b 1 -d 16 -v