MODNet
Converting the pretrained MODNet portrait matting checkpoints to ONNX, then to OpenVINO IR, and on to TensorFlow / TFLite / TF.js / TF-TRT / CoreML via openvino2tensorflow.
$ git clone https://github.com/ZHKKKe/MODNet && cd MODNet
onnx/export_onnx.py — hardcode the dummy-input resolution and comment out dynamic_axes so each exported model has a fixed input shape. The block below is repeated once per resolution (512, 256, 192; the 128x128 variants used later are produced the same way):
# prepare dummy_input (512x512)
batch_size = 1
height = 512
width = 512
dummy_input = Variable(torch.randn(batch_size, 3, height, width)).cuda()
# export to onnx model
torch.onnx.export(
    modnet.module, dummy_input, args.output_path,
    export_params=True, opset_version=11,
    input_names=['input'], output_names=['output'])
    # dynamic_axes={'input': {0: 'batch_size', 2: 'height', 3: 'width'},
    #               'output': {0: 'batch_size', 2: 'height', 3: 'width'}}

# prepare dummy_input (256x256)
batch_size = 1
height = 256
width = 256
dummy_input = Variable(torch.randn(batch_size, 3, height, width)).cuda()
# export to onnx model
torch.onnx.export(
    modnet.module, dummy_input, args.output_path,
    export_params=True, opset_version=11,
    input_names=['input'], output_names=['output'])
    # dynamic_axes={'input': {0: 'batch_size', 2: 'height', 3: 'width'},
    #               'output': {0: 'batch_size', 2: 'height', 3: 'width'}}

# prepare dummy_input (192x192)
batch_size = 1
height = 192
width = 192
dummy_input = Variable(torch.randn(batch_size, 3, height, width)).cuda()
# export to onnx model
torch.onnx.export(
    modnet.module, dummy_input, args.output_path,
    export_params=True, opset_version=11,
    input_names=['input'], output_names=['output'])
    # dynamic_axes={'input': {0: 'batch_size', 2: 'height', 3: 'width'},
    #               'output': {0: 'batch_size', 2: 'height', 3: 'width'}}
Run the export for each checkpoint (and repeat per resolution, adjusting --output-path so the file name carries the resolution, e.g. modnet_photographic_portrait_matting_256x256_float32.onnx, matching the names used in the following steps):
$ python3 -m onnx.export_onnx \
--ckpt-path=pretrained/modnet_photographic_portrait_matting.ckpt \
--output-path=pretrained/modnet_photographic_portrait_matting.onnx
$ python3 -m onnx.export_onnx \
--ckpt-path=pretrained/modnet_webcam_portrait_matting.ckpt \
--output-path=pretrained/modnet_webcam_portrait_matting.onnx
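A quick sanity check of an exported model (a minimal sketch using onnx / onnxruntime; the 256x256 file name is just an example of whatever --output-path was used):

# Minimal sanity check for one exported fixed-shape model (example file name).
import numpy as np
import onnx
import onnxruntime as ort

path = 'pretrained/modnet_photographic_portrait_matting_256x256_float32.onnx'

model = onnx.load(path)
onnx.checker.check_model(model)
dims = [d.dim_value for d in model.graph.input[0].type.tensor_type.shape.dim]
print(dims)  # expect [1, 3, 256, 256], since dynamic_axes was commented out

sess = ort.InferenceSession(path, providers=['CPUExecutionProvider'])
dummy = np.random.rand(1, 3, 256, 256).astype(np.float32)
matte = sess.run(['output'], {'input': dummy})[0]
print(matte.shape)  # single-channel matte at input resolution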
$ cd ..
$ python3 -m onnxsim \
MODNet/pretrained/modnet_photographic_portrait_matting_192x192_float32.onnx \
MODNet/pretrained/modnet_photographic_portrait_matting_192x192_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_photographic_portrait_matting_256x256_float32.onnx \
MODNet/pretrained/modnet_photographic_portrait_matting_256x256_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_photographic_portrait_matting_512x512_float32.onnx \
MODNet/pretrained/modnet_photographic_portrait_matting_512x512_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_webcam_portrait_matting_192x192_float32.onnx \
MODNet/pretrained/modnet_webcam_portrait_matting_192x192_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_webcam_portrait_matting_256x256_float32.onnx \
MODNet/pretrained/modnet_webcam_portrait_matting_256x256_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_webcam_portrait_matting_512x512_float32.onnx \
MODNet/pretrained/modnet_webcam_portrait_matting_512x512_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_photographic_portrait_matting_128x128_float32.onnx \
MODNet/pretrained/modnet_photographic_portrait_matting_128x128_float32_opt.onnx
$ python3 -m onnxsim \
MODNet/pretrained/modnet_webcam_portrait_matting_128x128_float32.onnx \
MODNet/pretrained/modnet_webcam_portrait_matting_128x128_float32_opt.onnx
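The eight onnxsim invocations above can also be scripted; a sketch assuming onnx-simplifier's Python API (onnxsim.simplify returns the simplified model plus a check flag):

# Batch version of the onnxsim calls above; the file list mirrors the commands.
import onnx
from onnxsim import simplify

for m in ('photographic', 'webcam'):
    for s in (128, 192, 256, 512):
        src = f'MODNet/pretrained/modnet_{m}_portrait_matting_{s}x{s}_float32.onnx'
        dst = src.replace('_float32.onnx', '_float32_opt.onnx')
        simplified, ok = simplify(onnx.load(src))
        assert ok, f'simplify() could not validate {src}'
        onnx.save(simplified, dst)
        print('saved', dst)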
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_256x256_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_256x256/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_256x256_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_256x256/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_512x512_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_512x512/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_512x512_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_512x512/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_256x256_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_256x256/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_256x256_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_256x256/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_512x512_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_512x512/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_512x512_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_512x512/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_192x192_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_192x192/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_192x192_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_192x192/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_192x192_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_192x192/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_192x192_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_192x192/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_128x128_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_128x128/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_photographic_portrait_matting_128x128_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_photographic_portrait_matting_128x128/FP16 \
--data_type FP16
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_128x128_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_128x128/FP32 \
--data_type FP32
python3 ${INTEL_OPENVINO_DIR}/deployment_tools/model_optimizer/mo.py \
--input_model MODNet/pretrained/modnet_webcam_portrait_matting_128x128_float32_opt.onnx \
--output_dir MODNet/pretrained/openvino/modnet_webcam_portrait_matting_128x128/FP16 \
--data_type FP16
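To confirm each IR loads and runs, a minimal sketch against the OpenVINO 2021 Python API (IECore); only the 256x256 photographic model is shown, and the input/output names are read from the network rather than assumed:

# Load one of the generated IRs and run a dummy inference (OpenVINO 2021 API).
import numpy as np
from openvino.inference_engine import IECore

xml = ('MODNet/pretrained/openvino/modnet_photographic_portrait_matting_256x256/FP32/'
       'modnet_photographic_portrait_matting_256x256_float32_opt.xml')

ie = IECore()
net = ie.read_network(model=xml, weights=xml.replace('.xml', '.bin'))
exec_net = ie.load_network(network=net, device_name='CPU')

input_name = next(iter(net.input_info))
output_name = next(iter(net.outputs))
print(net.input_info[input_name].input_data.shape)  # expect [1, 3, 256, 256]

dummy = np.random.rand(1, 3, 256, 256).astype(np.float32)
result = exec_net.infer({input_name: dummy})
print(result[output_name].shape)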
cd MODNet
docker run --gpus all -it --rm \
-v `pwd`:/workspace/resources \
-e LOCAL_UID=$(id -u $USER) \
-e LOCAL_GID=$(id -g $USER) \
pinto0309/openvino2tensorflow:latest bash
source /opt/intel/openvino_2021/bin/setupvars.sh
cd resources/pretrained
openvino2tensorflow \
--model_path openvino/modnet_photographic_portrait_matting_256x256/FP32/modnet_photographic_portrait_matting_256x256_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_photographic_portrait_matting_512x512/FP32/modnet_photographic_portrait_matting_512x512_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_webcam_portrait_matting_256x256/FP32/modnet_webcam_portrait_matting_256x256_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_webcam_portrait_matting_512x512/FP32/modnet_webcam_portrait_matting_512x512_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_photographic_portrait_matting_192x192/FP32/modnet_photographic_portrait_matting_192x192_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_webcam_portrait_matting_192x192/FP32/modnet_webcam_portrait_matting_192x192_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_photographic_portrait_matting_128x128/FP32/modnet_photographic_portrait_matting_128x128_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
openvino2tensorflow \
--model_path openvino/modnet_webcam_portrait_matting_128x128/FP32/modnet_webcam_portrait_matting_128x128_float32_opt.xml \
--output_saved_model True \
--output_pb True \
--output_no_quant_float32_tflite True \
--output_weight_quant_tflite True \
--output_float16_quant_tflite True \
--output_integer_quant_tflite True \
--string_formulas_for_normalization 'data / 255' \
--output_tfjs True \
--output_tftrt True \
--output_coreml True \
--output_integer_quant_type 'uint8'
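One of the generated float32 .tflite files can then be smoke-tested with the TFLite interpreter. A sketch only; the file name and location depend on openvino2tensorflow's --model_output_path (default saved_model/), so the path below is a placeholder:

# Run a dummy frame through one of the generated float32 TFLite models.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='saved_model/model_float32.tflite')  # placeholder path
interpreter.allocate_tensors()

inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
print(inp['shape'], inp['dtype'])  # e.g. [1, 256, 256, 3] after the NCHW -> NHWC conversion

frame = np.random.rand(*inp['shape']).astype(np.float32)  # dummy frame; real input needs MODNet preprocessing
interpreter.set_tensor(inp['index'], frame)
interpreter.invoke()
matte = interpreter.get_tensor(out['index'])
print(matte.shape)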