Open14
ConvNeXt のONNXエクスポートと Keras 変換
git clone https://github.com/facebookresearch/ConvNeXt.git && cd ConvNeXt
docker run --gpus all -it --rm \
-v `pwd`:/home/user/workdir \
ghcr.io/pinto0309/openvino2tensorflow:latest
pip install timm tensorboardX
- Edit
main.py
for ONNX export
main.py
def main(args):
    """Export the model selected by ``args`` to a simplified ONNX file.

    This is the upstream ConvNeXt ``main.py`` edited for export: the
    dataset / sampler / dataloader construction is removed and the process
    exits immediately after the ONNX file is written and simplified.

    Args:
        args: parsed command-line namespace (``model``, ``input_size``,
            ``seed``, ``nb_classes``, ``drop_path``, mixup options, ...).

    Side effects:
        Writes ``{args.model}_{args.input_size}x{args.input_size}.onnx``
        to the current directory, then terminates via ``sys.exit(0)``.
    """
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True

    # NOTE: the upstream dataset / sampler / dataloader setup was deleted
    # here — none of it is needed for a pure ONNX export.

    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        print("Mixup is activated!")
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)

    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_path_rate=args.drop_path,
        layer_scale_init_value=args.layer_scale_init_value,
        head_init_scale=args.head_init_scale,
    )
    # NOTE(review): no checkpoint is loaded before export, so the ONNX file
    # carries randomly initialized weights even though the tutorial passes
    # --resume — confirm this is intended, or load args.resume here first.

    # Bugfix: export must run in eval mode so DropPath/Dropout are traced
    # as no-ops instead of live stochastic layers.
    model.eval()

    onnx_file = f"{args.model}_{args.input_size}x{args.input_size}.onnx"
    x = torch.randn(1, 3, args.input_size, args.input_size).cpu()
    torch.onnx.export(
        model,
        args=(x,),  # bugfix: (x) is just x; torch.onnx.export expects a tuple
        f=onnx_file,
        opset_version=11,
    )

    # Simplify the exported graph in place, and fail loudly if the
    # simplifier cannot validate the result (``check`` was ignored before).
    import onnx
    from onnxsim import simplify
    onnx_model = onnx.load(onnx_file)
    model_simp, check = simplify(onnx_model)
    assert check, "onnx-simplifier failed to validate the simplified model"
    onnx.save(model_simp, onnx_file)

    import sys
    sys.exit(0)
- ONNX export
python3 main.py \
--model convnext_base \
--eval true \
--resume https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth \
--input_size 224 \
--drop_path 0.2 \
--data_path /path/to/imagenet-1k
exit
- ONNX to OpenVINO
docker run --rm -it -v `pwd`:/workdir pinto0309/openvino:2022.1.20220106
cd /workdir
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/openvino/runtime/lib/intel64
ldconfig
MODEL=convnext_base
H=224
W=224
/opt/intel/repo/openvino/tools/mo/openvino/tools/mo/mo.py \
--input_model ${MODEL}_${H}x${W}.onnx \
--data_type FP32 \
--use_new_frontend
exit
- Granting editing privileges (.xml / .bin)
sudo chown -R $USER *
- Creating JSON for adjusting OpenVINO layer behavior
replace.json
{
"format_version": 2,
"layers": [
{
"layer_id": "1",
"type": "Reshape",
"replace_mode": "insert_after",
"values": [
512
]
},
{
"layer_id": "2",
"type": "Reshape",
"replace_mode": "insert_after",
"values": [
256
]
},
{
"layer_id": "3",
"type": "Reshape",
"replace_mode": "insert_after",
"values": [
128
]
},
{
"layer_id": "11",
"type": "Reshape",
"replace_mode": "insert_after",
"values": [
128
]
},
{
"layer_id": "805",
"type": "Const",
"replace_mode": "direct",
"values": [
2,
3
]
}
]
}
- OpenVINO to Keras - openvino2tensorflow==1.27.5
docker run --gpus all -it --rm \
-v `pwd`:/home/user/workdir \
ghcr.io/pinto0309/openvino2tensorflow:latest
MODEL=convnext_base
H=224
W=224
openvino2tensorflow \
--model_path ${MODEL}_${H}x${W}.xml \
--output_h5 \
--output_no_quant_float32_tflite \
--weight_replacement_config replace.json
- Finish
- Model loading test
$ python3
>>> import tensorflow as tf
>>> model = tf.keras.models.load_model('saved_model/model_float32.h5')
>>> model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(1, 224, 224, 3)] 0 []
conv2d (Conv2D) (1, 56, 56, 128) 6144 ['input_1[0][0]']
tf.math.add (TFOpLambda) (1, 56, 56, 128) 0 ['conv2d[0][0]']
tf.math.reduce_variance (TFOpL (1, 1, 56, 128) 0 ['tf.math.add[0][0]']
ambda)
tf.math.reduce_mean (TFOpLambd (1, 1, 56, 128) 0 ['tf.math.add[0][0]']
a)
tf.__operators__.add (TFOpLamb (1, 1, 56, 128) 0 ['tf.math.reduce_variance[0][0]']
da)
tf.math.subtract (TFOpLambda) (1, 56, 56, 128) 0 ['tf.math.add[0][0]',
'tf.math.reduce_mean[0][0]']
tf.math.sqrt (TFOpLambda) (1, 1, 56, 128) 0 ['tf.__operators__.add[0][0]']
tf.math.truediv (TFOpLambda) (1, 56, 56, 128) 0 ['tf.math.subtract[0][0]',
'tf.math.sqrt[0][0]']
:
tf.math.add_189 (TFOpLambda) (1, 1000) 0 ['tf.linalg.matmul_72[0][0]']
tf.identity (TFOpLambda) (1, 1000) 0 ['tf.math.add_189[0][0]']
==================================================================================================
Total params: 3,643,008
Trainable params: 3,643,008
Non-trainable params: 0
Reference