Open25

mediapipeのコンパイル試行 Pi4 armv7l / aarch64など

$ cd ~ && mkdir -p mediapipe && cd mediapipe

### for armv7l
$ sudo apt update && \
sudo apt install -y python3-dev cmake protobuf-compiler \
python3-pip git make openjdk-8-jdk

### for aarch64
$ sudo apt update && \
sudo apt install -y python3-dev cmake protobuf-compiler \
python3-pip git make openjdk-11-jdk-headless

$ sudo pip3 install pip setuptools --upgrade

$ git clone -b v0.8.4 https://github.com/google/mediapipe && cd mediapipe
$ sed -i -e "/\"imgcodecs\"/d;/\"calib3d\"/d;/\"features2d\"/d;/\"highgui\"/d;/\"video\"/d;/\"videoio\"/d" third_party/BUILD
$ sed -i -e "/-ljpeg/d;/-lpng/d;/-ltiff/d;/-lImath/d;/-lIlmImf/d;/-lHalf/d;/-lIex/d;/-lIlmThread/d;/-lrt/d;/-ldc1394/d;/-lavcodec/d;/-lavformat/d;/-lavutil/d;/-lswscale/d;/-lavresample/d" third_party/BUILD

### for armv7l
$ nano third_party/BUILD

   "WITH_ITT": "OFF",
   "WITH_JASPER": "OFF",
   "WITH_WEBP": "OFF",
   "ENABLE_NEON": "ON",
   "WITH_TENGINE": "OFF",

### for aarch64
$ nano third_party/BUILD

   "WITH_ITT": "OFF",
   "WITH_JASPER": "OFF",
   "WITH_WEBP": "OFF",
   "ENABLE_NEON": "OFF",
   "WITH_TENGINE": "OFF",
### for armv7l
$ wget https://github.com/PINTO0309/Bazel_bin/raw/main/3.7.2/armhf/install.sh
$ sudo chmod +x install.sh
$ ./install.sh

### for aarch64
$ wget https://github.com/PINTO0309/Bazel_bin/raw/main/3.7.2/aarch64/install.sh
$ sudo chmod +x install.sh
$ ./install.sh
$ sudo python3 setup.py gen_protos
$ sudo bazel clean --expunge
$ sudo python3 setup.py bdist_wheel
### jetson
$ sudo apt update && \
sudo apt install -y python3-dev cmake protobuf-compiler \
python3-pip git make openjdk-8-jdk
$ pip3 install pip setuptools numpy==1.19.4 --upgrade
$ git clone -b v0.8.4 https://github.com/google/mediapipe && cd mediapipe

$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
$ ldconfig

$ export TF_CUDA_PATHS=/usr/local/cuda:/usr/lib/aarch64-linux-gnu:/usr/include

$ nano .bazelrc

build:using_cuda --define=using_cuda=true
build:using_cuda --action_env TF_NEED_CUDA=1
build:using_cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build --define=tensorflow_enable_mlir_generated_gpu_kernels=0
build:using_cuda --define=tensorflow_enable_mlir_generated_gpu_kernels=1
build:cuda --config=using_cuda
build:cuda --define=using_cuda_nvcc=true

$ nano setup.py

# protoc_command = [self._protoc, '-I.', '--python_out=.', source]
protoc_command = [self._protoc, '-I.', '-I/usr/local/include', '--python_out=.', source]

$ wget https://github.com/protocolbuffers/protobuf/releases/download/v3.15.6/protoc-3.15.6-linux-aarch_64.zip
$ unzip protoc-3.15.6-linux-aarch_64.zip -d protoc3
$ sudo mv protoc3/bin/* /usr/local/bin/
$ sudo mv protoc3/include/* /usr/local/include/
$ sudo chown [user] /usr/local/bin/protoc
$ sudo chown -R [user] /usr/local/include/google

$ sed -i -e "s/numpy/numpy==1.19.4/g" requirements.txt


######$ sudo apt install gcc-8 g++-8 -y
######$ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 8
######$ sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 8
$ nano setup.py

    bazel_command = [
        'bazel',
        'build',
        '--compilation_mode=opt',
        '--define=MEDIAPIPE_DISABLE_GPU=1',
        '--action_env=PYTHON_BIN_PATH=' + _normalize_path(sys.executable),
        os.path.join('mediapipe/modules/', graph_path),
    ]

↓

    bazel_command = [
        'bazel',
        'build',
        '--compilation_mode=opt',
        '--config=cuda',
        '--spawn_strategy=local',
        '--define=no_gcp_support=true',
        '--define=no_aws_support=true',
        '--define=no_nccl_support=true',
        '--copt=-DMESA_EGL_NO_X11_HEADERS',
        '--copt=-DEGL_NO_X11',
        '--local_ram_resources=4096',
        '--local_cpu_resources=3',
        '--action_env=PYTHON_BIN_PATH=' + _normalize_path(sys.executable),
        os.path.join('mediapipe/modules/', graph_path),
    ]

===================================================

    bazel_command = [
        'bazel',
        'build',
        '--compilation_mode=opt',
        '--define=MEDIAPIPE_DISABLE_GPU=1',
        '--action_env=PYTHON_BIN_PATH=' + _normalize_path(sys.executable),
        str(ext.bazel_target + '.so'),
    ]

↓

    bazel_command = [
        'bazel',
        'build',
        '--compilation_mode=opt',
        '--config=cuda',
        '--spawn_strategy=local',
        '--define=no_gcp_support=true',
        '--define=no_aws_support=true',
        '--define=no_nccl_support=true',
        '--copt=-DMESA_EGL_NO_X11_HEADERS',
        '--copt=-DEGL_NO_X11',
        '--local_ram_resources=4096',
        '--local_cpu_resources=3',
        '--action_env=PYTHON_BIN_PATH=' + _normalize_path(sys.executable),
        str(ext.bazel_target + '.so'),
    ]
$ sudo python3 setup.py gen_protos
$ sudo bazel clean --expunge
$ sudo python3 setup.py bdist_wheel

mediapipe/framework/tool/BUILD

cc_binary(
    name = "encode_as_c_string",
    srcs = ["encode_as_c_string.cc"],
    visibility = ["//visibility:public"],
    deps = [
        "@com_google_absl//absl/strings",
    ],
    linkopts = ["-lm"],
)

mediapipe/python/BUILD

cc_library(
    name = "builtin_calculators",
    deps = [
        "//mediapipe/calculators/core:gate_calculator",
        "//mediapipe/calculators/core:pass_through_calculator",
        "//mediapipe/calculators/core:side_packet_to_stream_calculator",
        "//mediapipe/calculators/core:split_normalized_landmark_list_calculator",
        "//mediapipe/calculators/core:string_to_int_calculator",
        "//mediapipe/calculators/image:image_transformation_calculator",
        "//mediapipe/calculators/util:detection_unique_id_calculator",
        "//mediapipe/modules/face_detection:face_detection_front_cpu",
        "//mediapipe/modules/face_landmark:face_landmark_front_cpu",
        "//mediapipe/modules/hand_landmark:hand_landmark_tracking_cpu",
        "//mediapipe/modules/holistic_landmark:holistic_landmark_cpu",
        "//mediapipe/modules/objectron:objectron_cpu",
        "//mediapipe/modules/palm_detection:palm_detection_cpu",
        "//mediapipe/modules/pose_detection:pose_detection_cpu",
        "//mediapipe/modules/pose_landmark:pose_landmark_by_roi_cpu",
        "//mediapipe/modules/pose_landmark:pose_landmark_cpu",
    ],
)

↓

cc_library(
    name = "builtin_calculators",
    deps = [
        "//mediapipe/calculators/core:gate_calculator",
        "//mediapipe/calculators/core:pass_through_calculator",
        "//mediapipe/calculators/core:side_packet_to_stream_calculator",
        "//mediapipe/calculators/core:split_normalized_landmark_list_calculator",
        "//mediapipe/calculators/core:string_to_int_calculator",
        "//mediapipe/calculators/image:image_transformation_calculator",
        "//mediapipe/calculators/util:detection_unique_id_calculator",
        ########################################################
        "//mediapipe/modules/face_detection:face_detection_front_gpu",
        "//mediapipe/modules/face_landmark:face_landmark_front_gpu",
        "//mediapipe/modules/hand_landmark:hand_landmark_tracking_gpu",
        "//mediapipe/modules/holistic_landmark:holistic_landmark_gpu",
        "//mediapipe/modules/objectron:objectron_gpu",
        "//mediapipe/modules/palm_detection:palm_detection_gpu",
        "//mediapipe/modules/pose_detection:pose_detection_gpu",
        "//mediapipe/modules/pose_landmark:pose_landmark_by_roi_gpu",
        "//mediapipe/modules/pose_landmark:pose_landmark_gpu",
        "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
        "//mediapipe/calculators/image:color_convert_calculator",
        ########################################################
    ],
)

mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt

# Predicts pose landmarks.
node {
  calculator: "PoseLandmarkGpu"
  input_stream: "IMAGE:image"
  input_side_packet: "MODEL_COMPLEXITY:model_complexity"
  input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
  output_stream: "LANDMARKS:pose_landmarks"
  output_stream: "ROI_FROM_LANDMARKS:pose_landmarks_roi"
  output_stream: "DETECTION:pose_detection"
}

↓

node: {
  calculator: "ColorConvertCalculator"
  input_stream: "RGB_IN:image"
  output_stream: "RGBA_OUT:image_rgba"
}

node: {
  calculator: "ImageFrameToGpuBufferCalculator"
  input_stream: "image_rgba"
  output_stream: "image_gpu"
}

# Predicts pose landmarks.
node {
  calculator: "PoseLandmarkGpu"
  input_stream: "IMAGE:image_gpu"
  input_side_packet: "MODEL_COMPLEXITY:model_complexity"
  input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
  output_stream: "LANDMARKS:pose_landmarks"
  output_stream: "ROI_FROM_LANDMARKS:pose_landmarks_roi"
  output_stream: "DETECTION:pose_detection"
}

mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt

# Predicts left and right hand landmarks based on the initial pose landmarks.
node {
  calculator: "HandLandmarksLeftAndRightGpu"
  input_stream: "IMAGE:image"
  input_stream: "POSE_LANDMARKS:pose_landmarks"
  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
}

↓

# Predicts left and right hand landmarks based on the initial pose landmarks.
node {
  calculator: "HandLandmarksLeftAndRightGpu"
  input_stream: "IMAGE:image_gpu"
  input_stream: "POSE_LANDMARKS:pose_landmarks"
  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
}

mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt

# Predicts face landmarks based on the initial pose landmarks.
node {
  calculator: "FaceLandmarksFromPoseGpu"
  input_stream: "IMAGE:image"
  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
  output_stream: "FACE_LANDMARKS:face_landmarks"
}

↓

# Predicts face landmarks based on the initial pose landmarks.
node {
  calculator: "FaceLandmarksFromPoseGpu"
  input_stream: "IMAGE:image_gpu"
  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
  output_stream: "FACE_LANDMARKS:face_landmarks"
}

mediapipe/python/solutions/holistic.py

BINARYPB_FILE_PATH = 'mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb'

↓

BINARYPB_FILE_PATH = 'mediapipe/modules/holistic_landmark/holistic_landmark_gpu.binarypb'

mediapipe/python/solutions/holistic.py

    _download_oss_pose_landmark_model(model_complexity)
    super().__init__(
        binary_graph_path=BINARYPB_FILE_PATH,
        side_inputs={
            'model_complexity': model_complexity,
            'smooth_landmarks': smooth_landmarks and not static_image_mode,
        },
        calculator_params={
            'poselandmarkcpu__ConstantSidePacketCalculator.packet': [
                constant_side_packet_calculator_pb2
                .ConstantSidePacketCalculatorOptions.ConstantSidePacket(
                    bool_value=not static_image_mode)
            ],
            'poselandmarkcpu__posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'poselandmarkcpu__poselandmarkbyroicpu__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=[
            'pose_landmarks', 'left_hand_landmarks', 'right_hand_landmarks',
            'face_landmarks'
        ])

↓

    _download_oss_pose_landmark_model(model_complexity)
    super().__init__(
        binary_graph_path=BINARYPB_FILE_PATH,
        side_inputs={
            'model_complexity': model_complexity,
            'smooth_landmarks': smooth_landmarks and not static_image_mode,
        },
        calculator_params={
            'poselandmarkgpu__ConstantSidePacketCalculator.packet': [
                constant_side_packet_calculator_pb2
                .ConstantSidePacketCalculatorOptions.ConstantSidePacket(
                    bool_value=not static_image_mode)
            ],
            'poselandmarkgpu__posedetectiongpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'poselandmarkgpu__poselandmarkbyroigpu__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=[
            'pose_landmarks', 'left_hand_landmarks', 'right_hand_landmarks',
            'face_landmarks'
        ])

setup.py

  def run(self):
    _check_bazel()
    binary_graphs = [
        'face_detection/face_detection_front_cpu',
        'face_landmark/face_landmark_front_cpu',
        'hand_landmark/hand_landmark_tracking_cpu',
        'holistic_landmark/holistic_landmark_cpu', 'objectron/objectron_cpu',
        'pose_landmark/pose_landmark_cpu'
    ]

↓

  def run(self):
    _check_bazel()
    binary_graphs = [
        'face_detection/face_detection_front_cpu',
        'face_landmark/face_landmark_front_cpu',
        'hand_landmark/hand_landmark_tracking_cpu',
        'holistic_landmark/holistic_landmark_gpu',
        'objectron/objectron_cpu',
        'pose_landmark/pose_landmark_cpu'
    ]

mediapipe/python/solutions/hands.py

BINARYPB_FILE_PATH = 'mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.binarypb'

↓

BINARYPB_FILE_PATH = 'mediapipe/modules/hand_landmark/hand_landmark_tracking_gpu.binarypb'


    super().__init__(
        binary_graph_path=BINARYPB_FILE_PATH,
        side_inputs={
            'num_hands': max_num_hands,
        },
        calculator_params={
            'ConstantSidePacketCalculator.packet': [
                constant_side_packet_calculator_pb2
                .ConstantSidePacketCalculatorOptions.ConstantSidePacket(
                    bool_value=not static_image_mode)
            ],
            'palmdetectioncpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'handlandmarkcpu__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=['multi_hand_landmarks', 'multi_handedness'])

↓

    super().__init__(
        binary_graph_path=BINARYPB_FILE_PATH,
        side_inputs={
            'num_hands': max_num_hands,
        },
        calculator_params={
            'ConstantSidePacketCalculator.packet': [
                constant_side_packet_calculator_pb2
                .ConstantSidePacketCalculatorOptions.ConstantSidePacket(
                    bool_value=not static_image_mode)
            ],
            'palmdetectiongpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'handlandmarkgpu__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=['multi_hand_landmarks', 'multi_handedness'])

mediapipe/modules/palm_detection/palm_detection_gpu.pbtxt

# Transforms an image into a 256x256 tensor while keeping the aspect ratio, and
# therefore may result in potential letterboxing.
node {
  calculator: "ImageToTensorCalculator"
  input_stream: "IMAGE_GPU:image"
  output_stream: "TENSORS:input_tensor"
  output_stream: "LETTERBOX_PADDING:letterbox_padding"
  options: {
    [mediapipe.ImageToTensorCalculatorOptions.ext] {

↓

node: {
  calculator: "ColorConvertCalculator"
  input_stream: "RGB_IN:image"
  output_stream: "RGBA_OUT:image_rgba"
}

node: {
  calculator: "ImageFrameToGpuBufferCalculator"
  input_stream: "image_rgba"
  output_stream: "image_gpu"
}

# Transforms an image into a 256x256 tensor while keeping the aspect ratio, and
# therefore may result in potential letterboxing.
node {
  calculator: "ImageToTensorCalculator"
  input_stream: "IMAGE_GPU:image_gpu"
  output_stream: "TENSORS:input_tensor"
  output_stream: "LETTERBOX_PADDING:letterbox_padding"
  options: {
    [mediapipe.ImageToTensorCalculatorOptions.ext] {

mediapipe/modules/hand_landmark/hand_landmark_gpu.pbtxt

# Transforms a region of image into a 224x224 tensor while keeping the aspect
# ratio, and therefore may result in potential letterboxing.
node {
  calculator: "ImageToTensorCalculator"
  input_stream: "IMAGE_GPU:image"
  input_stream: "NORM_RECT:hand_rect"
  output_stream: "TENSORS:input_tensor"
  output_stream: "LETTERBOX_PADDING:letterbox_padding"
  options: {
    [mediapipe.ImageToTensorCalculatorOptions.ext] {
↓

node: {
  calculator: "ColorConvertCalculator"
  input_stream: "RGB_IN:image"
  output_stream: "RGBA_OUT:image_rgba"
}

node: {
  calculator: "ImageFrameToGpuBufferCalculator"
  input_stream: "image_rgba"
  output_stream: "image_gpu"
}

# Transforms a region of image into a 224x224 tensor while keeping the aspect
# ratio, and therefore may result in potential letterboxing.
node {
  calculator: "ImageToTensorCalculator"
  input_stream: "IMAGE_GPU:image_gpu"
  input_stream: "NORM_RECT:hand_rect"
  output_stream: "TENSORS:input_tensor"
  output_stream: "LETTERBOX_PADDING:letterbox_padding"
  options: {
    [mediapipe.ImageToTensorCalculatorOptions.ext] {

setup.py

  def run(self):
    _check_bazel()
    binary_graphs = [
        'face_detection/face_detection_front_cpu',
        'face_landmark/face_landmark_front_cpu',
        'hand_landmark/hand_landmark_tracking_cpu',
        'holistic_landmark/holistic_landmark_gpu',
        'objectron/objectron_cpu',
        'pose_landmark/pose_landmark_gpu'
    ]

↓

  def run(self):
    _check_bazel()
    binary_graphs = [
        'face_detection/face_detection_front_cpu',
        'face_landmark/face_landmark_front_cpu',
        'hand_landmark/hand_landmark_tracking_gpu',
        'holistic_landmark/holistic_landmark_gpu',
        'objectron/objectron_cpu',
        'pose_landmark/pose_landmark_gpu'
    ]

mediapipe/python/solutions/face_mesh.py

BINARYPB_FILE_PATH = 'mediapipe/modules/face_landmark/face_landmark_front_cpu.binarypb'
↓
BINARYPB_FILE_PATH = 'mediapipe/modules/face_landmark/face_landmark_front_gpu.binarypb'

    super().__init__(
        binary_graph_path=BINARYPB_FILE_PATH,
        side_inputs={
            'num_faces': max_num_faces,
        },
        calculator_params={
            'ConstantSidePacketCalculator.packet': [
                constant_side_packet_calculator_pb2
                .ConstantSidePacketCalculatorOptions.ConstantSidePacket(
                    bool_value=not static_image_mode)
            ],
            'facedetectionfrontcpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'facelandmarkcpu__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=['multi_face_landmarks'])
↓
    super().__init__(
        binary_graph_path=BINARYPB_FILE_PATH,
        side_inputs={
            'num_faces': max_num_faces,
        },
        calculator_params={
            'ConstantSidePacketCalculator.packet': [
                constant_side_packet_calculator_pb2
                .ConstantSidePacketCalculatorOptions.ConstantSidePacket(
                    bool_value=not static_image_mode)
            ],
            'facedetectionfrontgpu__TensorsToDetectionsCalculator.min_score_thresh':
                min_detection_confidence,
            'facelandmarkgpu__ThresholdingCalculator.threshold':
                min_tracking_confidence,
        },
        outputs=['multi_face_landmarks'])
ログインするとコメントできます