
Trying Out ORB-SLAM3!


Environment

  • Ubuntu 24.04
  • A mamba virtual environment called slam-project (created as sketched below)
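
A minimal sketch of setting up that environment (the Python version here is just one reasonable choice, not something the rest of the setup depends on):

mamba create -n slam-project -c conda-forge python=3.11
mamba activate slam-project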

Installing Pangolin

cd Documents
sudo apt install libgl1-mesa-glx libegl1-mesa libgl1-mesa-dev libegl1-mesa-dev
mamba install -c conda-forge libgl-devel xorg-libx11
git clone --recursive https://github.com/stevenlovegrove/Pangolin.git
cd ~/Documents/Pangolin
mkdir build && cd build
/usr/bin/cmake .. \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX \
    -DCMAKE_C_COMPILER=/usr/bin/gcc \
    -DCMAKE_CXX_COMPILER=/usr/bin/g++
make -j$(nproc)
make install
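
As a quick sanity check, the Pangolin CMake config and libraries should now be under the conda prefix:

ls $CONDA_PREFIX/lib/cmake/Pangolin
ls $CONDA_PREFIX/lib | grep -i pango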

Compiling ORB-SLAM3

  • Clone the repository, bump the C++ standard in CMakeLists.txt from C++11 to C++14 (the sed below), and make the build script executable.
    cd Documents
    git clone https://github.com/UZ-SLAMLab/ORB_SLAM3.git
    cd ORB_SLAM3
    sed -i 's/++11/++14/g' CMakeLists.txt
    chmod +x build.sh
    
  • Tweak CMakeLists.txt a little. In the find_package block, add find_package(OpenGL REQUIRED) so it reads:
    find_package(Eigen3 3.1.0 REQUIRED)
    find_package(Pangolin REQUIRED)
    find_package(realsense2)
    find_package(OpenGL REQUIRED)
    
    Then add ${OPENGL_LIBRARIES} to the target_link_libraries block:
    target_link_libraries(${PROJECT_NAME}
    ${OpenCV_LIBS}
    ${EIGEN3_LIBS}
    ${Pangolin_LIBRARIES}
    ${OPENGL_LIBRARIES}
    ${PROJECT_SOURCE_DIR}/Thirdparty/DBoW2/lib/libDBoW2.so
    ${PROJECT_SOURCE_DIR}/Thirdparty/g2o/lib/libg2o.so
    -lboost_serialization
    -lcrypto
    )
  • Build it.
    ./build.sh
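
    If the build finishes cleanly, the core library and the example binaries should be in the repo's standard output locations:
    ls lib/libORB_SLAM3.so
    ls Examples/RGB-D/rgbd_tum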
    

Testing with the TUM RGB-D Dataset

  • Download a test sequence.
    cd Documents
    wget -c http://vision.in.tum.de/rgbd/dataset/freiburg1/rgbd_dataset_freiburg1_xyz.tgz
    tar -zxvf rgbd_dataset_freiburg1_xyz.tgz
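
    After extraction, the folder should contain the standard TUM RGB-D layout:
    ls rgbd_dataset_freiburg1_xyz
    # rgb/  depth/  rgb.txt  depth.txt  groundtruth.txt  accelerometer.txt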
    
  • Associate the RGB images with the depth images.
    • Create ~/Documents/rgbd_dataset_freiburg1_xyz/associate.py with the following contents.
      associate.py
      #!/usr/bin/env python3
      # Software License Agreement (BSD License)
      #
      # Copyright (c) 2013, Juergen Sturm, TUM
      # All rights reserved.
      #
      # Redistribution and use in source and binary forms, with or without
      # modification, are permitted provided that the following conditions
      # are met:
      #
      #  * Redistributions of source code must retain the above copyright
      #    notice, this list of conditions and the following disclaimer.
      #  * Redistributions in binary form must reproduce the above
      #    copyright notice, this list of conditions and the following
      #    disclaimer in the documentation and/or other materials provided
      #    with the distribution.
      #  * Neither the name of TUM nor the names of its
      #    contributors may be used to endorse or promote products derived
      #    from this software without specific prior written permission.
      #
      # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
      # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
      # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
      # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
      # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
      # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
      # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
      # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
      # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
      # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
      # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
      # POSSIBILITY OF SUCH DAMAGE.
      
      """
      The associate script matches two lists of files based on their timestamps.
      """
      
      import argparse
      
      def read_file_list(filename):
          """
          Reads a trajectory from a text file. 
          
          File format:
          The file could be generated with the following command:
          ls -l | awk '{print $9}' > files.txt
          
          Input:
          filename -- filename
          
          Output:
          dict -- dictionary of {timestamp: filename}
          """
          # Use a context manager so the file is always closed, and avoid
          # shadowing the built-in name `list`.
          with open(filename) as f:
              data = f.read()
          lines = data.replace(","," ").replace("\t"," ").split("\n")
          parsed = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
          parsed = [(float(l[0]),l[1:]) for l in parsed if len(l)>1]
          return dict(parsed)
      
      def associate(first_list, second_list, offset, max_difference):
          """
          Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim 
          to find the closest match for every input tuple.
          
          Input:
          first_list -- first dictionary of (stamp,data)
          second_list -- second dictionary of (stamp,data)
          offset -- time offset added to the stamps of the second list (default: 0.0)
          max_difference -- search radius for matching entries (default: 0.02)
              
          Output:
          matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
          
          """
          first_keys = list(first_list.keys())
          first_keys.sort()
          second_keys = list(second_list.keys())
          second_keys.sort()
          
          matches = []
          for i, a in enumerate(first_keys):
              best_j = -1
              min_diff = float('inf')
              
              # Determine the search window for j, shifting the bounds by offset
              # so they agree with the difference used below: abs(a - (b + offset))
              start_j = 0
              while start_j < len(second_keys) and second_keys[start_j] + offset < a - max_difference:
                  start_j += 1
                  
              end_j = start_j
              while end_j < len(second_keys) and second_keys[end_j] + offset <= a + max_difference:
                  b = second_keys[end_j]
                  diff = abs(a - (b + offset))
                  if diff < min_diff:
                      min_diff = diff
                      best_j = end_j
                  end_j += 1
                  
              if best_j != -1:
                  b = second_keys[best_j]
                  if min_diff < max_difference:
                      matches.append((a, b))
      
          # Create a set of the second_keys that are in matches
          matched_second_keys = set(m[1] for m in matches)
      
          # Now, find the best match for each second_key that is not in matches
          for j, b in enumerate(second_keys):
              if b not in matched_second_keys:
                  best_i = -1
                  min_diff = float('inf')
      
                  start_i = 0
                  while start_i < len(first_keys) and first_keys[start_i] < b + offset - max_difference:
                      start_i += 1
                  
                  end_i = start_i
                  while end_i < len(first_keys) and first_keys[end_i] <= b + offset + max_difference:
                      a = first_keys[end_i]
                      diff = abs(a - (b + offset))
                      if diff < min_diff:
                          min_diff = diff
                          best_i = end_i
                      end_i += 1
                  
                  if best_i != -1:
                      a = first_keys[best_i]
                      if min_diff < max_difference:
                          matches.append((a,b))
          
          # Sort matches by the first timestamp
          matches.sort()
      
          return matches
      
      if __name__ == '__main__':
          # parse command line
          parser = argparse.ArgumentParser(description='''
          This script takes two data files with timestamps and associates them   
          ''')
          parser.add_argument('first_file', help='first text file (format: timestamp data)')
          parser.add_argument('second_file', help='second text file (format: timestamp data)')
          parser.add_argument('--first_only', help='only output associated lines from first file', action='store_true')
          parser.add_argument('--offset', help='time offset added to second file (default: 0.0)',default=0.0)
          parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
          args = parser.parse_args()
      
          first_list = read_file_list(args.first_file)
          second_list = read_file_list(args.second_file)
      
          matches = associate(first_list, second_list,float(args.offset),float(args.max_difference))    
      
          if args.first_only:
              for a,b in matches:
                  print("%f %s"%(a," ".join(first_list[a])))
          else:
              for a,b in matches:
                  print("%f %s %f %s"%(a," ".join(first_list[a]),b," ".join(second_list[b])))
      
    • Run the script from inside the dataset directory (where rgb.txt and depth.txt live) to create the association file.
      cd ~/Documents/rgbd_dataset_freiburg1_xyz
      python3 associate.py rgb.txt depth.txt > associations.txt
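
      Each line of associations.txt pairs an RGB frame with its nearest depth frame; the timestamps below are illustrative, but the four-column format is what the script prints:
      head -n 1 associations.txt
      # 1305031102.175304 rgb/1305031102.175304.png 1305031102.160407 depth/1305031102.160407.png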
      

Running ORB-SLAM3

  • Run ORB-SLAM3.
    cd ~/Documents/ORB_SLAM3
    ./Examples/RGB-D/rgbd_tum \
        Vocabulary/ORBvoc.txt \
        Examples/RGB-D/TUM1.yaml \
        ../rgbd_dataset_freiburg1_xyz \
        ../rgbd_dataset_freiburg1_xyz/associations.txt
    
    While it runs, a Pangolin viewer shows the tracking live. If CameraTrajectory.txt and KeyFrameTrajectory.txt exist afterwards, the run succeeded.
  • Check the results.
    • Install evo for visualization and evaluation.
      pip install evo --upgrade --no-binary evo
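
      If the install worked, the evo tools should be on your PATH:
      evo_traj --help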
      
    • Plot the estimated trajectory against the ground truth.
      evo_traj tum CameraTrajectory.txt --ref=../rgbd_dataset_freiburg1_xyz/groundtruth.txt -p --align
      
    • Compute the ATE.
      evo_ape tum ../rgbd_dataset_freiburg1_xyz/groundtruth.txt CameraTrajectory.txt -va -p
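
      evo_ape reports rmse/mean/median in meters; published RGB-D results on fr1/xyz are typically on the order of 1 cm ATE RMSE, so a value in that ballpark means everything is wired up correctly.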
      
