🐙

M5Stack LLM630 Compute Kitにログインしてみる

2025/01/23に公開

無線はまだよ

Telec通ってないのでまだ無線は使えない。通ったら特例申請で使える。
CEマークついてるけどそれを根拠に特例使えるかは不明。焦るな。

USB UARTからシリアル通信

一番簡単そうなUSB UARTからシリアル通信してみます。
右側のコネクタがUART用。接続すると起動したので、デバイスマネージャーで確認すると、CH9102デバイスとして認識されました。ドライバが入っていない場合は、CH9102のドライバをインストールしましょう。

ID:root
pass:root
で入れた

Ethernetもケーブル繋いでるのでIPアドレス確認したらちゃんとつながってる。

root@m5stack-kit:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 6e:46:28:dc:fc:f5 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.112/24 brd 192.168.1.255 scope global dynamic eth0
       valid_lft 28080sec preferred_lft 28080sec
    inet6 fe80::6c46:28ff:fedc:fcf5/64 scope link 
       valid_lft forever preferred_lft forever
3: sit0@NONE: <NOARP> mtu 1480 qdisc noop state DOWN group default qlen 1000
    link/sit 0.0.0.0 brd 0.0.0.0

EthernetからSSH

先ほど確認したIPアドレスにSSHでログインしてもOK。
RAMのユーザー領域が2GBあるので、VSCodeのRemote-SSHを使ってもよさそう。eMMCも22GB空いているのでまだ余裕があります。

root@m5stack-kit:~# free -m
               total        used        free      shared  buff/cache   available
Mem:            1997         179        1639           1         177        1746
Swap:              0           0           0
root@m5stack-kit:~# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/root        28G  6.8G   22G  25% /
tmpfs           999M     0  999M   0% /dev/shm
tmpfs           400M  1.2M  399M   1% /run
tmpfs           5.0M     0  5.0M   0% /run/lock
tmpfs           200M     0  200M   0% /run/user/0
root@m5stack-kit:~# ^C

探検

/opt/data/npu

いつものmobilenetv2とyolov5sと犬と猫

root@m5stack-kit:/opt/data/npu# tree
.
├── images
│   ├── cat.jpg
│   └── dog.jpg
└── models
    ├── mobilenetv2.axmodel
    └── yolov5s.axmodel

/opt/m5stack

root@m5stack-kit:/opt/m5stack# tree
.
├── bin
│   ├── llm_asr
│   ├── llm_audio
│   ├── llm_camera
│   ├── llm_depth_anything
│   ├── llm_kws
│   ├── llm_llm
│   ├── llm_melotts
│   ├── llm_sys
│   ├── llm_tts
│   ├── llm_vlm
│   └── llm_yolo
├── data
│   ├── audio
│   │   ├── hashes.txt
│   │   ├── hashes_.txt
│   │   ├── wakeup_en_us.wav
│   │   └── wakeup_zh_cn.wav
│   ├── depth-anything-ax630c
│   │   ├── depth_anything.axmodel
│   │   └── hashes.txt
│   ├── melotts_zh-cn
│   │   ├── decoder.axmodel
│   │   ├── encoder.ort
│   │   ├── g.bin
│   │   ├── hashes.txt
│   │   ├── lexicon.txt
│   │   ├── melotts_zh-cn.json
│   │   ├── tokens.txt
│   │   └── version
│   ├── models
│   │   ├── mode_depth-anything-ax630c.json
│   │   ├── mode_melotts-zh-cn.json
│   │   ├── mode_qwen2.5-0.5B-prefill-20e.json
│   │   ├── mode_sherpa-ncnn-streaming-zipformer-20M-2023-02-17.json
│   │   ├── mode_sherpa-ncnn-streaming-zipformer-zh-14M-2023-02-23.json
│   │   ├── mode_sherpa-onnx-kws-zipformer-gigaspeech-3.3M-2024-01-01.json
│   │   ├── mode_sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01.json
│   │   ├── mode_single-speaker-english-fast.json
│   │   ├── mode_single-speaker-fast.json
│   │   ├── mode_yolo11n-hand-pose.json
│   │   ├── mode_yolo11n-pose.json
│   │   ├── mode_yolo11n-seg.json
│   │   └── mode_yolo11n.json
│   ├── qwen2.5-0.5B-prefill-20e
│   │   ├── hashes.txt
│   │   ├── model.embed_tokens.weight.bfloat16.bin
│   │   ├── qwen.tiktoken
│   │   ├── qwen2.5-0.5B-prefill-20e.json
│   │   ├── qwen2_p128_l0_together.axmodel
│   │   ├── qwen2_p128_l10_together.axmodel
│   │   ├── qwen2_p128_l11_together.axmodel
│   │   ├── qwen2_p128_l12_together.axmodel
│   │   ├── qwen2_p128_l13_together.axmodel
│   │   ├── qwen2_p128_l14_together.axmodel
│   │   ├── qwen2_p128_l15_together.axmodel
│   │   ├── qwen2_p128_l16_together.axmodel
│   │   ├── qwen2_p128_l17_together.axmodel
│   │   ├── qwen2_p128_l18_together.axmodel
│   │   ├── qwen2_p128_l19_together.axmodel
│   │   ├── qwen2_p128_l1_together.axmodel
│   │   ├── qwen2_p128_l20_together.axmodel
│   │   ├── qwen2_p128_l21_together.axmodel
│   │   ├── qwen2_p128_l22_together.axmodel
│   │   ├── qwen2_p128_l23_together.axmodel
│   │   ├── qwen2_p128_l2_together.axmodel
│   │   ├── qwen2_p128_l3_together.axmodel
│   │   ├── qwen2_p128_l4_together.axmodel
│   │   ├── qwen2_p128_l5_together.axmodel
│   │   ├── qwen2_p128_l6_together.axmodel
│   │   ├── qwen2_p128_l7_together.axmodel
│   │   ├── qwen2_p128_l8_together.axmodel
│   │   ├── qwen2_p128_l9_together.axmodel
│   │   ├── qwen2_post.axmodel
│   │   └── version
│   ├── qwen2.5-1.5B-ax630c
│   │   ├── hashes.txt
│   │   ├── model.embed_tokens.weight.bfloat16.bin
│   │   ├── qwen2_p128_l0_together.axmodel
│   │   ├── qwen2_p128_l10_together.axmodel
│   │   ├── qwen2_p128_l11_together.axmodel
│   │   ├── qwen2_p128_l12_together.axmodel
│   │   ├── qwen2_p128_l13_together.axmodel
│   │   ├── qwen2_p128_l14_together.axmodel
│   │   ├── qwen2_p128_l15_together.axmodel
│   │   ├── qwen2_p128_l16_together.axmodel
│   │   ├── qwen2_p128_l17_together.axmodel
│   │   ├── qwen2_p128_l18_together.axmodel
│   │   ├── qwen2_p128_l19_together.axmodel
│   │   ├── qwen2_p128_l1_together.axmodel
│   │   ├── qwen2_p128_l20_together.axmodel
│   │   ├── qwen2_p128_l21_together.axmodel
│   │   ├── qwen2_p128_l22_together.axmodel
│   │   ├── qwen2_p128_l23_together.axmodel
│   │   ├── qwen2_p128_l24_together.axmodel
│   │   ├── qwen2_p128_l25_together.axmodel
│   │   ├── qwen2_p128_l26_together.axmodel
│   │   ├── qwen2_p128_l27_together.axmodel
│   │   ├── qwen2_p128_l2_together.axmodel
│   │   ├── qwen2_p128_l3_together.axmodel
│   │   ├── qwen2_p128_l4_together.axmodel
│   │   ├── qwen2_p128_l5_together.axmodel
│   │   ├── qwen2_p128_l6_together.axmodel
│   │   ├── qwen2_p128_l7_together.axmodel
│   │   ├── qwen2_p128_l8_together.axmodel
│   │   ├── qwen2_p128_l9_together.axmodel
│   │   ├── qwen2_post.axmodel
│   │   ├── tokenizer
│   │   │   ├── config.json
│   │   │   ├── configuration.json
│   │   │   ├── generation_config.json
│   │   │   ├── tokenizer.json
│   │   │   ├── tokenizer_config.json
│   │   │   └── vocab.json
│   │   └── version
│   ├── sherpa-ncnn-streaming-zipformer-20M-2023-02-17
│   │   ├── decoder_jit_trace-pnnx.ncnn.bin
│   │   ├── decoder_jit_trace-pnnx.ncnn.param
│   │   ├── encoder_jit_trace-pnnx.ncnn.bin
│   │   ├── encoder_jit_trace-pnnx.ncnn.param
│   │   ├── hashes.txt
│   │   ├── joiner_jit_trace-pnnx.ncnn.bin
│   │   ├── joiner_jit_trace-pnnx.ncnn.param
│   │   ├── sherpa-ncnn-streaming-zipformer-20M-2023-02-17.json
│   │   ├── tokens.txt
│   │   └── version
│   ├── sherpa-ncnn-streaming-zipformer-zh-14M-2023-02-23
│   │   ├── decoder_jit_trace-pnnx.ncnn.bin
│   │   ├── decoder_jit_trace-pnnx.ncnn.param
│   │   ├── encoder_jit_trace-pnnx.ncnn.bin
│   │   ├── encoder_jit_trace-pnnx.ncnn.param
│   │   ├── hashes.txt
│   │   ├── joiner_jit_trace-pnnx.ncnn.bin
│   │   ├── joiner_jit_trace-pnnx.ncnn.param
│   │   ├── sherpa-ncnn-streaming-zipformer-zh-14M-2023-02-23.json
│   │   ├── tokens.txt
│   │   └── version
│   ├── sherpa-onnx-kws-zipformer-gigaspeech-3.3M-2024-01-01
│   │   ├── bpe.model
│   │   ├── decoder-epoch-12-avg-2-chunk-16-left-64.ort
│   │   ├── encoder-epoch-12-avg-2-chunk-16-left-64.int8.ort
│   │   ├── hashes.txt
│   │   ├── joiner-epoch-12-avg-2-chunk-16-left-64.int8.ort
│   │   ├── keywords.txt
│   │   ├── sherpa-onnx-kws-zipformer-gigaspeech-3.3M-2024-01-01.json
│   │   ├── tokens.txt
│   │   └── version
│   ├── sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01
│   │   ├── decoder-epoch-12-avg-2-chunk-16-left-64.ort
│   │   ├── encoder-epoch-12-avg-2-chunk-16-left-64.int8.ort
│   │   ├── hashes.txt
│   │   ├── joiner-epoch-12-avg-2-chunk-16-left-64.int8.ort
│   │   ├── keywords.txt
│   │   ├── ort.py
│   │   ├── sherpa-onnx-kws-zipformer-wenetspeech-3.3M-2024-01-01.json
│   │   ├── tokens.txt
│   │   └── version
│   ├── single_speaker_english_fast
│   │   ├── hashes.txt
│   │   ├── single_speaker_english_fast.bin
│   │   ├── single_speaker_english_fast.json
│   │   └── version
│   ├── single_speaker_fast
│   │   ├── hashes.txt
│   │   ├── single_speaker_fast.bin
│   │   ├── single_speaker_fast.json
│   │   └── version
│   ├── yolo11n
│   │   ├── hashes.txt
│   │   ├── mode_yolo11n.json
│   │   ├── version
│   │   └── yolo11n.axmodel
│   ├── yolo11n-hand-pose
│   │   ├── hashes.txt
│   │   ├── mode_yolo11n-hand-pose.json
│   │   ├── version
│   │   └── yolo11n-hand-pose.axmodel
│   ├── yolo11n-pose
│   │   ├── hashes.txt
│   │   ├── mode_yolo11n-pose.json
│   │   ├── version
│   │   └── yolo11n-pose.axmodel
│   └── yolo11n-seg
│       ├── hashes.txt
│       ├── mode_yolo11n-seg.json
│       ├── version
│       └── yolo11n-seg.axmodel
├── lib
│   ├── libkaldi-native-fbank-core.so
│   ├── libncnn.so
│   ├── libonnxruntime.so.1.14.0
│   ├── libsherpa-ncnn-core.so
│   └── libtts.so
├── libhv.20231122.log
├── libhv.20250113.log
├── scripts
│   ├── hashes.txt
│   ├── internvl2-1b-ax630c_tokenizer.py
│   ├── llama3.2-1B-prefill-ax630c_tokenizer.py
│   ├── openbuddy-llama3.2-1B-ax630c_tokenizer.py
│   ├── qwen2.5-coder-0.5B-ax630c_tokenizer.py
│   └── text2token.py
└── share
    ├── _tokenizer.py
    ├── audio.json
    ├── camera.json
    ├── internvl2-1B-ax630c_tokenizer.py
    ├── llama3.2-1B-prefill-ax630c_tokenizer.py
    ├── openbuddy-llama3.2-1B-ax630c_tokenizer.py
    ├── qwen2.5-coder-0.5B-ax630c_tokenizer.py
    ├── static_file
    └── sys_config.json

22 directories, 190 files

Discussion