
Update RK3562/RK3566/RK3568/RK3588/RV1103/RV1106 NPU SDK to 1.5.2

Signed-off-by: Randall Zhuo <randall.zhuo@rock-chips.com>
v1.5.2
Randall Zhuo 2 years ago
commit f29bfee210
  1. 10
      README.md
  2. BIN
      doc/RKNN_Compiler_Support_Operator_List_v1.5.0.pdf
  3. BIN
      doc/RKNN_Compiler_Support_Operator_List_v1.5.2.pdf
  4. 17
      doc/RKNN_Dynamic_Shape_Usage.md
  5. BIN
      doc/Rockchip_Quick_Start_RKNN_SDK_V1.5.0_CN.pdf
  6. BIN
      doc/Rockchip_Quick_Start_RKNN_SDK_V1.5.2_CN.pdf
  7. BIN
      doc/Rockchip_Quick_Start_RKNN_SDK_V1.5.2_EN.pdf
  8. BIN
      doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.0_CN.pdf
  9. BIN
      doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.0_EN.pdf
  10. BIN
      doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.2_CN.pdf
  11. BIN
      doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.2_EN.pdf
  12. BIN
      doc/Rockchip_RV1106_Quick_Start_RKNN_SDK_V1.5.0_CN.pdf
  13. BIN
      doc/Rockchip_RV1106_Quick_Start_RKNN_SDK_V1.5.2_CN.pdf
  14. BIN
      doc/Rockchip_RV1106_Quick_Start_RKNN_SDK_V1.5.2_EN.pdf
  15. 16
      examples/README.md
  16. 7
      examples/README_CN.md
  17. 5
      examples/RV1106_RV1103/rknn_mobilenet_demo/README.md
  18. 34
      examples/RV1106_RV1103/rknn_mobilenet_demo/README_CN.md
  19. 33
      examples/RV1106_RV1103/rknn_yolov5_demo/README.md
  20. 46
      examples/RV1106_RV1103/rknn_yolov5_demo/README_CN.md
  21. BIN
      examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/bus.jpg
  22. 1
      examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/dataset.txt
  23. 55
      examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx2rknn.py
  24. BIN
      examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx_models/yolov5s.onnx
  25. BIN
      examples/RV1106_RV1103/rknn_yolov5_demo/model/RV1106/yolov5s-640-640.rknn
  26. 97
      examples/RV1106_RV1103/rknn_yolov5_demo/src/main.cc
  27. 95
      examples/RV1106_RV1103/rknn_yolov5_demo/src/postprocess.cc
  28. 90
      examples/librknn_api_android_demo/README.md
  29. 73
      examples/librknn_api_android_demo/README_CN.md
  30. 2
      examples/rknn_api_demo/README.md
  31. 76
      examples/rknn_api_demo/README_CN.md
  32. 8
      examples/rknn_benchmark/README.md
  33. 87
      examples/rknn_benchmark/README_CN.md
  34. 4
      examples/rknn_benchmark/src/rknn_benchmark.cpp
  35. 2
      examples/rknn_common_test/README.md
  36. 64
      examples/rknn_common_test/README_CN.md
  37. 40
      examples/rknn_dynamic_shape_input_demo/CMakeLists.txt
  38. 83
      examples/rknn_dynamic_shape_input_demo/README.md
  39. 52
      examples/rknn_dynamic_shape_input_demo/README_CN.md
  40. BIN
      examples/rknn_dynamic_shape_input_demo/model/RK3562/mobilenet_v2.rknn
  41. BIN
      examples/rknn_dynamic_shape_input_demo/model/RK3566_RK3568/mobilenet_v2.rknn
  42. BIN
      examples/rknn_dynamic_shape_input_demo/model/RK3588/mobilenet_v2.rknn
  43. 20
      examples/rknn_dynamic_shape_input_demo/src/rknn_dynshape_inference.cc
  44. 20
      examples/rknn_dynamic_shape_input_demo/src/rknn_dynshape_inference_zero_copy.cc
  45. 904
      examples/rknn_dynamic_shape_input_demo/src/rknn_dynshape_inference_zero_copy_alloc_outside.cc
  46. 64
      examples/rknn_internal_mem_reuse_demo/README.md
  47. 112
      examples/rknn_internal_mem_reuse_demo/README_CN.md
  48. 12
      examples/rknn_matmul_api_demo/CMakeLists.txt
  49. 31
      examples/rknn_matmul_api_demo/README.md
  50. 74
      examples/rknn_matmul_api_demo/README_CN.md
  51. 185
      examples/rknn_matmul_api_demo/src/Float16.h
  52. 149
      examples/rknn_matmul_api_demo/src/rknn_matmul_api_demo.cpp
  53. 5
      examples/rknn_mobilenet_demo/README.md
  54. 68
      examples/rknn_mobilenet_demo/README_CN.md
  55. 3
      examples/rknn_mobilenet_demo/src/main.cc
  56. 2
      examples/rknn_multiple_input_demo/README.md
  57. 69
      examples/rknn_multiple_input_demo/README_CN.md
  58. 39
      examples/rknn_yolov5_android_apk_demo/README.md
  59. 78
      examples/rknn_yolov5_android_apk_demo/README_CN.md
  60. 13
      examples/rknn_yolov5_android_apk_demo/app/src/main/cpp/post_process.cc
  61. BIN
      examples/rknn_yolov5_android_apk_demo/app/src/main/res/raw/yolov5s_rk3562.rknn
  62. BIN
      examples/rknn_yolov5_android_apk_demo/app/src/main/res/raw/yolov5s_rk3566.rknn
  63. BIN
      examples/rknn_yolov5_android_apk_demo/app/src/main/res/raw/yolov5s_rk3588.rknn
  64. 68
      examples/rknn_yolov5_demo/README.md
  65. 127
      examples/rknn_yolov5_demo/README_CN.md
  66. 17
      examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/README.md
  67. 18
      examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/README_CN.md
  68. 6
      examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx2rknn.py
  69. BIN
      examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx_models/yolov5s_relu.onnx
  70. BIN
      examples/rknn_yolov5_demo/model/RK3562/yolov5s-640-640.rknn
  71. BIN
      examples/rknn_yolov5_demo/model/RK3566_RK3568/yolov5s-640-640.rknn
  72. BIN
      examples/rknn_yolov5_demo/model/RK3588/yolov5s-640-640.rknn
  73. BIN
      examples/rknn_yolov5_demo/model/RV110X/yolov5s-640-640.rknn
  74. 2
      examples/rknn_yolov5_demo/src/main_video.cc
  75. 13
      examples/rknn_yolov5_demo/src/postprocess.cc
  76. 16
      examples/rknn_yolov5_demo/utils/mpp_decoder.cpp
  77. 3
      examples/rknn_yolov5_demo/utils/mpp_encoder.cpp
  78. BIN
      runtime/RK356X/Android/librknn_api/arm64-v8a/librknnrt.so
  79. BIN
      runtime/RK356X/Android/librknn_api/armeabi-v7a/librknnrt.so
  80. 29
      runtime/RK356X/Android/librknn_api/include/rknn_api.h
  81. BIN
      runtime/RK356X/Android/rknn_server/arm/rknn_server
  82. BIN
      runtime/RK356X/Android/rknn_server/arm64/rknn_server
  83. BIN
      runtime/RK356X/Linux/librknn_api/aarch64/librknnrt.so
  84. BIN
      runtime/RK356X/Linux/librknn_api/armhf/librknnrt.so
  85. 29
      runtime/RK356X/Linux/librknn_api/include/rknn_api.h
  86. BIN
      runtime/RK356X/Linux/rknn_server/aarch64/usr/bin/rknn_server
  87. BIN
      runtime/RK356X/Linux/rknn_server/armhf/usr/bin/rknn_server
  88. BIN
      runtime/RK3588/Android/librknn_api/arm64-v8a/librknnrt.so
  89. BIN
      runtime/RK3588/Android/librknn_api/armeabi-v7a/librknnrt.so
  90. 29
      runtime/RK3588/Android/librknn_api/include/rknn_api.h
  91. BIN
      runtime/RK3588/Android/rknn_server/arm/rknn_server
  92. BIN
      runtime/RK3588/Android/rknn_server/arm64/rknn_server
  93. BIN
      runtime/RK3588/Linux/librknn_api/aarch64/librknnrt.so
  94. BIN
      runtime/RK3588/Linux/librknn_api/armhf/librknnrt.so
  95. 29
      runtime/RK3588/Linux/librknn_api/include/rknn_api.h
  96. BIN
      runtime/RK3588/Linux/rknn_server/aarch64/usr/bin/rknn_server
  97. BIN
      runtime/RK3588/Linux/rknn_server/armhf/usr/bin/rknn_server
  98. BIN
      runtime/RV1106/Linux/librknn_api/armhf/librknnmrt.a
  99. BIN
      runtime/RV1106/Linux/librknn_api/armhf/librknnmrt.so
  100. 29
      runtime/RV1106/Linux/librknn_api/include/rknn_api.h
Some files were not shown because too many files have changed in this diff.

10
README.md

@@ -20,7 +20,15 @@ Note:
## ReleaseLog
# 1.5.0
### 1.5.2
- Improve dynamic shape support
- Improve matmul API support
- Add GPU back-end implementations for some operators such as matmul
- Improve transformer support
- Reduce rknn_init memory usage
- Reduce rknn_init time cost
### 1.5.0
- Support RK3562
- Support more NPU operator fusion, such as Conv-Silu / Conv-Swish / Conv-HardSwish / Conv-Sigmoid / Conv-Gelu, etc.

BIN
doc/RKNN_Compiler_Support_Operator_List_v1.5.0.pdf

Binary file not shown.

BIN
doc/RKNN_Compiler_Support_Operator_List_v1.5.2.pdf

Binary file not shown.

17
doc/RKNN_Dynamic_Shape_Usage.md

@@ -48,7 +48,7 @@
## 4. Setting the input shapes
After loading an RKNN model with dynamic-shape inputs, you can modify the input shapes at runtime. Calling the rknn_set_input_shape interface with an rknn_tensor_attr pointer containing the shape information sets the shape for the current inference. For example, to set the inputs with shapes obtained from rknn_query, you can use the following code:
After loading an RKNN model with dynamic-shape inputs, you can modify the input shapes at runtime. Calling the rknn_set_input_shapes interface with an array of rknn_tensor_attr covering all inputs sets the shapes for the current inference. For example, to set the inputs with shapes obtained from rknn_query, you can use the following code:
```
for (int s = 0; s < shape_num; ++s)
@@ -59,17 +59,16 @@
{
input_attrs[i].dims[j] = shape_range[i].dyn_range[s][j];
}
ret = rknn_set_input_shape(ctx, &input_attrs[i]);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shape error! ret=%d\n", ret);
return -1;
}
}
ret = rknn_set_input_shapes(ctx, io_num.n_input, input_attrs);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shapes error! ret=%d\n", ret);
return -1;
}
}
```
Here, shape_num is the number of supported shapes, shape_range[i] is the rknn_input_range struct of the i-th input, and input_attrs[i] is the rknn_tensor_attr struct of the i-th input.
Here, shape_num is the number of supported shapes, shape_range[i] is the rknn_input_range struct of the i-th input, io_num.n_input is the number of inputs, and input_attrs is the array of rknn_tensor_attr structs for all model inputs.
After setting the input shapes, you can call rknn_query again to query the input and output shapes that are in effect for the current inference. For example, you can use the following code:
```
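// A minimal sketch of the query step, assuming the RKNN_QUERY_CURRENT_INPUT_ATTR
// query type from rknn_api.h is used to read back the shapes now in effect:
rknn_tensor_attr cur_input_attrs[io_num.n_input];
memset(cur_input_attrs, 0, io_num.n_input * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_input; ++i)
{
    cur_input_attrs[i].index = i;
    // query the input shape that applies to the current inference
    ret = rknn_query(ctx, RKNN_QUERY_CURRENT_INPUT_ATTR, &(cur_input_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC)
    {
        fprintf(stderr, "rknn_query error! ret=%d\n", ret);
        return -1;
    }
}
```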

BIN
doc/Rockchip_Quick_Start_RKNN_SDK_V1.5.0_CN.pdf

Binary file not shown.

BIN
doc/Rockchip_Quick_Start_RKNN_SDK_V1.5.2_CN.pdf

Binary file not shown.

BIN
doc/Rockchip_Quick_Start_RKNN_SDK_V1.5.2_EN.pdf

Binary file not shown.

BIN
doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.0_CN.pdf

Binary file not shown.

BIN
doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.0_EN.pdf

Binary file not shown.

BIN
doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.2_CN.pdf

Binary file not shown.

BIN
doc/Rockchip_RKNPU_User_Guide_RKNN_API_V1.5.2_EN.pdf

Binary file not shown.

BIN
doc/Rockchip_RV1106_Quick_Start_RKNN_SDK_V1.5.0_CN.pdf

Binary file not shown.

BIN
doc/Rockchip_RV1106_Quick_Start_RKNN_SDK_V1.5.2_CN.pdf

Binary file not shown.

BIN
doc/Rockchip_RV1106_Quick_Start_RKNN_SDK_V1.5.2_EN.pdf

Binary file not shown.

16
examples/README.md

@@ -1,7 +1,15 @@
1、Currently RV1106/RV1103 only support two demos, rknn_mobilenet_demo and rknn_yolov5_demo, under the examples/RV1106_RV1103 directory; **the build scripts in these demos use RV1106 to refer to both RV1106 and RV1103**
2、RK356X and RK3588 support all demos under the examples directory except the examples/RV1106_RV1103 folder
3、**On RV1106/RV1103, LD_LIBRARY_PATH must be a full path**, e.g.: export LD_LIBRARY_PATH=/userdata/lib
1. Currently, only two demos are supported on the RV1106/RV1103 platform, located under the examples/RV1106_RV1103 directory. **In these demos, the RV1106 build scripts refer to both RV1106 and RV1103.**
2. All demos under the examples directory, except those under examples/RV1106_RV1103, are supported on the RK356X/RK3588 platforms.
3. **When setting LD_LIBRARY_PATH on the RV1106 or RV1103**, it must be a full path. For example,
```shell
export LD_LIBRARY_PATH=/userdata/lib
```
For RK356X and RK3588, LD_LIBRARY_PATH can be set to either a full path or a relative path.
4、On RK356X and RK3588, LD_LIBRARY_PATH can be either a full path or a relative path

7
examples/README_CN.md

@@ -0,0 +1,7 @@
1、Currently RV1106/RV1103 only support two demos, rknn_mobilenet_demo and rknn_yolov5_demo, under the examples/RV1106_RV1103 directory; **the build scripts in these demos use RV1106 to refer to both RV1106 and RV1103**
2、RK356X and RK3588 support all demos under the examples directory except the examples/RV1106_RV1103 folder
3、**On RV1106/RV1103, LD_LIBRARY_PATH must be a full path**, e.g.: export LD_LIBRARY_PATH=/userdata/lib
4、On RK356X and RK3588, LD_LIBRARY_PATH can be either a full path or a relative path

5
examples/RV1106_RV1103/rknn_mobilenet_demo/README.md

@@ -1,6 +1,7 @@
The following <TARGET_PLATFORM> is RV1106 or RV1103
The following <TARGET_PLATFORM> is RV1106 or RV1103
# arm Linux Demo
# Arm Linux Demo
## build

34
examples/RV1106_RV1103/rknn_mobilenet_demo/README_CN.md

@@ -0,0 +1,34 @@
The following <TARGET_PLATFORM> is RV1106 or RV1103
# Arm Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` for the target platform,
then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the build output to `/userdata`
```
adb push install/rknn_mobilenet_demo_Linux /userdata/
```
## Run
```
adb shell
cd /userdata/rknn_mobilenet_demo_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_mobilenet_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```

33
examples/RV1106_RV1103/rknn_yolov5_demo/README.md

@@ -1,39 +1,40 @@
# Yolo-v5 demo
# Export the rknn model
# Export RKNN Model
1. Enter the examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5 directory and run the following commands to generate the rknn model:
Please refer to https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo
```sh
cd examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5
python onnx2rknn.py
```
## arm Linux Demo
### Build
## Arm Linux Demo
### Compiling and Building
The `build-linux_RV1106.sh` script can be used to build the demo for both RV1106 and RV1103 targets.
The build script for RV1106/RV1103 is `build-linux_RV1106.sh`; set `RK_RV1106_TOOLCHAIN` to the path of the cross-compiler directory, for example:
Change the cross-compiler path by setting `RK_RV1106_TOOLCHAIN`, as shown below:
```sh
export RK_RV1106_TOOLCHAIN=~/opts/toolchain/arm-rockchip830-linux-uclibcgnueabihf/bin/arm-rockchip830-linux-uclibcgnueabihf
```
Then execute:
Then run the script:
```sh
./build-linux_RV1106.sh
```
### Push the executable to the board
Note: The RV1106 and RV1103 require the 'arm-rockchip830-linux-uclibcgnueabihf' compiler to build this demo and other applications.
Connect the board's USB port to the PC and push the whole demo directory to `/userdata`:
### Push build output files to the board
Connect the USB port to the PC and push the whole demo folder to the '/userdata' directory:
```sh
adb push install/rknn_yolov5_demo_Linux /userdata/
```
### Run
### Running
```sh
adb shell
@@ -43,4 +44,8 @@ export LD_LIBRARY_PATH=/userdata/rknn_yolov5_demo_Linux/lib
./rknn_yolov5_demo model/RV1106/yolov5s-640-640.rknn model/bus.jpg
```
Note: LD_LIBRARY_PATH must be a full path
Note:
- LD_LIBRARY_PATH must use the full path
- For performance reasons, the demo sets the output format of the RKNN model to **RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR** to obtain better inference performance. The model output buffer is then arranged in NHWC order: for example, if the original shape of the first output is **1,255,80,80**, the shape returned by RKNN is 1,80,80,255. The post-processing in this demo is optimized and adjusted for this order; see the indexing sketch below.
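As a rough illustration of what the NHWC output layout means for indexing (a hedged sketch: the buffer name `out` and the hard-coded 80/255 sizes are illustrative, not names from the demo code):
```c
// Read logical NCHW element (c, h, w) of the first output (1,255,80,80)
// from the NHWC-ordered buffer (1,80,80,255) returned when using
// RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR:
int8_t v = out[(h * 80 + w) * 255 + c]; // NCHW layout would be out[(c * 80 + h) * 80 + w]
```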

46
examples/RV1106_RV1103/rknn_yolov5_demo/README_CN.md

@@ -0,0 +1,46 @@
# Yolo-v5 demo
# Export the rknn model
Please refer to https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo
## Arm Linux Demo
### Build
The build script for RV1106/RV1103 is `build-linux_RV1106.sh`; set `RK_RV1106_TOOLCHAIN` to the path of the cross-compiler directory, for example:
```sh
export RK_RV1106_TOOLCHAIN=~/opts/toolchain/arm-rockchip830-linux-uclibcgnueabihf/bin/arm-rockchip830-linux-uclibcgnueabihf
```
Then execute:
```sh
./build-linux_RV1106.sh
```
### Push the executable to the board
Connect the board's USB port to the PC and push the whole demo directory to `/userdata`:
```sh
adb push install/rknn_yolov5_demo_Linux /userdata/
```
### Run
```sh
adb shell
cd /userdata/rknn_yolov5_demo_Linux/
export LD_LIBRARY_PATH=/userdata/rknn_yolov5_demo_Linux/lib
./rknn_yolov5_demo model/RV1106/yolov5s-640-640.rknn model/bus.jpg
```
Note:
- LD_LIBRARY_PATH must be a full path
- For performance reasons, the demo sets the output format of the RKNN model to RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR to obtain better inference performance. The model output buffer is then arranged in NHWC order: for example, if the original shape of the first output is 1,255,80,80, the shape returned by RKNN is 1,80,80,255. The post-processing in this demo is optimized and adjusted for this order.

BIN
examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/bus.jpg

Binary file not shown.


1
examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/dataset.txt

@@ -1 +0,0 @@
bus.jpg

55
examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx2rknn.py

@@ -1,55 +0,0 @@
import cv2
import numpy as np
from rknn.api import RKNN
import os

if __name__ == '__main__':
    exp = 'yolov5s'
    Width = 640
    Height = 640
    MODEL_PATH = './onnx_models/yolov5s.onnx'
    NEED_BUILD_MODEL = True
    # NEED_BUILD_MODEL = False
    im_file = './bus.jpg'

    # Create RKNN object
    rknn = RKNN()

    OUT_DIR = "rknn_models"
    RKNN_MODEL_PATH = './{}/{}.rknn'.format(OUT_DIR, exp+'-'+str(Width)+'-'+str(Height))
    if NEED_BUILD_MODEL:
        DATASET = './dataset.txt'
        rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform="rv1106")
        # Load model
        print('--> Loading model')
        ret = rknn.load_onnx(MODEL_PATH, outputs=['334', '353', '372'])
        if ret != 0:
            print('load model failed!')
            exit(ret)
        print('done')

        # Build model
        print('--> Building model')
        ret = rknn.build(do_quantization=True, dataset=DATASET)
        if ret != 0:
            print('build model failed.')
            exit(ret)
        print('done')

        # Export rknn model
        if not os.path.exists(OUT_DIR):
            os.mkdir(OUT_DIR)
        print('--> Export RKNN model: {}'.format(RKNN_MODEL_PATH))
        ret = rknn.export_rknn(RKNN_MODEL_PATH)
        if ret != 0:
            print('Export rknn model failed.')
            exit(ret)
        print('done')
    else:
        ret = rknn.load_rknn(RKNN_MODEL_PATH)

    rknn.release()

BIN
examples/RV1106_RV1103/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx_models/yolov5s.onnx

Binary file not shown.

BIN
examples/RV1106_RV1103/rknn_yolov5_demo/model/RV1106/yolov5s-640-640.rknn

Binary file not shown.

97
examples/RV1106_RV1103/rknn_yolov5_demo/src/main.cc

@@ -147,65 +147,6 @@ static unsigned char *load_image(const char *image_path, rknn_tensor_attr *input
return image_data;
}
// The NPU output of a quantized model is int8, so post-processing must handle int8 data
// The following code converts an int8 NC1HWC2 layout to an int8 NCHW layout
int NC1HWC2_int8_to_NCHW_int8(const int8_t *src, int8_t *dst, int *dims, int channel, int h, int w)
{
int batch = dims[0];
int C1 = dims[1];
int C2 = dims[4];
int hw_src = dims[2] * dims[3];
int hw_dst = h * w;
for (int i = 0; i < batch; i++)
{
src = src + i * C1 * hw_src * C2;
dst = dst + i * channel * hw_dst;
for (int c = 0; c < channel; ++c)
{
int plane = c / C2;
const int8_t *src_c = plane * hw_src * C2 + src;
int offset = c % C2;
for (int cur_h = 0; cur_h < h; ++cur_h)
for (int cur_w = 0; cur_w < w; ++cur_w)
{
int cur_hw = cur_h * w + cur_w;
dst[c * hw_dst + cur_h * w + cur_w] = src_c[C2 * cur_hw + offset];
}
}
}
return 0;
}
// The NPU output of a quantized model is int8, so post-processing must handle int8 data
// The following code converts an int8 NC1HWC2 layout to a float NCHW layout
int NC1HWC2_int8_to_NCHW_float(const int8_t *src, float *dst, int *dims, int channel, int h, int w, int zp, float scale)
{
int batch = dims[0];
int C1 = dims[1];
int C2 = dims[4];
int hw_src = dims[2] * dims[3];
int hw_dst = h * w;
for (int i = 0; i < batch; i++)
{
src = src + i * C1 * hw_src * C2;
dst = dst + i * channel * hw_dst;
for (int c = 0; c < channel; ++c)
{
int plane = c / C2;
const int8_t *src_c = plane * hw_src * C2 + src;
int offset = c % C2;
for (int cur_h = 0; cur_h < h; ++cur_h)
for (int cur_w = 0; cur_w < w; ++cur_w)
{
int cur_hw = cur_h * w + cur_w;
dst[c * hw_dst + cur_h * w + cur_w] = (src_c[C2 * cur_hw + offset] - zp) * scale; // int8-->float
}
}
}
return 0;
}
/*-------------------------------------------
Main Functions
@@ -299,7 +240,7 @@ int main(int argc, char *argv[])
{
output_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_NATIVE_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
ret = rknn_query(ctx, RKNN_QUERY_NATIVE_NHWC_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
@@ -408,39 +349,6 @@ int main(int argc, char *argv[])
printf("%4d: Elapse Time = %.2fms, FPS = %.2f\n", i, elapse_us / 1000.f, 1000.f * 1000.f / elapse_us);
}
printf("output origin tensors:\n");
rknn_tensor_attr orig_output_attrs[io_num.n_output];
memset(orig_output_attrs, 0, io_num.n_output * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_output; i++)
{
orig_output_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(orig_output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&orig_output_attrs[i]);
}
int8_t *output_mems_nchw[io_num.n_output];
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
int size = orig_output_attrs[i].size_with_stride;
output_mems_nchw[i] = (int8_t *)malloc(size);
}
for (uint32_t i = 0; i < io_num.n_output; i++)
{
int channel = orig_output_attrs[i].dims[1];
int h = orig_output_attrs[i].n_dims > 2 ? orig_output_attrs[i].dims[2] : 1;
int w = orig_output_attrs[i].n_dims > 3 ? orig_output_attrs[i].dims[3] : 1;
int hw = h * w;
NC1HWC2_int8_to_NCHW_int8((int8_t *)output_mems[i]->virt_addr, (int8_t *)output_mems_nchw[i], (int *)output_attrs[i].dims,
channel, h, w);
}
int model_width = 0;
int model_height = 0;
if (input_attrs[0].fmt == RKNN_TENSOR_NCHW)
@@ -468,7 +376,7 @@ int main(int argc, char *argv[])
out_zps.push_back(output_attrs[i].zp);
}
post_process((int8_t *)output_mems_nchw[0], (int8_t *)output_mems_nchw[1], (int8_t *)output_mems_nchw[2], 640, 640,
post_process((int8_t *)output_mems[0]->virt_addr, (int8_t *)output_mems[1]->virt_addr, (int8_t *)output_mems[2]->virt_addr, 640, 640,
box_conf_threshold, nms_threshold, scale_w, scale_h, out_zps, out_scales, &detect_result_group);
char text[256];
@@ -487,7 +395,6 @@ int main(int argc, char *argv[])
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
rknn_destroy_mem(ctx, output_mems[i]);
free(output_mems_nchw[i]);
}
// destroy

95
examples/RV1106_RV1103/rknn_yolov5_demo/src/postprocess.cc

@@ -211,8 +211,7 @@ static int process(int8_t *input, int *anchor, int grid_h, int grid_w, int heigh
int validCount = 0;
int grid_len = grid_h * grid_w;
float thres = unsigmoid(threshold);
int8_t thres_i8 = qnt_f32_to_affine(thres, zp, scale);
int8_t thres_i8 = qnt_f32_to_affine(threshold, zp, scale);
for (int a = 0; a < 3; a++)
{
for (int i = 0; i < grid_h; i++)
@@ -224,10 +223,10 @@ static int process(int8_t *input, int *anchor, int grid_h, int grid_w, int heigh
{
int offset = (PROP_BOX_SIZE * a) * grid_len + i * grid_w + j;
int8_t *in_ptr = input + offset;
float box_x = sigmoid(deqnt_affine_to_f32(*in_ptr, zp, scale)) * 2.0 - 0.5;
float box_y = sigmoid(deqnt_affine_to_f32(in_ptr[grid_len], zp, scale)) * 2.0 - 0.5;
float box_w = sigmoid(deqnt_affine_to_f32(in_ptr[2 * grid_len], zp, scale)) * 2.0;
float box_h = sigmoid(deqnt_affine_to_f32(in_ptr[3 * grid_len], zp, scale)) * 2.0;
float box_x = (deqnt_affine_to_f32(*in_ptr, zp, scale)) * 2.0 - 0.5;
float box_y = (deqnt_affine_to_f32(in_ptr[grid_len], zp, scale)) * 2.0 - 0.5;
float box_w = (deqnt_affine_to_f32(in_ptr[2 * grid_len], zp, scale)) * 2.0;
float box_h = (deqnt_affine_to_f32(in_ptr[3 * grid_len], zp, scale)) * 2.0;
box_x = (box_x + j) * (float)stride;
box_y = (box_y + i) * (float)stride;
box_w = box_w * box_w * (float)anchor[a * 2];
@@ -251,7 +250,83 @@ static int process(int8_t *input, int *anchor, int grid_h, int grid_w, int heigh
boxes.push_back(box_y);
boxes.push_back(box_w);
boxes.push_back(box_h);
objProbs.push_back(sigmoid(deqnt_affine_to_f32(maxClassProbs, zp, scale))* sigmoid(deqnt_affine_to_f32(box_confidence, zp, scale)));
objProbs.push_back((deqnt_affine_to_f32(maxClassProbs, zp, scale))* (deqnt_affine_to_f32(box_confidence, zp, scale)));
classId.push_back(maxClassId);
validCount++;
}
}
}
}
}
return validCount;
}
static int process_native_nhwc(int8_t *input, int *anchor, int grid_h, int grid_w, int height, int width, int stride,
std::vector<float> &boxes, std::vector<float> &boxScores, std::vector<int> &classId,
float threshold, int32_t zp, float scale)
{
int validCount = 0;
int8_t thres_i8 = qnt_f32_to_affine(threshold, zp, scale);
int anchor_per_branch = 3;
// The new driver no longer has alignment requirements
// int align_c = get_align(PROP_BOX_SIZE*anchor_per_branch, 16);
int align_c = PROP_BOX_SIZE*anchor_per_branch;
// printf("align_c %d\n", align_c);
for (int h=0; h < grid_h; h++){
for (int w=0; w < grid_w; w++){
for (int a=0; a < anchor_per_branch; a++){
int hw_offset = h*grid_w*align_c + w*align_c + a*PROP_BOX_SIZE;
// int hw_offset = h*grid_w*anchor_per_branch*PROP_BOX_SIZE + w*anchor_per_branch*PROP_BOX_SIZE + a*PROP_BOX_SIZE;
int8_t *hw_ptr = input + hw_offset;
int8_t box_confidence = hw_ptr[4];
if (box_confidence >= thres_i8){
// printf("box_conf %d, thres_i8 %d\n", box_confidence, thres_i8);
int8_t maxClassProbs = hw_ptr[5];
int maxClassId = 0;
for (int k = 1; k < OBJ_CLASS_NUM; ++k)
{
int8_t prob = hw_ptr[5 + k];
if (prob > maxClassProbs)
{
maxClassId = k;
maxClassProbs = prob;
}
}
// printf("box_conf %d, thres_i8 %d, maxClassProbs %d\n", box_confidence, thres_i8, maxClassProbs);
float box_conf_f32 = deqnt_affine_to_f32(box_confidence, zp, scale);
float class_prob_f32 = deqnt_affine_to_f32(maxClassProbs, zp, scale);
float limit_score = box_conf_f32* class_prob_f32;
if (limit_score > threshold){
float box_x, box_y, box_w, box_h;
box_x = deqnt_affine_to_f32(hw_ptr[0], zp, scale) * 2.0 - 0.5;
box_y = deqnt_affine_to_f32(hw_ptr[1], zp, scale) * 2.0 - 0.5;
box_w = deqnt_affine_to_f32(hw_ptr[2], zp, scale) * 2.0;
box_h = deqnt_affine_to_f32(hw_ptr[3], zp, scale) * 2.0;
box_w = box_w * box_w;
box_h = box_h * box_h;
box_x = (box_x + w) * (float)stride;
box_y = (box_y + h) * (float)stride;
box_w *= (float)anchor[a * 2];
box_h *= (float)anchor[a * 2 + 1];
box_x -= (box_w / 2.0);
box_y -= (box_h / 2.0);
boxes.push_back(box_x);
boxes.push_back(box_y);
boxes.push_back(box_w);
boxes.push_back(box_h);
boxScores.push_back(limit_score);
classId.push_back(maxClassId);
validCount++;
}
@@ -290,7 +365,7 @@ int post_process(int8_t *input0, int8_t *input1, int8_t *input2, int model_in_h,
int grid_h0 = model_in_h / stride0;
int grid_w0 = model_in_w / stride0;
int validCount0 = 0;
validCount0 = process(input0, (int *)anchor0, grid_h0, grid_w0, model_in_h, model_in_w,
validCount0 = process_native_nhwc(input0, (int *)anchor0, grid_h0, grid_w0, model_in_h, model_in_w,
stride0, filterBoxes, objProbs, classId, conf_threshold, qnt_zps[0], qnt_scales[0]);
// stride 16
@@ -298,7 +373,7 @@ int post_process(int8_t *input0, int8_t *input1, int8_t *input2, int model_in_h,
int grid_h1 = model_in_h / stride1;
int grid_w1 = model_in_w / stride1;
int validCount1 = 0;
validCount1 = process(input1, (int *)anchor1, grid_h1, grid_w1, model_in_h, model_in_w,
validCount1 = process_native_nhwc(input1, (int *)anchor1, grid_h1, grid_w1, model_in_h, model_in_w,
stride1, filterBoxes, objProbs, classId, conf_threshold, qnt_zps[1], qnt_scales[1]);
// stride 32
@@ -306,7 +381,7 @@ int post_process(int8_t *input0, int8_t *input1, int8_t *input2, int model_in_h,
int grid_h2 = model_in_h / stride2;
int grid_w2 = model_in_w / stride2;
int validCount2 = 0;
validCount2 = process(input2, (int *)anchor2, grid_h2, grid_w2, model_in_h, model_in_w,
validCount2 = process_native_nhwc(input2, (int *)anchor2, grid_h2, grid_w2, model_in_h, model_in_w,
stride2, filterBoxes, objProbs, classId, conf_threshold, qnt_zps[2], qnt_scales[2]);
int validCount = validCount0 + validCount1 + validCount2;

90
examples/librknn_api_android_demo/README.md

@@ -1,73 +1,69 @@
# Description
There are two ways to call the RKNN API on the Android platform:
1) The application links librknnrt.so directly.
2) The application links librknn_api_android.so, which is implemented on top of Android HIDL.
Android devices that need to pass CTS/VTS testing can use the RKNN API implemented on top of Android HIDL. For devices that do not need to pass CTS/VTS testing, linking librknnrt.so directly is recommended: the call chain of each interface is shorter, which gives better performance.
# Instructions
The code of the HIDL-based RKNN API is located in the vendor/rockchip/hardware/interfaces/neuralnetworks directory of the RK3566_RK3568/RK3588 Android system SDK. After the Android system build completes, several NPU-related libraries are generated (applications only need to link librknn_api_android.so).
There are two ways to use the RKNN API on the Android platform:
**This example applies to librknn_api_android.so.**
1. Directly link to librknnrt.so.
2. Link to librknn_api_android.so, which is implemented based on Android platform HIDL.
For Android devices that need to pass CTS/VTS testing, it is recommended to use the RKNN API implemented on top of Android HIDL. For devices that do not need to pass CTS/VTS testing, it is suggested to link librknnrt.so directly, which provides better performance because the call chain of each interface is shorter.
The code for using the RKNN API implemented with Android HIDL can be found in the vendor/rockchip/hardware/interfaces/neuralnetworks directory of the RK3566_RK3568/RK3588 Android system SDK. After completing the Android system compilation, some NPU-related libraries will be generated (for applications, only librknn_api_android.so needs to be linked).
# Build
**This example is applicable to librknn_api_android.so.**
- Build librknn_api_android.so
## Compilation
First download the RK3566_RK3568/RK3588 Android SDK, then execute the following in the Android SDK root directory:
- Compile librknn_api_android.so
```
source build/envsetup.sh
lunch your target ## Select according to your actual situation
mmm vendor/rockchip/hardware/interfaces/neuralnetworks/ -j16
First, download the RK3566_RK3568/RK3588 Android SDK, then execute the following commands in the root directory of the Android SDK:
```
```
source build/envsetup.sh
lunch your target ## Choose according to your actual situation
mmm vendor/rockchip/hardware/interfaces/neuralnetworks/ -j16
```
The following files will be generated:
The following files will be generated:
```
/vendor/lib/librknn_api_android.so
/vendor/lib/librknnhal_bridge.rockchip.so
/vendor/lib64/librknn_api_android.so
/vendor/lib64/librknnhal_bridge.rockchip.so
/vendor/lib64/rockchip.hardware.neuralnetworks@1.0.so
/vendor/lib64/rockchip.hardware.neuralnetworks@1.0-adapter-helper.so
/vendor/lib64/hw/rockchip.hardware.neuralnetworks@1.0-impl.so
/vendor/bin/hw/rockchip.hardware.neuralnetworks@1.0-service
```
```
/vendor/lib/librknn_api_android.so
/vendor/lib/librknnhal_bridge.rockchip.so
/vendor/lib64/librknn_api_android.so
/vendor/lib64/librknnhal_bridge.rockchip.so
/vendor/lib64/rockchip.hardware.neuralnetworks@1.0.so
/vendor/lib64/rockchip.hardware.neuralnetworks@1.0-adapter-helper.so
/vendor/lib64/hw/rockchip.hardware.neuralnetworks@1.0-impl.so
/vendor/bin/hw/rockchip.hardware.neuralnetworks@1.0-service
```
- Compile this demo
- Build this demo
Copy $RKNPU2_SDK to the root directory of the Android SDK, and execute:
Copy $RKNPU2_SDK to the Android SDK root directory and execute:
```
mmm rknpu2/examples/librknn_api_android_demo
```
```
mmm rknpu2/examples/librknn_api_android_demo
```
The following file will be generated:
This generates vendor/bin/rknn_create_mem_demo.
vendor/bin/rknn_create_mem_demo
# Run
## Execution
- Push rknn_create_mem_demo to the /vendor/bin/ directory on the board
- Push the model to the /data/ directory on the board
- Make sure rockchip.hardware.neuralnetworks@1.0-service is already running on the board
- Push rknn_create_mem_demo to the /vendor/bin/ directory of the target device.
- Push the model to the /data/ directory of the target device.
- Make sure that the rockchip.hardware.neuralnetworks@1.0-service is running on the device.
```
rknn_create_mem_demo /data/model/RK3566_RK3568/mobilenet_v1.rknn /data/model/dog_224x224.jpg
rknn_create_mem_demo /data/model/RK3566_RK3568/mobilenet_v1.rknn /data/model/dog_224x224.jpg
```
## FAQ
- What should I do if the rockchip.hardware.neuralnetworks@1.0-service is not running?
# FAQ
- What if the rockchip.hardware.neuralnetworks@1.0-service is not running?
If the service is not running, make sure the vendor/rockchip/hardware/interfaces/neuralnetworks/ directory exists in the Android SDK, rebuild the system firmware, and flash it to the board; for detailed steps, refer to the SDK firmware build instructions.
If the service is not running, make sure that the vendor/rockchip/hardware/interfaces/neuralnetworks/ directory exists in the Android SDK, recompile the system firmware, and burn it onto the device. Please refer to the instructions for compiling the SDK firmware for specific steps.
- Encountering the error sizeof(rknn_tensor_attr) != sizeof(::rockchip::hardware::neuralnetworks::V1_0::RKNNTensorAttr)
- Encounter the error "sizeof(rknn_tensor_attr) != sizeof(::rockchip::hardware::neuralnetworks::V1_0::RKNNTensorAttr)"
Update vendor/rockchip/hardware/interfaces/neuralnetworks to the latest code.
You need to update the vendor/rockchip/hardware/interfaces/neuralnetworks to the latest version.

73
examples/librknn_api_android_demo/README_CN.md

@@ -0,0 +1,73 @@
# Description
There are two ways to call the RKNN API on the Android platform:
1) The application links librknnrt.so directly.
2) The application links librknn_api_android.so, which is implemented on top of Android HIDL.
Android devices that need to pass CTS/VTS testing can use the HIDL-based RKNN API. For devices that do not need to pass CTS/VTS testing, linking librknnrt.so directly is recommended: the call chain of each interface is shorter, which gives better performance.
The code of the HIDL-based RKNN API is located in the vendor/rockchip/hardware/interfaces/neuralnetworks directory of the RK3566_RK3568/RK3588 Android system SDK. After the Android system build completes, several NPU-related libraries are generated (applications only need to link librknn_api_android.so).
**This example applies to librknn_api_android.so.**
# Build
- Build librknn_api_android.so
First download the RK3566_RK3568/RK3588 Android SDK, then execute the following in the Android SDK root directory:
```
source build/envsetup.sh
lunch your target ## Select according to your actual situation
mmm vendor/rockchip/hardware/interfaces/neuralnetworks/ -j16
```
The following files will be generated:
```
/vendor/lib/librknn_api_android.so
/vendor/lib/librknnhal_bridge.rockchip.so
/vendor/lib64/librknn_api_android.so
/vendor/lib64/librknnhal_bridge.rockchip.so
/vendor/lib64/rockchip.hardware.neuralnetworks@1.0.so
/vendor/lib64/rockchip.hardware.neuralnetworks@1.0-adapter-helper.so
/vendor/lib64/hw/rockchip.hardware.neuralnetworks@1.0-impl.so
/vendor/bin/hw/rockchip.hardware.neuralnetworks@1.0-service
```
- Build this demo
Copy $RKNPU2_SDK to the Android SDK root directory and execute:
```
mmm rknpu2/examples/librknn_api_android_demo
```
This generates vendor/bin/rknn_create_mem_demo.
# Run
- Push rknn_create_mem_demo to the /vendor/bin/ directory on the board
- Push the model to the /data/ directory on the board
- Make sure rockchip.hardware.neuralnetworks@1.0-service is already running on the board
```
rknn_create_mem_demo /data/model/RK3566_RK3568/mobilenet_v1.rknn /data/model/dog_224x224.jpg
```
# FAQ
- What if the rockchip.hardware.neuralnetworks@1.0-service is not running?
If the service is not running, make sure the vendor/rockchip/hardware/interfaces/neuralnetworks/ directory exists in the Android SDK, rebuild the system firmware, and flash it to the board; for detailed steps, refer to the SDK firmware build instructions.
- Encountering the error sizeof(rknn_tensor_attr) != sizeof(::rockchip::hardware::neuralnetworks::V1_0::RKNNTensorAttr)
Update vendor/rockchip/hardware/interfaces/neuralnetworks to the latest code.

2
examples/rknn_api_demo/README.md

@@ -1,4 +1,4 @@
The following <TARGET_PLATFORM> represents RK356X or RK3588.
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build

76
examples/rknn_api_demo/README_CN.md

@@ -0,0 +1,76 @@
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` to the cross-compiler path, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Copy install/rknn_api_demo_Linux to the device.
- If you use a Rockchip EVB board, you can use the following commands:
Connect the device and push the program and model to `/userdata`
```
adb push install/rknn_api_demo_Linux /userdata/
adb push ../rknn_mobilenet_demo/model/ /userdata/rknn_api_demo_Linux
```
- If your board runs an sshd service, you can use scp or other means to transfer the program and model to the board.
## Run
```
adb shell
cd /userdata/rknn_api_demo_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_create_mem_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
./rknn_create_mem_with_rga_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```
# Android Demo
## Build
Modify `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` to the NDK for the platform, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the program and model to `/data`
```
adb push install/rknn_api_demo_Android /data/
adb push ../rknn_mobilenet_demo/model/ /data/rknn_api_demo_Android
```
## Run
```
adb shell
cd /data/rknn_api_demo_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_create_mem_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
./rknn_create_mem_with_rga_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
./rknn_with_mmz_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
./rknn_set_internal_mem_from_fd_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
./rknn_set_internal_mem_from_phy_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```
# Notes:
- You may need to update libmpimmz.so and its header files to match the MMZ implementation in your system.
- You may need to update librga.so and its header files to match the RGA implementation in your system. Library repository: https://github.com/airockchip/librga. For RK3562, the librga version must be 1.9.1 or later.
- You may need to build the MMZ-related demos with NDK r19c or older.

8
examples/rknn_benchmark/README.md

@@ -1,12 +1,12 @@
rknn_benchmark is used to test the performance of the rknn model. Please make sure that the cpu/ddr/npu has been clocked to the highest frequency before testing.
Usage:
./rknn_benchmark xxx.rknn [input_data] [loop_count] [core_mask]
core_mask: 0: auto, 1: npu core1, 2: npu core2, 4:npu core3,
3: npu core1&2,
7: npu core1&2&3
@@ -22,7 +22,7 @@ Such as:
```
The following <TARGET_PLATFORM> represents RK356X or RK3588
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588
# Aarch64 Linux Demo
## Build

87
examples/rknn_benchmark/README_CN.md

@@ -0,0 +1,87 @@
rknn_benchmark is used to test the performance of rknn models. Make sure the CPU/DDR/NPU frequencies are locked to the highest before testing.
Usage:
./rknn_benchmark xxx.rknn [input_data] [loop_count] [core_mask]
core_mask: 0: auto, 1: npu core1, 2: npu core2, 4: npu core3,
3: npu core1&2,
7: npu core1&2&3
core_mask is supported on RK3588 only.
For example:
```
./rknn_benchmark mobilenet_v1.rknn
./rknn_benchmark mobilenet_v1.rknn dog.jpg 10 3
./rknn_benchmark mobilenet_v1.rknn dog.npy 10 7
./rknn_benchmark xxx.rknn input1.npy#input2.npy
```
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` to the cross-compiler path, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Copy install/rknn_benchmark_Linux to the device.
- If you use a Rockchip EVB board, you can use the following commands:
Connect the device and push the program and model to `/userdata`
```
adb push install/rknn_benchmark_Linux /userdata/
```
- If your board runs an sshd service, you can use scp or other means to transfer the program and model to the board.
## Run
```
adb shell
cd /userdata/rknn_benchmark_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_benchmark xxx.rknn
```
# Android Demo
## Build
Modify `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` for the target platform, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the build output to `/data`
```
adb push install/rknn_benchmark_Android /data/
```
## Run
```
adb shell
cd /data/rknn_benchmark_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_benchmark xxx.rknn
```

4
examples/rknn_benchmark/src/rknn_benchmark.cpp

@@ -370,7 +370,7 @@ int main(int argc, char* argv[])
if (input_paths_split.size() > 0) {
// Load input
if (io_num.n_input != input_paths_split.size()) {
printf("input missing!, need input number: %d, only get %d inputs\n", io_num.n_input, input_paths_split.size());
printf("input missing!, need input number: %d, only get %zu inputs\n", io_num.n_input, input_paths_split.size());
goto out;
}
for (int i = 0; i < io_num.n_input; i++) {
@@ -392,7 +392,7 @@ int main(int argc, char* argv[])
}
}
memset(inputs, 0, io_num.n_input * sizeof(rknn_input));
for (int i = 0; i < io_num.n_input; i++) {
inputs[i].index = i;

2
examples/rknn_common_test/README.md

@@ -1,4 +1,4 @@
The following <TARGET_PLATFORM> represents RK356X or RK3588
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588
# Aarch64 Linux Demo
## Build

64
examples/rknn_common_test/README_CN.md

@@ -0,0 +1,64 @@
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` to the cross-compiler path, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Copy install/rknn_common_test_Linux to the device.
- If you use a Rockchip EVB board, you can use the following commands:
Connect the device and push the program and model to `/userdata`
```
adb push install/rknn_common_test_Linux /userdata/
```
- If your board runs an sshd service, you can use scp or other means to transfer the program and model to the board.
## Run
```
adb shell
cd /userdata/rknn_common_test_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_common_test model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```
# Android Demo
## Build
Modify `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` to the NDK for the platform, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the program and model to `/data`
```
adb push install/rknn_common_test_Android /data/
```
## Run
```
adb shell
cd /data/rknn_common_test_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_common_test model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```
```

40
examples/rknn_dynamic_shape_input_demo/CMakeLists.txt

@@ -5,6 +5,9 @@ project(rknn_dynshape_demo)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
# skip 3rd-party lib dependencies
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--allow-shlib-undefined")
# rknn api
if(TARGET_SOC STREQUAL "rk356x")
set(RKNN_API_PATH ${CMAKE_SOURCE_DIR}/../../runtime/RK356X/${CMAKE_SYSTEM_NAME}/librknn_api)
@@ -39,6 +42,22 @@ else()
endif()
find_package(OpenCV REQUIRED)
# mmz
set(MPI_MMZ_PATH ${CMAKE_SOURCE_DIR}/../3rdparty/rk_mpi_mmz)
if(CMAKE_SYSTEM_NAME STREQUAL "Android")
set(MPI_MMZ_LIB ${MPI_MMZ_PATH}/lib/Android/${CMAKE_ANDROID_ARCH_ABI}/libmpimmz.so)
else()
if(CMAKE_C_COMPILER MATCHES "aarch64")
set(LIB_ARCH aarch64)
else()
set(LIB_ARCH armhf)
endif()
set(MPI_MMZ_LIB ${MPI_MMZ_PATH}/lib/Linux/${LIB_ARCH}/libmpimmz.so)
endif()
include_directories(${MPI_MMZ_PATH}/include)
#### cnpy
set(CNPY_ROOT ${CMAKE_SOURCE_DIR}/../3rdparty/cnpy)
include_directories(${CNPY_ROOT})
@@ -68,9 +87,28 @@ target_link_libraries(rknn_dynshape_inference_zero_copy
${OpenCV_LIBS}
)
# install target and libraries
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/install/rknn_dynshape_demo_${CMAKE_SYSTEM_NAME})
install(TARGETS rknn_dynshape_inference_zero_copy DESTINATION ./)
install(DIRECTORY model DESTINATION ./)
install(DIRECTORY images DESTINATION ./)
install(PROGRAMS ${RKNN_RT_LIB} DESTINATION lib)
# For now, the mmz demo is only available on Android, not on Linux,
# mainly because libmpimmz.so has no Linux implementation yet. The NPU API itself supports Linux.
if(CMAKE_SYSTEM_NAME STREQUAL "Android")
# rknn_dynshape_inference_zero_copy_alloc_outside
add_executable(rknn_dynshape_inference_zero_copy_alloc_outside
src/rknn_dynshape_inference_zero_copy_alloc_outside.cc
${CNPY_ROOT}/cnpy.cpp
)
target_link_libraries(rknn_dynshape_inference_zero_copy_alloc_outside
${RKNN_RT_LIB}
${MPI_MMZ_LIB}
)
# install target and libraries
install(TARGETS rknn_dynshape_inference_zero_copy_alloc_outside DESTINATION ./)
endif()

83
examples/rknn_dynamic_shape_input_demo/README.md

@@ -1,52 +1,73 @@
# RKNN C API Dynamic Shape Input Demo
This is a demo application that uses the RKNN C API for dynamic-shape input inference. It shows how to use the RKNN dynamic-shape C API to classify images.
# RKNN C API Dynamic Shape Input Demo
# How to Use
1. Clone or download this code repository: ssh://git@10.10.10.59:8001/hpc/rknpu2.git.
2. Enter the dynamic shape inference demo directory in a terminal.
```
This is a demo that uses the RKNN C API for dynamic shape input inference. In this demo, you can see how to use the RKNN dynamic shape C API to perform image classification.
## How to Use
1. Clone or download this code repository: ssh://git@10.10.10.59:8001/hpc/rknpu2.git.
2. Navigate to the dynamic shape inference demo directory in your terminal.
```shell
cd examples/rknn_dynamic_shape_input_demo
```
3. Compile the application by running the shell script for the chip platform. For the RK3562 Android system, for example:
```
3. Compile the application by running the shell script based on the chip platform. For example, for the RK3562 Android system, run the following command:
```shell
./build-android_RK3562.sh
```
4. Push the demo program directory to the board with adb:
```
4. Push the demo program directory to the target board's system using the adb command. For example:
```shell
#If using Android system, make sure to run adb root & adb remount first.
adb push ./install/rknn_dynshape_demo_Android/ /data
Note: on Android, adb root & adb remount are required first
```
5. Set the runtime library path
5. Set the runtime library path.
```
export LD_LIBRARY_PATH=./lib
```
6. Run the program. Taking the rk3562 platform as an example, the command ./rknn_dynshape_inference model/RK3562/mobilenet_v2.rknn images/dog_224x224.jpg classifies an image, where mobilenet_v2.rknn is the name of the neural network model file and dog_224x224.jpg is the name of the image file to classify.
6. Run the program. For example, on the RK3562 platform, use the command
```shell
./rknn_dynshape_inference model/RK3562/mobilenet_v2.rknn images/dog_224x224.jpg
```
, where `mobilenet_v2.rknn` is the name of the neural network model file, and `dog_224x224.jpg` is the name of the image file to classify.
## Compilation Instructions
### Arm Linux
Specify the cross-compiler path for the specific chip platform by modifying the `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh`, where TARGET_PLATFORM is the chip name. Then execute:
# Build Instructions
## Arm Linux
Specify the cross-compiler path for the chip platform by modifying `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh`, where TARGET_PLATFORM is the chip name, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Android
Specify the Android NDK path by modifying `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh`, where TARGET_PLATFORM is the chip name, then execute
### Android
Specify the path to the Android NDK by modifying `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh`, where TARGET_PLATFORM is the chip name. Then execute:
```
./build-android_<TARGET_PLATFORM>.sh
```
# Included Features
This demo application includes the following features:
## Included Features
This demonstration application includes the following features:
- Creating a neural network model with dynamic shapes.
See examples/functions/dynamic_input in the https://github.com/rockchip-linux/rknn-toolkit2 repository.
- Creating a neural network model with dynamic shape inputs. Please refer to the examples/functions/dynamic_input directory in the https://github.com/rockchip-linux/rknn-toolkit2 repository for more information.
- Reading an image from a file and performing classification using the neural network model. The program follows these steps:
- Reading an image from a file and classifying it with the neural network model. The program steps are:
1. Initialize the RKNN context with the rknn_init() function.
2. Set the shape information of the model input, including shape and layout, with the rknn_set_input_shape() function.
3. Query the currently set model input and output information, including shape, data type, and size, with the rknn_query() function.
4. Set the model input data, including the data pointer and data size, with the rknn_inputs_set() function.
5. Run the model with the rknn_run() function.
6. Use the rknn_outputs_get() function to specify whether float results are needed and fetch the output data.
7. Process the output data to obtain the classification results and probabilities.
8. Release the RKNN context with the rknn_release() function.
1. Initialize the RKNN context using the `rknn_init()` function.
2. Set the shape information of all the model inputs using the `rknn_set_input_shapes()` function, including shape and layout.
3. Query the current model input and output information, including shape, data type, and size, using the `rknn_query()` function.
4. Set the input data of the model using the `rknn_inputs_set()` function, including data pointer and size.
5. Run the model using the `rknn_run()` function.
6. Retrieve the output data by using the `rknn_outputs_get()` function, specifying the need for float-type results.
7. Process the output data to obtain the classification results and probabilities.
8. Release the RKNN context using the `rknn_release()` function.
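The eight steps above condense to the following sketch (a hedged outline, not the full demo: `ctx` is assumed to come from `rknn_init()` in step 1, `input_attrs` already holds one of the supported shapes, and `image_data`/`image_size` are prepared by the caller):
```c
#include <stdio.h>
#include <string.h>
#include "rknn_api.h"

// Steps 2-7 for one dynamic-shape inference; returns 0 on success.
static int infer_once(rknn_context ctx, rknn_input_output_num io_num,
                      rknn_tensor_attr *input_attrs, void *image_data, uint32_t image_size)
{
  // 2. select the shapes for this inference (one attr per model input)
  int ret = rknn_set_input_shapes(ctx, io_num.n_input, input_attrs);
  if (ret < 0) { fprintf(stderr, "rknn_set_input_shapes error! ret=%d\n", ret); return -1; }

  // 3. query the shape that is now in effect for input 0
  rknn_tensor_attr cur_attr;
  memset(&cur_attr, 0, sizeof(cur_attr));
  cur_attr.index = 0;
  ret = rknn_query(ctx, RKNN_QUERY_CURRENT_INPUT_ATTR, &cur_attr, sizeof(cur_attr));
  if (ret != RKNN_SUCC) { return -1; }

  // 4. hand the image buffer to the runtime
  rknn_input input;
  memset(&input, 0, sizeof(input));
  input.index = 0;
  input.type = RKNN_TENSOR_UINT8;
  input.fmt = RKNN_TENSOR_NHWC;
  input.buf = image_data;
  input.size = image_size;
  ret = rknn_inputs_set(ctx, 1, &input);
  if (ret < 0) { return -1; }

  // 5. run the model
  ret = rknn_run(ctx, NULL);
  if (ret < 0) { return -1; }

  // 6. fetch the output as float
  rknn_output output;
  memset(&output, 0, sizeof(output));
  output.want_float = 1;
  ret = rknn_outputs_get(ctx, 1, &output, NULL);
  if (ret < 0) { return -1; }

  // 7. ... post-process output.buf (e.g. top-5 classes) ...
  rknn_outputs_release(ctx, 1, &output);
  return 0;
}
// 8. when done with all inferences: rknn_release(ctx);
```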

52
examples/rknn_dynamic_shape_input_demo/README_CN.md

@@ -0,0 +1,52 @@
# RKNN C API Dynamic Shape Input Demo
This is a demo application that uses the RKNN C API for dynamic-shape input inference. It shows how to use the RKNN dynamic-shape C API to classify images.
# How to Use
1. Clone or download this code repository: ssh://git@10.10.10.59:8001/hpc/rknpu2.git.
2. Enter the dynamic shape inference demo directory in a terminal.
```
cd examples/rknn_dynamic_shape_input_demo
```
3. Compile the application by running the shell script for the chip platform. For the RK3562 Android system, for example:
```
./build-android_RK3562.sh
```
4. Push the demo program directory to the board with adb:
```
adb push ./install/rknn_dynshape_demo_Android/ /data
Note: on Android, adb root & adb remount are required first
```
5. Set the runtime library path
```
export LD_LIBRARY_PATH=./lib
```
6. Run the program. Taking the rk3562 platform as an example, the command ./rknn_dynshape_inference model/RK3562/mobilenet_v2.rknn images/dog_224x224.jpg classifies an image, where mobilenet_v2.rknn is the name of the neural network model file and dog_224x224.jpg is the name of the image file to classify.
# Build Instructions
## Arm Linux
Specify the cross-compiler path for the chip platform by modifying `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh`, where TARGET_PLATFORM is the chip name, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Android
Specify the Android NDK path by modifying `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh`, where TARGET_PLATFORM is the chip name, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
# Included Features
This demo application includes the following features:
- Creating a neural network model with dynamic shapes.
See examples/functions/dynamic_input in the https://github.com/rockchip-linux/rknn-toolkit2 repository.
- Reading an image from a file and classifying it with the neural network model. The program steps are:
1. Initialize the RKNN context with the rknn_init() function.
2. Set the shape information of all model inputs, including shape and layout, with the rknn_set_input_shapes() function.
3. Query the currently set model input and output information, including shape, data type, and size, with the rknn_query() function.
4. Set the model input data, including the data pointer and data size, with the rknn_inputs_set() function.
5. Run the model with the rknn_run() function.
6. Use the rknn_outputs_get() function to specify whether float results are needed and fetch the output data.
7. Process the output data to obtain the classification results and probabilities.
8. Release the RKNN context with the rknn_release() function.

BIN
examples/rknn_dynamic_shape_input_demo/model/RK3562/mobilenet_v2.rknn

Binary file not shown.

BIN
examples/rknn_dynamic_shape_input_demo/model/RK3566_RK3568/mobilenet_v2.rknn

Binary file not shown.

BIN
examples/rknn_dynamic_shape_input_demo/model/RK3588/mobilenet_v2.rknn

Binary file not shown.

20
examples/rknn_dynamic_shape_input_demo/src/rknn_dynshape_inference.cc

@@ -467,13 +467,12 @@ int main(int argc, char **argv)
{
input_attrs[i].dims[j] = shape_range[i].dyn_range[s][j];
}
ret = rknn_set_input_shape(ctx, &input_attrs[i]);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shape error! ret=%d\n", ret);
return -1;
}
}
ret = rknn_set_input_shapes(ctx, io_num.n_input, input_attrs);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shapes error! ret=%d\n", ret);
return -1;
}
// Get the input and output shapes of the current inference
@@ -515,7 +514,7 @@ int main(int argc, char **argv)
// Set up the input information
rknn_input inputs[io_num.n_input];
memset(inputs, 0, io_num.n_input * sizeof(rknn_input));
std::vector<cv::Mat> resize_imgs;
resize_imgs.resize(io_num.n_input);
for (int i = 0; i < io_num.n_input; i++)
{
@@ -575,10 +574,11 @@ int main(int argc, char **argv)
#if NPY_SUPPORT
// save output
for (uint32_t i = 0; i < io_num.n_output; i++)
{
char output_path[PATH_MAX];
sprintf(output_path, "%s/rt_output%d.npy", output_dir ? output_dir : ".", i);
save_npy<float>(output_path, (float *)outputs[i].buf, &cur_output_attrs[i]);
}
#endif

20
examples/rknn_dynamic_shape_input_demo/src/rknn_dynshape_inference_zero_copy.cc

@@ -490,13 +490,12 @@ int main(int argc, char **argv)
{
input_attrs[i].dims[j] = shape_range[i].dyn_range[s][j];
}
ret = rknn_set_input_shape(ctx, &input_attrs[i]);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shape error! ret=%d\n", ret);
return -1;
}
}
ret = rknn_set_input_shapes(ctx, io_num.n_input, input_attrs);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shape error! ret=%d\n", ret);
return -1;
}
// Get the input and output shapes of the current inference
@@ -538,7 +537,7 @@ int main(int argc, char **argv)
// Set up the input information
rknn_input inputs[io_num.n_input];
memset(inputs, 0, io_num.n_input * sizeof(rknn_input));
std::vector<cv::Mat> resize_imgs;
resize_imgs.resize(io_num.n_input);
for (int i = 0; i < io_num.n_input; i++)
{
@@ -621,10 +620,11 @@ int main(int argc, char **argv)
#if NPY_SUPPORT
// save output
for (uint32_t i = 0; i < io_num.n_output; i++)
{
char output_path[PATH_MAX];
sprintf(output_path, "%s/rt_output%d.npy", output_dir ? output_dir : ".", i);
save_npy<float>(output_path, (float *)output_mems[i]->virt_addr, &cur_output_attrs[i]);
}
#endif

904
examples/rknn_dynamic_shape_input_demo/src/rknn_dynshape_inference_zero_copy_alloc_outside.cc

@@ -0,0 +1,904 @@
/****************************************************************************
*
* Copyright (c) 2017 - 2023 by Rockchip Corp. All rights reserved.
*
* The material in this file is confidential and contains trade secrets
* of Rockchip Corporation. This is proprietary information owned by
* Rockchip Corporation. No part of this work may be disclosed,
* reproduced, copied, transmitted, or used in any way for any purpose,
* without the express written permission of Rockchip Corporation.
*
*****************************************************************************/
/*-------------------------------------------
Includes
-------------------------------------------*/
#include "rk_mpi_mmz.h"
#include "rknn_api.h"
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <string>
#include <vector>
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#include <stb/stb_image_resize.h>
#define NPY_SUPPORT 1
#if NPY_SUPPORT
#include "cnpy/cnpy.h"
using namespace cnpy;
#endif
#define TIME_BEGIN(name) \
struct timeval tv##name; \
gettimeofday(&tv##name, NULL); \
long val##name = tv##name.tv_usec; \
long min##name = tv##name.tv_sec;
#define TIME_END(name) \
gettimeofday(&tv##name, NULL); \
val##name = tv##name.tv_usec - val##name; \
val##name += 1000000 * (tv##name.tv_sec - min##name); \
printf("[%s]exectime is %ld us, %ld ms\n", #name, val##name, val##name / 1000);
static int mb_flags = RK_MMZ_ALLOC_TYPE_IOMMU | RK_MMZ_ALLOC_CACHEABLE;
#define SIZE_ALIGN(size, align) (((size) + ((align)-1)) & (~((align)-1)))
#define SIZE_ALIGN_128(size) SIZE_ALIGN(size, 128)
/*-------------------------------------------
Functions
-------------------------------------------*/
static inline int64_t getCurrentTimeUs()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
static int rknn_GetTopN(float *pfProb, float *pfMaxProb, uint32_t *pMaxClass, uint32_t outputCount, uint32_t topNum)
{
uint32_t i, j;
uint32_t top_count = outputCount > topNum ? topNum : outputCount;
for (i = 0; i < topNum; ++i)
{
pfMaxProb[i] = -FLT_MAX;
pMaxClass[i] = -1;
}
for (j = 0; j < top_count; j++)
{
for (i = 0; i < outputCount; i++)
{
if ((i == *(pMaxClass + 0)) || (i == *(pMaxClass + 1)) || (i == *(pMaxClass + 2)) || (i == *(pMaxClass + 3)) ||
(i == *(pMaxClass + 4)))
{
continue;
}
if (pfProb[i] > *(pfMaxProb + j))
{
*(pfMaxProb + j) = pfProb[i];
*(pMaxClass + j) = i;
}
}
}
return 1;
}
static void dump_tensor_attr(rknn_tensor_attr *attr)
{
printf(" index=%d, name=%s, n_dims=%d, dims=[%d, %d, %d, %d], n_elems=%d, size=%d, fmt=%s, type=%s, qnt_type=%s, "
"zp=%d, scale=%f\n",
attr->index, attr->name, attr->n_dims, attr->dims[0], attr->dims[1], attr->dims[2], attr->dims[3],
attr->n_elems, attr->size, get_format_string(attr->fmt), get_type_string(attr->type),
get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}
static void dump_input_dynamic_range(rknn_input_range *dyn_range)
{
std::string range_str = "";
for (int n = 0; n < dyn_range->shape_number; ++n)
{
range_str += n == 0 ? "[" : ",[";
range_str += dyn_range->n_dims < 1 ? "" : std::to_string(dyn_range->dyn_range[n][0]);
for (int i = 1; i < dyn_range->n_dims; ++i)
{
range_str += ", " + std::to_string(dyn_range->dyn_range[n][i]);
}
range_str += "]";
}
printf(" index=%d, name=%s, shape_number=%d, range=[%s], fmt = %s\n", dyn_range->index, dyn_range->name,
dyn_range->shape_number, range_str.c_str(), get_format_string(dyn_range->fmt));
}
static unsigned char *load_image(const char *image_path, rknn_tensor_attr *input_attr)
{
int req_height = 0;
int req_width = 0;
int req_channel = 0;
switch (input_attr->fmt)
{
case RKNN_TENSOR_NHWC:
req_height = input_attr->dims[1];
req_width = input_attr->dims[2];
req_channel = input_attr->dims[3];
break;
case RKNN_TENSOR_NCHW:
req_height = input_attr->dims[2];
req_width = input_attr->dims[3];
req_channel = input_attr->dims[1];
break;
default:
printf("meet unsupported layout\n");
return NULL;
}
int height = 0;
int width = 0;
int channel = 0;
unsigned char *image_data = stbi_load(image_path, &width, &height, &channel, req_channel);
if (image_data == NULL)
{
printf("load image failed!\n");
return NULL;
}
if (width != req_width || height != req_height)
{
unsigned char *image_resized = (unsigned char *)STBI_MALLOC(req_width * req_height * req_channel);
if (!image_resized)
{
printf("malloc image failed!\n");
STBI_FREE(image_data);
return NULL;
}
if (stbir_resize_uint8(image_data, width, height, 0, image_resized, req_width, req_height, 0, channel) != 1)
{
printf("resize image failed!\n");
STBI_FREE(image_data);
return NULL;
}
STBI_FREE(image_data);
image_data = image_resized;
}
return image_data;
}
#if NPY_SUPPORT
static unsigned char *load_npy(const char *input_path, rknn_tensor_attr *input_attr, int *input_type, int *input_size,
int *type_bytes)
{
printf("Loading %s\n", input_path);
NpyArray npy_data = npy_load(input_path);
*type_bytes = npy_data.word_size;
std::string typeName = npy_data.typeName;
printf("npy data type:%s\n", typeName.c_str());
if (typeName == "int8")
{
*input_type = RKNN_TENSOR_INT8;
}
else if (typeName == "uint8")
{
*input_type = RKNN_TENSOR_UINT8;
}
else if (typeName == "float16")
{
*input_type = RKNN_TENSOR_FLOAT16;
}
else if (typeName == "float32")
{
*input_type = RKNN_TENSOR_FLOAT32;
}
else if (typeName == "8")
{
*input_type = RKNN_TENSOR_BOOL;
}
else if (typeName == "int64")
{
*input_type = RKNN_TENSOR_INT64;
}
// npy shape = NHWC
std::vector<int> npy_shape;
for (size_t i = 0; i < npy_data.shape.size(); ++i)
{
npy_shape.emplace_back(npy_data.shape[i]);
}
int height = npy_shape.size() > 1 ? npy_shape[1] : 1;
int width = npy_shape.size() > 2 ? npy_shape[2] : 1;
int channel = npy_shape.size() > 3 ? npy_shape[3] : 1;
switch (input_attr->fmt)
{
case RKNN_TENSOR_NHWC:
input_attr->dims[0] = npy_shape[0];
input_attr->dims[1] = height;
input_attr->dims[2] = width;
input_attr->dims[3] = channel;
break;
case RKNN_TENSOR_UNDEFINED:
for (int idx = 0; idx < input_attr->n_dims; ++idx)
{
input_attr->dims[idx] = npy_shape[idx];
}
break;
default:
fprintf(stderr, "load_npy error, unsupport model input layout: %s\n", get_format_string(input_attr->fmt));
break;
}
unsigned char *data = (unsigned char *)malloc(npy_data.num_bytes());
if (!data)
{
return NULL;
}
// TODO: copy
memcpy(data, npy_data.data<unsigned char>(), npy_data.num_bytes());
*input_size = npy_data.num_bytes();
return data;
}
static void save_npy(const char *output_path, float *output_data, rknn_tensor_attr *output_attr)
{
std::vector<size_t> output_shape;
for (uint32_t i = 0; i < output_attr->n_dims; ++i)
{
output_shape.push_back(output_attr->dims[i]);
}
npy_save<float>(output_path, output_data, output_shape);
}
#endif
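// Split `str` by `pattern`, e.g. "a.jpg#b.jpg" -> {"a.jpg", "b.jpg"}; used to parse the
// '#'-separated input_path argument. Note: the loop advances by one character, so it
// effectively assumes a single-character separator.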
static std::vector<std::string> split(const std::string &str, const std::string &pattern)
{
std::vector<std::string> res;
if (str == "")
return res;
std::string strs = str + pattern;
size_t pos = strs.find(pattern);
while (pos != strs.npos)
{
std::string temp = strs.substr(0, pos);
res.push_back(temp);
strs = strs.substr(pos + 1, strs.size());
pos = strs.find(pattern);
}
return res;
}
uint32_t get_file_size(char *file_name)
{
FILE *fid = fopen(file_name, "rb");
if (fid == NULL)
{
printf("open file error\n");
return -1;
}
fseek(fid, 0, SEEK_END);
uint32_t size = ftell(fid);
fclose(fid);
return size;
}
int read_bin_file(char *file_name, void *out_ptr, unsigned int size)
{
FILE *fid = fopen(file_name, "rb");
if (fid == NULL)
{
printf("open file error\n");
return -1;
}
size_t nread = fread(out_ptr, 1, size, fid);
fclose(fid);
return nread == size ? 0 : -1;
}
/*-------------------------------------------
Main Functions
-------------------------------------------*/
int main(int argc, char *argv[])
{
if (argc < 3)
{
printf("Usage:%s model_path input_path [loop_count] [core_mask] [output_dir]\n", argv[0]);
return -1;
}
char *model_path = argv[1];
char *input_paths = argv[2];
std::vector<std::string> input_paths_split = split(input_paths, "#");
int loop_count = 1;
if (argc > 3)
{
loop_count = atoi(argv[3]);
}
uint32_t core_mask = 1;
if (argc > 4)
{
core_mask = strtoul(argv[4], NULL, 10);
}
char *output_dir = NULL;
if (argc > 5)
{
output_dir = argv[5];
}
rknn_context ctx = 0;
// Allocate model memory externally (outside the runtime)
MB_BLK model_mb;
uint32_t model_size = get_file_size(model_path);
int ret = RK_MPI_MMZ_Alloc(&model_mb, model_size, mb_flags);
if (ret < 0)
{
printf("RK_MPI_MMZ_Alloc failed, ret: %d\n", ret);
return ret;
}
void *model_virt = RK_MPI_MMZ_Handle2VirAddr(model_mb);
if (model_virt == NULL)
{
printf("RK_MPI_MMZ_Handle2VirAddr failed!\n");
return -1;
}
ret = read_bin_file(model_path, model_virt, model_size);
if (ret < 0)
{
printf("read_bin_file failed, ret: %d\n", ret);
return ret;
}
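// First init pass: RKNN_FLAG_COLLECT_MODEL_INFO_ONLY only parses the model and reports its
// weight/internal memory requirements via RKNN_QUERY_MEM_SIZE, so the real buffers can be
// allocated externally before the full rknn_init() below.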
TIME_BEGIN(dummy_rknn_init);
ret = rknn_init(&ctx, model_virt, model_size, RKNN_FLAG_COLLECT_MODEL_INFO_ONLY | RKNN_FLAG_MEM_ALLOC_OUTSIDE, NULL);
TIME_END(dummy_rknn_init);
if (ret < 0)
{
printf("rknn_init with RKNN_FLAG_COLLECT_MODEL_INFO_ONLY fail! ret=%d\n", ret);
return -1;
}
// [dummy_rknn_init] Get the weight and internal memory sizes
rknn_mem_size mem_size;
ret = rknn_query(ctx, RKNN_QUERY_MEM_SIZE, &mem_size, sizeof(mem_size));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("[dummpy init] total weight size: %d, total internal size: %d\n", mem_size.total_weight_size,
mem_size.total_internal_size);
// Load RKNN Model
TIME_BEGIN(rknn_init);
ret = rknn_init(&ctx, model_virt, model_size, RKNN_FLAG_MEM_ALLOC_OUTSIDE, NULL);
TIME_END(rknn_init);
if (ret < 0)
{
printf("rknn_init fail! ret=%d\n", ret);
return -1;
}
// Get sdk and driver version
rknn_sdk_version sdk_ver;
ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &sdk_ver, sizeof(sdk_ver));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("rknn_api/rknnrt version: %s, driver version: %s\n", sdk_ver.api_version, sdk_ver.drv_version);
// Get weight and internal mem size
ret = rknn_query(ctx, RKNN_QUERY_MEM_SIZE, &mem_size, sizeof(mem_size));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("total weight size: %u, total internal size: %u\n", mem_size.total_weight_size, mem_size.total_internal_size);
// Get Model Input Output Info
rknn_input_output_num io_num;
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);
printf("default input tensors:\n");
rknn_tensor_attr input_attrs[io_num.n_input];
memset(input_attrs, 0, io_num.n_input * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_input; i++)
{
input_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
if (ret < 0)
{
printf("rknn_init error! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&input_attrs[i]);
}
printf("default output tensors:\n");
rknn_tensor_attr output_attrs[io_num.n_output];
memset(output_attrs, 0, io_num.n_output * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_output; i++)
{
output_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&output_attrs[i]);
}
// Get custom string
rknn_custom_string custom_string;
ret = rknn_query(ctx, RKNN_QUERY_CUSTOM_STRING, &custom_string, sizeof(custom_string));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
printf("custom string: %s\n", custom_string.string);
printf("dynamic inputs shape range:\n");
rknn_input_range dyn_range[io_num.n_input];
memset(dyn_range, 0, io_num.n_input * sizeof(rknn_input_range));
for (uint32_t i = 0; i < io_num.n_input; i++)
{
dyn_range[i].index = i;
ret = rknn_query(ctx, RKNN_QUERY_INPUT_DYNAMIC_RANGE, &dyn_range[i], sizeof(rknn_input_range));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
dump_input_dynamic_range(&dyn_range[i]);
}
unsigned char *input_data[io_num.n_input];
int input_type[io_num.n_input];
int input_layout[io_num.n_input];
int input_size[io_num.n_input];
int type_bytes[io_num.n_input];
for (int i = 0; i < io_num.n_input; i++)
{
input_data[i] = NULL;
input_type[i] = RKNN_TENSOR_UINT8;
input_layout[i] = RKNN_TENSOR_NHWC;
input_size[i] = input_attrs[i].size;
type_bytes[i] = 1;
}
// Load input
if (io_num.n_input != input_paths_split.size())
{
printf("input number mismatch! model expects %u inputs but %zu paths were given\n", io_num.n_input,
input_paths_split.size());
return -1;
}
for (int i = 0; i < io_num.n_input; i++)
{
if (strstr(input_paths_split[i].c_str(), ".npy"))
{
// Load npy
#if NPY_SUPPORT
input_data[i] =
load_npy(input_paths_split[i].c_str(), &input_attrs[i], &input_type[i], &input_size[i], &type_bytes[i]);
#else
printf("npy input is not supported by this build!\n");
return -1;
#endif
}
else
{
// Load image
input_data[i] = load_image(input_paths_split[i].c_str(), &input_attrs[i]);
}
if (!input_data[i])
{
return -1;
}
}
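// Select which of the model's pre-built input shapes to use for this run; the matching
// shapes are then queried via RKNN_QUERY_CURRENT_INPUT_ATTR / RKNN_QUERY_CURRENT_OUTPUT_ATTR below.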
ret = rknn_set_input_shapes(ctx, io_num.n_input, input_attrs);
if (ret < 0)
{
fprintf(stderr, "rknn_set_input_shapes error! ret=%d\n", ret);
return -1;
}
// Allocate weight memory externally
MB_BLK weight_mb;
rknn_tensor_mem *weight_mem;
ret = RK_MPI_MMZ_Alloc(&weight_mb, SIZE_ALIGN_128(mem_size.total_weight_size), mb_flags);
if (ret < 0)
{
printf("RK_MPI_MMZ_Alloc failed, ret: %d\n", ret);
return ret;
}
void *weight_virt = RK_MPI_MMZ_Handle2VirAddr(weight_mb);
if (weight_virt == NULL)
{
printf("RK_MPI_MMZ_Handle2VirAddr failed!\n");
return -1;
}
int weight_fd = RK_MPI_MMZ_Handle2Fd(weight_mb);
if (weight_fd < 0)
{
printf("RK_MPI_MMZ_Handle2Fd failed!\n");
return -1;
}
weight_mem = rknn_create_mem_from_fd(ctx, weight_fd, weight_virt, mem_size.total_weight_size, 0);
ret = rknn_set_weight_mem(ctx, weight_mem);
if (ret < 0)
{
printf("rknn_set_weight_mem fail! ret=%d\n", ret);
return -1;
}
printf("weight mb info: virt = %p, fd = %d, size: %d\n", weight_virt, weight_fd, mem_size.total_weight_size);
// Allocate internal memory externally
MB_BLK internal_mb;
rknn_tensor_mem *internal_mem;
ret = RK_MPI_MMZ_Alloc(&internal_mb, SIZE_ALIGN_128(mem_size.total_internal_size), mb_flags);
if (ret < 0)
{
printf("RK_MPI_MMZ_Alloc failed, ret: %d\n", ret);
return ret;
}
void *internal_virt = RK_MPI_MMZ_Handle2VirAddr(internal_mb);
if (internal_virt == NULL)
{
printf("RK_MPI_MMZ_Handle2VirAddr failed!\n");
return -1;
}
int internal_fd = RK_MPI_MMZ_Handle2Fd(internal_mb);
if (internal_fd < 0)
{
printf("RK_MPI_MMZ_Handle2Fd failed!\n");
return -1;
}
internal_mem = rknn_create_mem_from_fd(ctx, internal_fd, internal_virt, mem_size.total_internal_size, 0);
ret = rknn_set_internal_mem(ctx, internal_mem);
if (ret < 0)
{
printf("rknn_set_internal_mem fail! ret=%d\n", ret);
return -1;
}
printf("internal mb info: virt = %p, fd = %d, size: %d\n", internal_virt, internal_fd, mem_size.total_internal_size);
printf("current input tensors:\n");
rknn_tensor_attr cur_input_attrs[io_num.n_input];
memset(cur_input_attrs, 0, io_num.n_input * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_input; i++)
{
cur_input_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_CURRENT_INPUT_ATTR, &(cur_input_attrs[i]), sizeof(rknn_tensor_attr));
if (ret < 0)
{
printf("rknn_init error! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&cur_input_attrs[i]);
}
printf("current output tensors:\n");
rknn_tensor_attr cur_output_attrs[io_num.n_output];
memset(cur_output_attrs, 0, io_num.n_output * sizeof(rknn_tensor_attr));
for (uint32_t i = 0; i < io_num.n_output; i++)
{
cur_output_attrs[i].index = i;
// query info
ret = rknn_query(ctx, RKNN_QUERY_CURRENT_OUTPUT_ATTR, &(cur_output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
return -1;
}
dump_tensor_attr(&cur_output_attrs[i]);
}
// Allocate input memory externally
MB_BLK input_mbs[io_num.n_input];
void *input_virts[io_num.n_input];
int input_fds[io_num.n_input];
for (uint32_t i = 0; i < io_num.n_input; ++i)
{
int input_size = cur_input_attrs[i].size_with_stride;
ret = RK_MPI_MMZ_Alloc(&input_mbs[i], SIZE_ALIGN_128(input_size), mb_flags);
if (ret < 0)
{
printf("RK_MPI_MMZ_Alloc failed, ret: %d\n", ret);
return ret;
}
input_virts[i] = RK_MPI_MMZ_Handle2VirAddr(input_mbs[i]);
if (input_virts[i] == NULL)
{
printf("RK_MPI_MMZ_Handle2VirAddr failed!\n");
return -1;
}
input_fds[i] = RK_MPI_MMZ_Handle2Fd(input_mbs[i]);
if (input_fds[i] < 0)
{
printf("RK_MPI_MMZ_Handle2Fd failed!\n");
return -1;
}
printf("input%d mb info: virt = %p, fd = %d, size = %d\n", i, input_virts[i], input_fds[i], input_size);
}
// Allocate output memory externally
MB_BLK output_mbs[io_num.n_output];
void *output_virts[io_num.n_output];
int output_fds[io_num.n_output];
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
// the default output type depends on the model; float32 is forced here so Top-5 can be computed directly
cur_output_attrs[i].type = RKNN_TENSOR_FLOAT32;
int output_size = cur_output_attrs[i].n_elems * sizeof(float);
cur_output_attrs[i].size = output_size;
ret = RK_MPI_MMZ_Alloc(&output_mbs[i], SIZE_ALIGN_128(output_size), mb_flags);
if (ret < 0)
{
printf("RK_MPI_MMZ_Alloc failed, ret: %d\n", ret);
return ret;
}
output_virts[i] = RK_MPI_MMZ_Handle2VirAddr(output_mbs[i]);
if (output_virts[i] == NULL)
{
printf("RK_MPI_MMZ_Handle2VirAddr failed!\n");
return -1;
}
output_fds[i] = RK_MPI_MMZ_Handle2Fd(output_mbs[i]);
if (output_fds[i] < 0)
{
printf("RK_MPI_MMZ_Handle2Fd failed!\n");
return -1;
}
printf("output%d mb info: virt = %p, fd = %d, size = %d\n", i, output_virts[i], output_fds[i], output_size);
}
// Create input tensor memory
rknn_tensor_mem *input_mems[io_num.n_input];
for (int i = 0; i < io_num.n_input; i++)
{
// the default input type is int8 (normalization and quantization would then have to be done outside);
// setting uint8 fuses normalization and quantization into the NPU
cur_input_attrs[i].type = RKNN_TENSOR_UINT8;
// the default fmt is NHWC; the NPU only supports NHWC in zero-copy mode
cur_input_attrs[i].fmt = RKNN_TENSOR_NHWC;
input_mems[i] = rknn_create_mem_from_fd(ctx, input_fds[i], input_virts[i], cur_input_attrs[i].size_with_stride, 0);
// Copy input data to input tensor memory
int width = cur_input_attrs[i].dims[2];
int stride = cur_input_attrs[i].w_stride;
if (width == stride)
{
memcpy(input_mems[i]->virt_addr, input_data[i], input_size[i]);
}
else
{
int height = cur_input_attrs[i].dims[1];
int channel = cur_input_attrs[i].dims[3];
// copy from src to dst with stride
uint8_t *src_ptr = input_data[i];
uint8_t *dst_ptr = (uint8_t *)input_mems[i]->virt_addr;
// width-channel elements
int src_wc_elems = width * channel;
int dst_wc_elems = stride * channel;
for (int b = 0; b < cur_input_attrs[i].dims[0]; b++)
{
for (int h = 0; h < height; ++h)
{
memcpy(dst_ptr, src_ptr, src_wc_elems);
src_ptr += src_wc_elems;
dst_ptr += dst_wc_elems;
}
}
}
}
// Create output tensor memory
rknn_tensor_mem *output_mems[io_num.n_output];
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
output_mems[i] = rknn_create_mem_from_fd(ctx, output_fds[i], output_virts[i], cur_output_attrs[i].size, 0);
}
// Set input tensor memory
for (uint32_t i = 0; i < io_num.n_input; ++i)
{
ret = rknn_set_io_mem(ctx, input_mems[i], &cur_input_attrs[i]);
if (ret < 0)
{
printf("rknn_set_io_mem fail! ret=%d\n", ret);
return -1;
}
}
// Set output tensor memory
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
// set output memory and attribute
ret = rknn_set_io_mem(ctx, output_mems[i], &cur_output_attrs[i]);
if (ret < 0)
{
printf("rknn_set_io_mem fail! ret=%d\n", ret);
return -1;
}
}
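// The MMZ buffers are cacheable: flush the CPU cache (FlushCacheEnd) after the CPU writes a
// buffer the NPU will read, and invalidate it (FlushCacheStart) before the CPU reads back
// buffers the NPU wrote.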
// flush the weight buffer cache so the NPU sees the data written above
ret = RK_MPI_MMZ_FlushCacheEnd(weight_mb, 0, SIZE_ALIGN_128(mem_size.total_weight_size), RK_MMZ_SYNC_RW);
if (ret < 0)
{
printf("weight_mb FlushCacheEnd fail! ret=%d\n", ret);
return -1;
}
// Run
printf("Begin perf ...\n");
for (int i = 0; i < loop_count; ++i)
{
ret = RK_MPI_MMZ_FlushCacheEnd(internal_mb, 0, SIZE_ALIGN_128(mem_size.total_internal_size), RK_MMZ_SYNC_RW);
if (ret < 0)
{
printf("internal_mb FlushCacheEnd fail! ret=%d\n", ret);
return -1;
}
for (uint32_t i = 0; i < io_num.n_input; ++i)
{
ret =
RK_MPI_MMZ_FlushCacheEnd(input_mbs[i], 0, SIZE_ALIGN_128(cur_input_attrs[i].size_with_stride), RK_MMZ_SYNC_RW);
if (ret < 0)
{
printf("input_mbs FlushCacheEnd fail! ret=%d\n", ret);
return -1;
}
}
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
ret = RK_MPI_MMZ_FlushCacheEnd(output_mbs[i], 0, SIZE_ALIGN_128(cur_output_attrs[i].n_elems * sizeof(float)),
RK_MMZ_SYNC_RW);
if (ret < 0)
{
printf("output_mbs FlushCacheEnd fail! ret=%d\n", ret);
return -1;
}
}
int64_t start_us = getCurrentTimeUs();
ret = rknn_run(ctx, NULL);
int64_t elapse_us = getCurrentTimeUs() - start_us;
if (ret < 0)
{
printf("rknn run error %d\n", ret);
return -1;
}
printf("%4d: Elapse Time = %.2fms, FPS = %.2f\n", i, elapse_us / 1000.f, 1000.f * 1000.f / elapse_us);
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
ret = RK_MPI_MMZ_FlushCacheStart(output_mbs[i], 0, SIZE_ALIGN_128(cur_output_attrs[i].n_elems * sizeof(float)),
RK_MMZ_SYNC_RW);
if (ret < 0)
{
printf("output_mbs FlushCacheStart fail! ret=%d\n", ret);
return -1;
}
}
}
// Process the output results
for (uint32_t i = 0; i < io_num.n_output; i++)
{
float *output = (float *)output_mems[i]->virt_addr;
int out_elems = cur_output_attrs[i].n_elems;
std::vector<std::pair<float, int>> results;
for (int j = 0; j < out_elems; j++)
{
results.emplace_back(output[j], j);
}
int top_n = out_elems < 5 ? out_elems : 5;
std::partial_sort(results.begin(), results.begin() + top_n, results.end(), std::greater<std::pair<float, int>>());
printf(" --- Top%d ---\n", top_n);
for (int j = 0; j < top_n; j++)
{
printf("%-3d: %.2f%%\n", results[j].second, results[j].first * 100);
}
}
// save output
for (uint32_t i = 0; i < io_num.n_output; i++)
{
char output_path[PATH_MAX];
snprintf(output_path, sizeof(output_path), "%s/rt_output%d.npy", output_dir ? output_dir : ".", i);
rknn_tensor_attr cur_output_attrs_npy = cur_output_attrs[i];
if (cur_output_attrs[i].fmt == RKNN_TENSOR_NHWC && cur_output_attrs[i].n_dims == 4)
{
std::vector<int> axis = {0, 2, 3, 1};
for (int j = 0; j < axis.size(); j++)
{
cur_output_attrs_npy.dims[j] = cur_output_attrs[i].dims[axis[j]];
}
}
save_npy(output_path, (float *)output_mems[i]->virt_addr, &cur_output_attrs_npy);
}
// free mb blk memory
RK_MPI_MMZ_Free(model_mb);
RK_MPI_MMZ_Free(weight_mb);
RK_MPI_MMZ_Free(internal_mb);
for (uint32_t i = 0; i < io_num.n_input; ++i)
{
RK_MPI_MMZ_Free(input_mbs[i]);
}
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
RK_MPI_MMZ_Free(output_mbs[i]);
}
// Destroy rknn memory
for (uint32_t i = 0; i < io_num.n_input; ++i)
{
rknn_destroy_mem(ctx, input_mems[i]);
free(input_data[i]);
}
for (uint32_t i = 0; i < io_num.n_output; ++i)
{
rknn_destroy_mem(ctx, output_mems[i]);
}
rknn_destroy_mem(ctx, weight_mem);
rknn_destroy_mem(ctx, internal_mem);
// destroy
rknn_destroy(ctx);
return 0;
}

64
examples/rknn_internal_mem_reuse_demo/README.md

@@ -1,15 +1,16 @@
# rknn_internal_mem_reuse_demo
## Description
This project is mainly used to demonstrate the usage of **RKNN_FLAG_MEM_ALLOC_OUTSIDE** and **rknn_set_internal_mem**.
RKNN_FLAG_MEM_ALLOC_OUTSIDE has two main purposes:
- All memory is allocated by the user, allowing for better overall memory management.
- It enables memory reuse, especially for memory-constrained chips like RV1103/RV1106.
Assuming there are two models, Model A and Model B, designed to run sequentially, the intermediate tensor memory between these two models can be reused. The example code is as follows:
```
rknn_context ctx_a, ctx_b;
@@ -31,40 +32,36 @@ internal_mem_b = rknn_create_mem_from_fd(ctx_b, internal_mem_max->fd,
rknn_set_internal_mem(ctx_b, internal_mem_b);
```
Note: This demo uses the RKNN models from the rknn_mobilenet_demo and rknn_yolov5_demo examples in the parent directory. Make sure they exist before compiling.
## Android Demo
### Compilation
Modify the `build-android_<TARGET_PLATFORM>.sh` script based on the target platform and set the Android NDK path in `ANDROID_NDK_PATH`; <TARGET_PLATFORM> can be RK3566_RK3568, RK3562 or RK3588. For example:
```sh
ANDROID_NDK_PATH=~/opt/tool_chain/android-ndk-r17
```
Then execute:
```sh
./build-android_<TARGET_PLATFORM>.sh
```
### Pushing the Executable to the Device
Connect the device to the PC via USB and push the entire demo directory to `/data`:
```sh
adb root
adb remount
adb push install/rknn_internal_mem_reuse_demo_Android /data/
```
### Execution
```sh
adb shell
cd /data/rknn_internal_mem_reuse_demo_Android/
export LD_LIBRARY_PATH=./lib
@@ -73,40 +70,39 @@ export LD_LIBRARY_PATH=./lib
## Aarch64 Linux Demo
### Compilation
Modify the `build-linux_<TARGET_PLATFORM>.sh` script based on the target platform and set the path of the cross-compiler directory in `GCC_COMPILER`. For example:
```sh
export GCC_COMPILER=prebuilts/gcc/linux-x86/aarch64/gcc-buildroot-9.3.0-2020.03-x86_64_aarch64-rockchip-linux-gnu/bin/aarch64-linux
```
Then execute:
```sh
./build-linux_<TARGET_PLATFORM>.sh
```
### Pushing the Executable to the Device
Copy `install/rknn_internal_mem_reuse_demo_Linux` to the `/userdata/` directory on the target device.
- If using a Rockchip EVB board, use adb to push the files to the device:
```sh
adb push install/rknn_internal_mem_reuse_demo_Linux /userdata/
```
- If using another board, use scp or other methods to copy `install/rknn_internal_mem_reuse_demo_Linux` to the `/userdata/` directory on the device.
### Execution
```sh
adb shell
cd /userdata/rknn_internal_mem_reuse_demo_Linux/
export LD_LIBRARY_PATH=./lib
./rknn_internal_mem_reuse_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn model/bus.jpg model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/cat_224x224.jpg
```

112
examples/rknn_internal_mem_reuse_demo/README_CN.md

@@ -0,0 +1,112 @@
# rknn_internal_mem_reuse_demo
## Description
This project mainly demonstrates the usage of **RKNN_FLAG_MEM_ALLOC_OUTSIDE** and **rknn_set_internal_mem**.
RKNN_FLAG_MEM_ALLOC_OUTSIDE serves two main purposes:
- All memory is allocated by the user, which makes it easier to budget memory for the whole system.
- It enables memory reuse, especially on memory-constrained chips such as RV1103/RV1106.
Assume there are two models, A and B, that are designed to run sequentially; the intermediate tensor memory of the two models can then be reused. Example code is as follows:
```
rknn_context ctx_a, ctx_b;
rknn_init(&ctx_a, model_path_a, 0, RKNN_FLAG_MEM_ALLOC_OUTSIDE, NULL);
rknn_query(ctx_a, RKNN_QUERY_MEM_SIZE, &mem_size_a, sizeof(mem_size_a));
rknn_init(&ctx_b, model_path_b, 0, RKNN_FLAG_MEM_ALLOC_OUTSIDE, NULL);
rknn_query(ctx_b, RKNN_QUERY_MEM_SIZE, &mem_size_b, sizeof(mem_size_b));
max_internal_size = MAX(mem_size_a.total_internal_size, mem_size_b.total_internal_size);
internal_mem_max = rknn_create_mem(ctx_a, max_internal_size);
internal_mem_a = rknn_create_mem_from_fd(ctx_a, internal_mem_max->fd,
internal_mem_max->virt_addr, mem_size_a.total_internal_size, 0);
rknn_set_internal_mem(ctx_a, internal_mem_a);
internal_mem_b = rknn_create_mem_from_fd(ctx_b, internal_mem_max->fd,
internal_mem_max->virt_addr, mem_size_b.total_internal_size, 0);
rknn_set_internal_mem(ctx_b, internal_mem_b);
```
Note: This demo uses the RKNN models from the rknn_mobilenet_demo and rknn_yolov5_demo examples in the parent directory. Make sure they exist before compiling.
## Android Demo
### Compilation
Modify the Android NDK path `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` for the target platform; <TARGET_PLATFORM> can be RK3566_RK3568, RK3562 or RK3588. For example:
```sh
ANDROID_NDK_PATH=~/opt/tool_chain/android-ndk-r17
```
Then execute:
```sh
./build-android_<TARGET_PLATFORM>.sh
```
### Pushing the Executable to the Device
Connect the device to the PC via USB and push the entire demo directory to `/data`:
```sh
adb root
adb remount
adb push install/rknn_internal_mem_reuse_demo_Android /data/
```
### Execution
```sh
adb shell
cd /data/rknn_internal_mem_reuse_demo_Android/
export LD_LIBRARY_PATH=./lib
./rknn_internal_mem_reuse_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn model/bus.jpg model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/cat_224x224.jpg
```
## Aarch64 Linux Demo
### Compilation
Modify the cross-compiler path `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` for the target platform. For example:
```sh
export GCC_COMPILER=prebuilts/gcc/linux-x86/aarch64/gcc-buildroot-9.3.0-2020.03-x86_64_aarch64-rockchip-linux-gnu/bin/aarch64-linux
```
Then execute:
```sh
./build-linux_<TARGET_PLATFORM>.sh
```
### Pushing the Executable to the Device
Copy install/rknn_internal_mem_reuse_demo_Linux to the /userdata/ directory on the board.
- If using a Rockchip EVB board, push the files via adb:
```
adb push install/rknn_internal_mem_reuse_demo_Linux /userdata/
```
- If using another board, use scp or other methods to copy install/rknn_internal_mem_reuse_demo_Linux to the /userdata/ directory on the board.
### Execution
```sh
adb shell
cd /userdata/rknn_internal_mem_reuse_demo_Linux/
export LD_LIBRARY_PATH=./lib
./rknn_internal_mem_reuse_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn model/bus.jpg model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/cat_224x224.jpg
```

12
examples/rknn_matmul_api_demo/CMakeLists.txt

@@ -33,15 +33,15 @@ include_directories(${RKNN_API_PATH}/include)
set(CMAKE_INSTALL_RPATH "lib")
# rknn_matmul_api_demo
add_executable(rknn_matmul_api_demo
src/rknn_matmul_api_demo.cpp
)
target_link_libraries(rknn_matmul_api_demo
${RKNN_RT_LIB}
)
# install target and libraries
set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/install/rknn_matmul_api_demo_${CMAKE_SYSTEM_NAME})
install(TARGETS rknn_matmul_api_demo DESTINATION ./)
install(PROGRAMS ${RKNN_RT_LIB} DESTINATION lib)

31
examples/rknn_matmul_api_demo/README.md

@@ -1,17 +1,10 @@
rknn_matmul_api_demo is an example that performs int8 matrix multiplication using the RKNPU matmul C API.
Usage:
./rknn_matmul_api_demo
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588
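For reference, a minimal sketch of the call flow this demo uses (names follow rknn_matmul_api.h; the exact field names and signatures are assumptions and may differ between SDK versions):
```
rknn_matmul_ctx ctx;
rknn_matmul_info info;
memset(&info, 0, sizeof(info));
info.M = 4;
info.K = 64;
info.N = 32;
info.type = RKNN_TENSOR_INT8; // int8 A/B inputs, int32 C output
rknn_matmul_io_attr io_attr;
rknn_matmul_create(&ctx, &info, &io_attr);
// allocate A/B/C with the sizes the runtime reports, fill A and B, then bind and run
rknn_tensor_mem* A = rknn_create_mem(ctx, io_attr.A.size);
rknn_tensor_mem* B = rknn_create_mem(ctx, io_attr.B.size);
rknn_tensor_mem* C = rknn_create_mem(ctx, io_attr.C.size);
rknn_matmul_set_io_mem(ctx, A, &io_attr.A);
rknn_matmul_set_io_mem(ctx, B, &io_attr.B);
rknn_matmul_set_io_mem(ctx, C, &io_attr.C);
rknn_matmul_run(ctx);
rknn_matmul_destroy(ctx);
```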
# Aarch64 Linux Demo
## Build
@@ -26,28 +19,28 @@ then execute
## Install
Copy install/rknn_matmul_api_demo_Linux to the devices under /userdata/.
- If you use rockchip's evb board, you can use the following way:
Connect device and push the program to `/userdata`
```
adb push install/rknn_matmul_api_demo_Linux /userdata/
```
- If your board has sshd service, you can use scp or other methods to copy the program to the board.
## Run
```
adb shell
cd /userdata/rknn_matmul_api_demo_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_matmul_api_demo
```
# Android Demo
@@ -64,17 +57,17 @@ modify `ANDROID_NDK_PATH` on `build-android_<TARGET_PLATFORM>.sh` for target pla
connect device and push build output into `/data`
```
adb push install/rknn_matmul_api_demo_Android /data/
```
## Run
```
adb shell
cd /data/rknn_matmul_api_demo_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_matmul_api_demo
```

74
examples/rknn_matmul_api_demo/README_CN.md

@@ -0,0 +1,74 @@
rknn_matmul_api_demo is an example that performs int8 matrix multiplication on the NPU using the matmul C API.
Usage:
```
./rknn_matmul_api_demo
```
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` to the cross-compiler path, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Copy install/rknn_matmul_api_demo_Linux to the device.
- If you use a Rockchip EVB board, you can use the following commands:
Connect the device and push the program to `/userdata`
```
adb push install/rknn_matmul_api_demo_Linux /userdata/
```
- If your board has an sshd service, you can use scp or other methods to transfer the program to the board.
## Run
```
adb shell
cd /userdata/rknn_matmul_api_demo_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_matmul_api_demo
```
# Android Demo
## Build
Modify `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` to the NDK for the platform, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the program to `/data`
```
adb push install/rknn_matmul_api_demo_Android /data/
```
## Run
```
adb shell
cd /data/rknn_matmul_api_demo_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_matmul_api_demo
```

185
examples/rknn_matmul_api_demo/src/Float16.h

@@ -0,0 +1,185 @@
#ifndef _RKNPU2_FLOAT16_H_
#define _RKNPU2_FLOAT16_H_
namespace rknpu2 {
using ushort = unsigned short;
typedef union suf32
{
int i;
unsigned u;
float f;
} suf32;
class float16
{
public:
float16() {}
explicit float16(float x) { w = bits(x); }
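// Bit-level conversion from the stored IEEE-754 half (1 sign / 5 exponent / 10 mantissa bits)
// to float, including subnormal and Inf/NaN handling.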
operator float() const
{
suf32 out;
unsigned t = ((w & 0x7fff) << 13) + 0x38000000;
unsigned sign = (w & 0x8000) << 16;
unsigned e = w & 0x7c00;
out.u = t + (1 << 23);
out.u = (e >= 0x7c00 ? t + 0x38000000 : e == 0 ? (static_cast<void>(out.f -= 6.103515625e-05f), out.u) : t) | sign;
return out.f;
}
static float16 fromBits(ushort b)
{
float16 result;
result.w = b;
return result;
}
static float16 zero()
{
float16 result;
result.w = (ushort)0;
return result;
}
ushort bits() const { return w; }
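// Bit-level conversion from float to IEEE-754 half with round-to-nearest-even;
// values below the half subnormal range flush toward zero and overflow saturates to Inf/NaN.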
static ushort bits(float x)
{
suf32 in;
in.f = x;
unsigned sign = in.u & 0x80000000;
in.u ^= sign;
ushort w;
if (in.u >= 0x47800000)
w = (ushort)(in.u > 0x7f800000 ? 0x7e00 : 0x7c00);
else {
if (in.u < 0x38800000) {
in.f += 0.5f;
w = (ushort)(in.u - 0x3f000000);
} else {
unsigned t = in.u + 0xc8000fff;
w = (ushort)((t + ((in.u >> 13) & 1)) >> 13);
}
}
w = (ushort)(w | (sign >> 16));
return w;
}
float16& operator=(float x)
{
w = bits(x);
return *this;
}
float16& operator+=(float x)
{
w = bits(float(*this) + x);
return *this;
}
float16& operator/=(float x)
{
w = bits(float(*this) / x);
return *this;
}
inline bool is_nan() const { return ((w & 0x7c00u) == 0x7c00u) && ((w & 0x03ffu) != 0x0000u); }
inline bool greater(const float16& x) const
{
bool sign = w & 0x8000;
bool sign_x = x.w & 0x8000;
if (sign) {
if (sign_x)
return w < x.w;
else
return false;
} else {
if (sign_x)
/* Signed zeros are equal, have to check for it */
return (w != 0 || x.w != 0x8000);
else
return w > x.w;
}
return false;
}
inline bool less(const float16& x) const
{
bool sign = w & 0x8000;
bool sign_x = x.w & 0x8000;
if (sign) {
if (sign_x)
return w > x.w;
else
/* Signed zeros are equal, have to check for it */
return (w != 0x8000 || x.w != 0);
} else {
if (sign_x)
return false;
else
return w < x.w;
}
return false;
}
inline bool operator>(const float16& x) const
{
if (is_nan() || x.is_nan()) {
return false;
}
return greater(x);
}
inline bool operator<(const float16& x) const
{
if (is_nan() || x.is_nan()) {
return false;
}
return less(x);
}
inline bool operator>=(const float16& x) const
{
if (is_nan() || x.is_nan()) {
return false;
}
return !less(x);
}
inline bool operator<=(const float16& x) const
{
if (is_nan() || x.is_nan()) {
return false;
}
return !greater(x);
}
inline bool operator==(const float16& x) const
{
/*
* The equality cases are as follows:
* - If either value is NaN, never equal.
* - If the values are equal, equal.
* - If the values are both signed zeros, equal.
*/
if (is_nan() || x.is_nan()) {
return false;
}
return (w == x.w || ((w | x.w) & 0x7fff) == 0);
}
inline bool operator!=(const float16& x) const { return !((*this) == x); }
protected:
ushort w = 0;
};
} // namespace rknpu2
#endif /* _RKNPU2_FLOAT16_H_ */

149
examples/rknn_matmul_api_demo/src/rknn_matmul_demo.cpp → examples/rknn_matmul_api_demo/src/rknn_matmul_api_demo.cpp

@@ -13,6 +13,7 @@
/*-------------------------------------------
Includes
-------------------------------------------*/
#include "Float16.h"
#include "rknn_matmul_api.h"
#include <stdio.h>
@@ -20,6 +21,10 @@
#include <string.h>
#include <sys/time.h>
#include <random>
#include <vector>
using namespace rknpu2;
/*-------------------------------------------
Functions
-------------------------------------------*/
@@ -30,6 +35,41 @@ static inline int64_t getCurrentTimeUs()
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// CPU reference: matrix multiplication over flat (1-D) row-major arrays
template <typename Ti, typename To>
std::vector<To> matrixMultiply(const Ti* A, const Ti* B, int M, int K, int N)
{
std::vector<To> result(M * N, 0);
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
float sum = 0;
for (int k = 0; k < K; ++k) {
sum += (float)A[i * K + k] * (float)B[k * N + j];
}
result[i * N + j] = sum;
}
}
return result;
}
template <typename T>
bool arraysEqual(const std::vector<T>& arr1, const std::vector<T>& arr2, float eps = 0.0001f)
{
if (arr1.size() != arr2.size()) {
return false;
}
for (size_t i = 0; i < arr1.size(); ++i) {
if (std::abs(arr1[i] - arr2[i]) > eps) {
return false;
}
}
return true;
}
static const char* get_dims_string(rknn_matmul_tensor_attr* attr)
{
if (!attr->n_dims) {
@@ -61,9 +101,13 @@ static void dump_matmul_tensor(rknn_tensor_mem* tensor, rknn_matmul_tensor_attr*
for (uint32_t j = 0; j < attr->dims[1]; ++j) {
void* virt_addr = (void*)((size_t)tensor->virt_addr + tensor->offset);
if (attr->type == RKNN_TENSOR_INT8) {
printf(" %2d", ((int8_t*)virt_addr)[i * attr->dims[1] + j]);
printf(" %4d", ((int8_t*)virt_addr)[i * attr->dims[1] + j]);
} else if (attr->type == RKNN_TENSOR_INT32) {
printf(" %3d", ((int32_t*)virt_addr)[i * attr->dims[1] + j]);
printf(" %4d", ((int32_t*)virt_addr)[i * attr->dims[1] + j]);
} else if (attr->type == RKNN_TENSOR_FLOAT16) {
printf(" %4.2f", (float)(((float16*)virt_addr)[i * attr->dims[1] + j]));
} else if (attr->type == RKNN_TENSOR_FLOAT32) {
printf(" %4.2f", ((float*)virt_addr)[i * attr->dims[1] + j]);
}
}
printf("\n");
@@ -77,9 +121,13 @@ static void dump_matmul_tensor(rknn_tensor_mem* tensor, rknn_matmul_tensor_attr*
for (uint32_t k = 0; k < attr->dims[2]; ++k) {
void* virt_addr = (void*)((size_t)tensor->virt_addr + tensor->offset);
if (attr->type == RKNN_TENSOR_INT8) {
printf(" %2d ", ((int8_t*)virt_addr)[(i * attr->dims[1] + j) * attr->dims[2] + k]);
printf(" %4d ", ((int8_t*)virt_addr)[(i * attr->dims[1] + j) * attr->dims[2] + k]);
} else if (attr->type == RKNN_TENSOR_INT32) {
printf(" %2d ", ((int32_t*)virt_addr)[(i * attr->dims[1] + j) * attr->dims[2] + k]);
printf(" %4d ", ((int32_t*)virt_addr)[(i * attr->dims[1] + j) * attr->dims[2] + k]);
} else if (attr->type == RKNN_TENSOR_FLOAT16) {
printf(" %4.2f ", (float)(((float16*)virt_addr)[(i * attr->dims[1] + j) * attr->dims[2] + k]));
} else if (attr->type == RKNN_TENSOR_FLOAT32) {
printf(" %4.2f ", ((float*)virt_addr)[(i * attr->dims[1] + j) * attr->dims[2] + k]);
}
}
printf("\n");
@@ -99,11 +147,18 @@ static void dump_matmul_tensor(rknn_tensor_mem* tensor, rknn_matmul_tensor_attr*
for (uint32_t kk = 0; kk < attr->dims[3]; kk++) {
void* virt_addr = (void*)((size_t)tensor->virt_addr + tensor->offset);
if (attr->type == RKNN_TENSOR_INT8) {
printf(" %2d ",
printf(" %4d ",
((int8_t*)virt_addr)[((n * attr->dims[1] + k) * attr->dims[2] + nn) * attr->dims[3] + kk]);
} else if (attr->type == RKNN_TENSOR_INT32) {
printf(" %2d ",
printf(" %4d ",
((int32_t*)virt_addr)[((n * attr->dims[1] + k) * attr->dims[2] + nn) * attr->dims[3] + kk]);
} else if (attr->type == RKNN_TENSOR_FLOAT16) {
printf(
" %4.2f ",
(float)(((float16*)virt_addr)[((n * attr->dims[1] + k) * attr->dims[2] + nn) * attr->dims[3] + kk]));
} else if (attr->type == RKNN_TENSOR_FLOAT32) {
printf(" %4.2f ",
((float*)virt_addr)[((n * attr->dims[1] + k) * attr->dims[2] + nn) * attr->dims[3] + kk]);
}
}
printf("\n");
@@ -174,13 +229,48 @@ int main(int argc, char* argv[])
dump_matmul_tensor_attr(&io_attr.B);
dump_matmul_tensor_attr(&io_attr.C);
// Seed the random number generators
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> int_dis(-128, 127);
std::normal_distribution<> float_dis(0.0, 1.0);
// Create A
rknn_tensor_mem* A = rknn_create_mem(ctx, io_attr.A.size);
if (A == NULL) {
printf("rknn_create_mem fail!\n");
return -1;
}
memset(A->virt_addr, 1, A->size);
// normal layout
if (io_attr.A.n_dims == 2) {
for (uint32_t i = 0; i < io_attr.A.dims[0]; ++i) {
for (uint32_t j = 0; j < io_attr.A.dims[1]; ++j) {
if (info.type == RKNN_TENSOR_INT8) {
((int8_t*)A->virt_addr)[i * io_attr.A.dims[1] + j] = int_dis(gen);
} else if (info.type == RKNN_TENSOR_FLOAT16) {
((float16*)A->virt_addr)[i * io_attr.A.dims[1] + j] = float_dis(gen);
}
}
}
}
// perf layout
else if (io_attr.A.n_dims == 4) {
for (uint32_t n = 0; n < io_attr.A.dims[0]; ++n) {
for (uint32_t k = 0; k < io_attr.A.dims[1]; ++k) {
for (uint32_t nn = 0; nn < io_attr.A.dims[2]; ++nn) {
for (uint32_t kk = 0; kk < io_attr.A.dims[3]; ++kk) {
if (info.type == RKNN_TENSOR_INT8) {
((int8_t*)A->virt_addr)[((n * io_attr.A.dims[1] + k) * io_attr.A.dims[2] + nn) * io_attr.A.dims[3] + kk] =
1;
} else if (info.type == RKNN_TENSOR_FLOAT16) {
((float16*)
A->virt_addr)[((n * io_attr.A.dims[1] + k) * io_attr.A.dims[2] + nn) * io_attr.A.dims[3] + kk] = 1;
}
}
}
}
}
}
// Create B
rknn_tensor_mem* B = rknn_create_mem(ctx, io_attr.B.size);
@@ -193,7 +283,11 @@ int main(int argc, char* argv[])
if (io_attr.B.n_dims == 2) {
for (uint32_t i = 0; i < io_attr.B.dims[1]; ++i) {
for (uint32_t j = 0; j < io_attr.B.dims[0]; ++j) {
if (info.type == RKNN_TENSOR_INT8) {
((int8_t*)B->virt_addr)[i * io_attr.B.dims[0] + j] = int_dis(gen);
} else if (info.type == RKNN_TENSOR_FLOAT16) {
((float16*)B->virt_addr)[i * io_attr.B.dims[0] + j] = float_dis(gen);
}
}
}
}
@@ -203,8 +297,14 @@ int main(int argc, char* argv[])
for (uint32_t k = 0; k < io_attr.B.dims[1]; ++k) {
for (uint32_t nn = 0; nn < io_attr.B.dims[2]; ++nn) {
for (uint32_t kk = 0; kk < io_attr.B.dims[3]; ++kk) {
if (info.type == RKNN_TENSOR_INT8) {
((int8_t*)B->virt_addr)[((n * io_attr.B.dims[1] + k) * io_attr.B.dims[2] + nn) * io_attr.B.dims[3] + kk] =
nn + 1;
} else if (info.type == RKNN_TENSOR_FLOAT16) {
((float16*)
B->virt_addr)[((n * io_attr.B.dims[1] + k) * io_attr.B.dims[2] + nn) * io_attr.B.dims[3] + kk] =
nn + 1;
}
}
}
}
@@ -258,6 +358,35 @@ int main(int argc, char* argv[])
dump_matmul_tensor(B, &io_attr.B);
dump_matmul_tensor(C, &io_attr.C);
// compare NPU res vs CPU res
if (io_attr.A.n_dims == 2 && io_attr.B.n_dims == 2) {
size_t C_elems = 1;
for (int i = 0; i < io_attr.C.n_dims; ++i) {
C_elems *= io_attr.C.dims[i];
}
if (info.type == RKNN_TENSOR_INT8) {
std::vector<int32_t> cpu_res;
cpu_res.reserve(C_elems);
cpu_res = matrixMultiply<int8_t, int32_t>((const int8_t*)A->virt_addr, (const int8_t*)B->virt_addr, M, K, N);
std::vector<int32_t> npu_res((int32_t*)C->virt_addr, (int32_t*)C->virt_addr + C_elems);
if (arraysEqual<int32_t>(cpu_res, npu_res)) {
printf("int8 matmul result is correct\n");
} else {
printf("int8 matmul result is wrong\n");
}
} else if (info.type == RKNN_TENSOR_FLOAT16) {
std::vector<float> cpu_res;
cpu_res.reserve(C_elems);
cpu_res = matrixMultiply<float16, float>((const float16*)A->virt_addr, (const float16*)B->virt_addr, M, K, N);
std::vector<float> npu_res((float*)C->virt_addr, (float*)C->virt_addr + C_elems);
if (arraysEqual<float>(cpu_res, npu_res)) {
printf("fp16 matmul result is correct\n");
} else {
printf("fp16 matmul result is wrong\n");
}
}
}
// destroy
rknn_destroy_mem(ctx, A);
rknn_destroy_mem(ctx, B);

5
examples/rknn_mobilenet_demo/README.md

@@ -1,4 +1,7 @@
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# rknn model source description
The rknn model of each platform comes from the examples/tflite/mobilenet_v1 example of RKNN-Toolkit2.
# Aarch64 Linux Demo

68
examples/rknn_mobilenet_demo/README_CN.md

@@ -0,0 +1,68 @@
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# rknn model source description
The rknn model of each platform comes from the examples/tflite/mobilenet_v1 example of RKNN-Toolkit2.
# Aarch64 Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` to the cross-compiler path, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Copy install/rknn_mobilenet_demo_Linux to the device.
- If you use a Rockchip EVB board, you can use the following commands:
Connect the device and push the program and model to `/userdata`
```
adb push install/rknn_mobilenet_demo_Linux /userdata/
```
- If your board has an sshd service, you can use scp or other methods to transfer the program and model to the board.
## Run
```
adb shell
cd /userdata/rknn_mobilenet_demo_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_mobilenet_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```
# Android Demo
## Build
Modify `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` to the NDK for the platform, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the program and model to `/data`
```
adb push install/rknn_mobilenet_demo_Android /data/
```
## Run
```
adb shell
cd /data/rknn_mobilenet_demo_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_mobilenet_demo model/<TARGET_PLATFORM>/mobilenet_v1.rknn model/dog_224x224.jpg
```

3
examples/rknn_mobilenet_demo/src/main.cc

@@ -125,12 +125,13 @@ int main(int argc, char** argv)
}
cv::Mat orig_img_rgb;
// the rknn model comes from the examples/tflite/mobilenet_v1 example of RKNN-Toolkit2;
// keep the input channel order (RGB) consistent with the python code
cv::cvtColor(orig_img, orig_img_rgb, cv::COLOR_BGR2RGB);
cv::Mat img = orig_img_rgb.clone();
if (orig_img.cols != MODEL_IN_WIDTH || orig_img.rows != MODEL_IN_HEIGHT) {
printf("resize %d %d to %d %d\n", orig_img.cols, orig_img.rows, MODEL_IN_WIDTH, MODEL_IN_HEIGHT);
cv::resize(orig_img_rgb, img, cv::Size(MODEL_IN_WIDTH, MODEL_IN_HEIGHT), 0, 0, cv::INTER_LINEAR);
}
// Load RKNN Model

2
examples/rknn_multiple_input_demo/README.md

@@ -1,4 +1,4 @@
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build

69
examples/rknn_multiple_input_demo/README_CN.md

@@ -0,0 +1,69 @@
The following <TARGET_PLATFORM> represents RK3566_RK3568, RK3562 or RK3588.
# Aarch64 Linux Demo
## Build
Modify `GCC_COMPILER` in `build-linux_<TARGET_PLATFORM>.sh` to the cross-compiler path, then execute
```
./build-linux_<TARGET_PLATFORM>.sh
```
## Install
Copy install/rknn_multiple_input_demo_Linux to the device.
- If you use a Rockchip EVB board, you can use the following commands:
Connect the device and push the program and model to `/userdata`
```
adb push install/rknn_multiple_input_demo_Linux /userdata/
```
- If your board has an sshd service, you can use scp or other methods to transfer the program and model to the board.
## Run
```
adb shell
cd /userdata/rknn_multiple_input_demo_Linux/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_multiple_input_demo model/<TARGET_PLATFORM>/multiple_input_demo.rknn model/input1.bin#model/input2.bin
```
# Android Demo
## Build
Modify `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` to the NDK for the platform, then execute
```
./build-android_<TARGET_PLATFORM>.sh
```
## Install
Connect the device and push the build output to `/data`
```
adb push install/rknn_multiple_input_demo_Android /data/
```
## Run
```
adb shell
cd /data/rknn_multiple_input_demo_Android/
```
```
export LD_LIBRARY_PATH=./lib
./rknn_multiple_input_demo model/<TARGET_PLATFORM>/multiple_input_demo.rknn model/input1.bin#model/input2.bin
```

39
examples/rknn_yolov5_android_apk_demo/README.md

@@ -1,31 +1,32 @@
# Introduction
- rknn_yolov5_android_apk_demo shows how to deploy the yolov5s model in an Android APK with the NPU on RK3566_RK3568, RK3562 or RK3588
# Pre-requisite
- Use Android Studio 2021.2.1 (Windows) or 2021.3.1 (Linux/macOS), or a higher version, to build this project
- yolov5s.rknn is required; it can be converted from yolov5s.onnx, in which the SiLU activation layers were replaced with ReLU. For details, refer to: https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo
# Structure
## The demo can be split into two parts:
- JAVA: com.rockchip.gpadc.demo: reads camera input, invokes the JNI interface to run inference, and shows the result
- JNI: the interface that invokes rknnrt to run the actual model inference
# Permission
This demo requires the Camera permission and Read/Write permission for EXTERNAL_STORAGE
# FAQ
## Compiling error like "files found with path 'lib/arm64-v8a/xxx.so' from inputs:"
```
Execution failed for task ':app:mergeDebugNativeLibs'.
> A failure occurred while executing com.android.build.gradle.internal.tasks.MergeNativeLibsTask$MergeNativeLibsTaskWorkAction
@@ -39,7 +40,7 @@ Execution failed for task ':app:mergeDebugNativeLibs'.
> Run with --info or --debug option to get more log output.
> Run with --scan to get full insights.
```
Alternatively,
```
2 files found with path 'lib/arm64-v8a/xxx.so' from inputs:
- rknn_yolov5_android_apk_demo/app/build/intermediates/merged_jni_libs/debug/out/arm64-v8a/librga.so
@@ -47,11 +48,11 @@ Execution failed for task ':app:mergeDebugNativeLibs'.
If you are using jniLibs and CMake IMPORTED targets, see
https://developer.android.com/r/tools/jniLibs-vs-imported-targets
```
This requires adding "jniLibs.srcDirs = ['libs']" to the app/build.gradle file; for the reason, see https://developer.android.com/r/tools/jniLibs-vs-imported-targets
## Errors like "E/SurfaceView: Exception configuring surface" when the app launches
```
D/rkyolo: camera facing: 1
@@ -75,4 +76,4 @@ E/SurfaceView: Exception configuring surface
at com.rockchip.gpadc.demo.MainActivity$TSurfaceHolderCallback.surfaceCreated(MainActivity.java:199)
```
This is caused by the app requesting a camera resolution the camera does not support. Modify CAMERA_PREVIEW_WIDTH and CAMERA_PREVIEW_HEIGHT in "app/src/main/java/com/rockchip/gpadc/demo/rga/HALDefine.java" to a resolution the camera supports.

78
examples/rknn_yolov5_android_apk_demo/README_CN.md

@@ -0,0 +1,78 @@
# Introduction
- rknn_yolov5_android_apk_demo is a demo showing how to use the NPU on RK3566_RK3568, RK3562 or RK3588; its base model is yolov5s
# Pre-requisite
- The project can be built with Android Studio 2021.2.1 (Windows) or Android Studio 2021.3.1 (Linux/macOS)
- yolov5s.rknn is converted from yolov5s.onnx using rknn toolkit2, with the SiLU activations replaced by ReLU; for the conversion steps refer to https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo
# Structure
## The code consists of two parts:
- JAVA: com.rockchip.gpadc.demo: reads camera input, invokes JNI to run inference, and displays the result
- JNI: invokes rknnrt to run the actual inference
# Permission
This app requires the Camera permission and Read/Write permission for EXTERNAL_STORAGE
# FAQ
## Compiling error like "files found with path 'lib/arm64-v8a/xxx.so' from inputs:"
```
Execution failed for task ':app:mergeDebugNativeLibs'.
> A failure occurred while executing com.android.build.gradle.internal.tasks.MergeNativeLibsTask$MergeNativeLibsTaskWorkAction
> 2 files found with path 'lib/arm64-v8a/librga.so' from inputs:
- rknpu2/examples/rknn_yolov5_android_apk_demo/app/build/intermediates/merged_jni_libs/debug/out/arm64-v8a/librga.so
- rknpu2/examples/rknn_yolov5_android_apk_demo/app/build/intermediates/cxx/Debug/3f40b265/obj/arm64-v8a/librga.so
If you are using jniLibs and CMake IMPORTED targets, see
https://developer.android.com/r/tools/jniLibs-vs-imported-targets
* Try:
> Run with --info or --debug option to get more log output.
> Run with --scan to get full insights.
```
Alternatively,
```
2 files found with path 'lib/arm64-v8a/xxx.so' from inputs:
- rknn_yolov5_android_apk_demo/app/build/intermediates/merged_jni_libs/debug/out/arm64-v8a/librga.so
- rknn_yolov5_android_apk_demo/app/build/intermediates/cxx/Debug/3f40b265/obj/arm64-v8a/librga.so
If you are using jniLibs and CMake IMPORTED targets, see
https://developer.android.com/r/tools/jniLibs-vs-imported-targets
```
This requires adding "jniLibs.srcDirs = ['libs']" to app/build.gradle; for the reason, see https://developer.android.com/r/tools/jniLibs-vs-imported-targets
## Errors like "E/SurfaceView: Exception configuring surface" when the app launches
```
D/rkyolo: camera facing: 1
V/rkyolo: Camera Supported Preview Size = 160x90
V/rkyolo: Camera Supported Preview Size = 320x180
V/rkyolo: Camera Supported Preview Size = 432x240
V/rkyolo: Camera Supported Preview Size = 640x360
V/rkyolo: Camera Supported Preview Size = 800x448
V/rkyolo: Camera Supported Preview Size = 864x480
V/rkyolo: Camera Supported Preview Size = 1024x576
V/rkyolo: Camera Supported Preview Size = 1280x720
V/rkyolo: Camera Supported Preview Size = 1600x896
V/rkyolo: Camera Supported Preview Size = 1920x1080
E/SurfaceView: Exception configuring surface
java.lang.RuntimeException: setParameters failed
at android.hardware.Camera.native_setParameters(Native Method)
at android.hardware.Camera.setParameters(Camera.java:2068)
at com.rockchip.gpadc.demo.MainActivity.setCameraParameters(MainActivity.java:295)
at com.rockchip.gpadc.demo.MainActivity.startCamera(MainActivity.java:245)
at com.rockchip.gpadc.demo.MainActivity.access$300(MainActivity.java:47)
at com.rockchip.gpadc.demo.MainActivity$TSurfaceHolderCallback.surfaceCreated(MainActivity.java:199)
```
This is caused by the app requesting a camera resolution the camera does not support. Modify CAMERA_PREVIEW_WIDTH and CAMERA_PREVIEW_HEIGHT in "app/src/main/java/com/rockchip/gpadc/demo/rga/HALDefine.java" to a resolution the camera supports.

13
examples/rknn_yolov5_android_apk_demo/app/src/main/cpp/post_process.cc

@@ -147,8 +147,7 @@ static int process(int8_t *input, int *anchor, int grid_h, int grid_w, int heigh
int validCount = 0;
int grid_len = grid_h * grid_w;
int8_t thres_i8 = qnt_f32_to_affine(threshold, zp, scale);
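// note: raw scores are compared against the quantized threshold directly here, presumably
// because the updated exported models already apply sigmoid inside the graph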
for (int a = 0; a < 3; a++)
{
for (int i = 0; i < grid_h; i++)
@@ -160,10 +159,10 @@ static int process(int8_t *input, int *anchor, int grid_h, int grid_w, int heigh
{
int offset = (PROP_BOX_SIZE * a) * grid_len + i * grid_w + j;
int8_t *in_ptr = input + offset;
float box_x = (deqnt_affine_to_f32(*in_ptr, zp, scale)) * 2.0 - 0.5;
float box_y = (deqnt_affine_to_f32(in_ptr[grid_len], zp, scale)) * 2.0 - 0.5;
float box_w = (deqnt_affine_to_f32(in_ptr[2 * grid_len], zp, scale)) * 2.0;
float box_h = (deqnt_affine_to_f32(in_ptr[3 * grid_len], zp, scale)) * 2.0;
box_x = (box_x + j) * (float)stride;
box_y = (box_y + i) * (float)stride;
box_w = box_w * box_w * (float)anchor[a * 2];
@@ -189,7 +188,7 @@ static int process(int8_t *input, int *anchor, int grid_h, int grid_w, int heigh
boxes.push_back(box_y);
boxes.push_back(box_w);
boxes.push_back(box_h);
objProbs.push_back((max_class_prob * box_prob));
classId.push_back(maxClassId);
validCount++;
}

BIN
examples/rknn_yolov5_android_apk_demo/app/src/main/res/raw/yolov5s_rk3562.rknn

Binary file not shown.

BIN
examples/rknn_yolov5_android_apk_demo/app/src/main/res/raw/yolov5s_rk3566.rknn

Binary file not shown.

BIN
examples/rknn_yolov5_android_apk_demo/app/src/main/res/raw/yolov5s_rk3588.rknn

Binary file not shown.

68
examples/rknn_yolov5_demo/README.md

@@ -1,40 +1,39 @@
# Yolo-v5 demo
## Guide for exporting rknn model
Please refer to this link: https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo
## Precautions
1. Use rknn-toolkit2 version greater than or equal to **1.4.0**.
2. When switching to a model you trained yourself, pay attention to aligning post-processing parameters such as the anchors, otherwise post-processing will be parsed incorrectly.
3. The official and RK pre-trained models both detect 80 classes of targets. If you train your own model, you need to change OBJ_CLASS_NUM as well as the NMS_THRESH and BOX_THRESH post-processing parameters in include/postprocess.h (see the sketch after this list).
4. The demo needs the support of librga.so; please refer to https://github.com/airockchip/librga for compiling and using it.
5. Due to hardware limitations, this demo moves the post-processing of the yolov5 model to the CPU by default. The models attached to this demo all use ReLU as the activation function; compared with SiLU, accuracy is slightly lower but performance is greatly improved.
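For reference, the post-processing parameters mentioned above live in include/postprocess.h and look roughly like this (the exact values below are assumed defaults for the 80-class COCO models; adjust them for your own model):
```
#define OBJ_CLASS_NUM 80
#define NMS_THRESH 0.45
#define BOX_THRESH 0.25
```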
## Android Demo
### Compiling && Building
Modify the Android NDK path `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh` for the target platform; <TARGET_PLATFORM> can be RK3566_RK3568, RK3562 or RK3588. For example:
```sh
ANDROID_NDK_PATH=~/opt/tool_chain/android-ndk-r17
```
then run the script:
```sh
./build-android_<TARGET_PLATFORM>.sh
```
### Push all build output files to the board
Connect the board's USB port to the PC, then push all demo files to the board:
```sh
adb root
@@ -42,7 +41,7 @@ adb remount
adb push install/rknn_yolov5_demo /data/
```
### Running
```sh
adb shell
@@ -54,34 +53,34 @@ export LD_LIBRARY_PATH=./lib
## Aarch64 Linux Demo
### Compiling && Building
Modify the cross-compiler path `TOOL_CHAIN` in `build-linux_<TARGET_PLATFORM>.sh` for the target platform, for example:
```sh
export TOOL_CHAIN=~/opt/tool_chain/gcc-9.3.0-x86_64_aarch64-linux-gnu/host
```
then run the script:
```sh
./build-linux_<TARGET_PLATFORM>.sh
```
### Push all build output files to the board
Push install/rknn_yolov5_demo_Linux to the board:
- If using a Rockchip EVB board, push the files via adb:
```
adb push install/rknn_yolov5_demo_Linux /userdata/
```
- For other boards, use scp or another approach to push all files under install/rknn_yolov5_demo_Linux to '/userdata'
### Running
```sh
adb shell
@@ -98,31 +97,30 @@ Using the following commands to add to LD_LIBRARY_PATH.
export LD_LIBRARY_PATH=./lib:<LOCATION_LIBRGA.SO>
```
## Guide for Video Demo
- H264
```
./rknn_yolov5_video_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn xxx.h264 264
```
Note that an H264 elementary stream is required; it can be converted with ffmpeg:
```
ffmpeg -i xxx.mp4 -vcodec h264 out.h264
```
- H265
```
./rknn_yolov5_video_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn xxx.hevc 265
```
Note that an H265 elementary stream is required; it can be converted with ffmpeg:
```
ffmpeg -i xxx.mp4 -vcodec hevc out.hevc
```
- RTSP
```
./rknn_yolov5_video_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn <RTSP_URL> 265
```
### Remark
- Select the librga library matching the system's RGA driver; for the dependencies refer to: https://github.com/airockchip/librga
- **RK3562 currently only supports H264 video streams**
- **The RTSP video stream demo is only supported on Linux, not yet on Android**

127
examples/rknn_yolov5_demo/README_CN.md

@@ -0,0 +1,127 @@
# Yolo-v5 demo
## Steps to export the rknn model
Please refer to https://github.com/airockchip/rknn_model_zoo/tree/main/models/CV/object_detection/yolo
## Notes
1. Use rknn-toolkit2 version 1.4.0 or later.
2. When switching to your own trained model, make sure the anchors and other post-processing parameters are aligned; otherwise post-processing will parse the results incorrectly.
3. Both the official and Rockchip pre-trained models detect 80 classes of objects. If you train your own model, update the OBJ_CLASS_NUM, NMS_THRESH, and BOX_THRESH post-processing parameters in include/postprocess.h.
4. The demo requires librga.so; refer to https://github.com/airockchip/librga for build and usage instructions.
5. Due to hardware limitations, this demo moves the post-processing part of the yolov5 model to the CPU by default. The models bundled with this demo all use ReLU as the activation function; compared with SiLU, accuracy is slightly lower but performance is much higher.
## Android Demo
### Compiling and Building
For the target platform, modify the Android NDK path `ANDROID_NDK_PATH` in `build-android_<TARGET_PLATFORM>.sh`, where `<TARGET_PLATFORM>` can be RK3566_RK3568, RK3562, or RK3588. For example:
```sh
ANDROID_NDK_PATH=~/opt/tool_chain/android-ndk-r17
```
Then run:
```sh
./build-android_<TARGET_PLATFORM>.sh
```
### Push all build output files to the board
Connect the board's USB port to the PC, then push the whole demo directory to `/data`:
```sh
adb root
adb remount
adb push install/rknn_yolov5_demo /data/
```
### Running
```sh
adb shell
cd /data/rknn_yolov5_demo/
export LD_LIBRARY_PATH=./lib
./rknn_yolov5_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn model/bus.jpg
```
## Aarch64 Linux Demo
### Compiling and Building
For the target platform, modify the cross-compiler path `TOOL_CHAIN` in `build-linux_<TARGET_PLATFORM>.sh`. For example:
```sh
export TOOL_CHAIN=~/opt/tool_chain/gcc-9.3.0-x86_64_aarch64-linux-gnu/host
```
Then run:
```sh
./build-linux_<TARGET_PLATFORM>.sh
```
### Push all build output files to the board
Copy install/rknn_yolov5_demo_Linux to the /userdata/ directory on the board.
- If you are using a Rockchip EVB board, you can push the files with adb:
```
adb push install/rknn_yolov5_demo_Linux /userdata/
```
- For other boards, use scp or another method to copy install/rknn_yolov5_demo_Linux to the board's /userdata/ directory.
### Running
```sh
adb shell
cd /userdata/rknn_yolov5_demo_Linux/
export LD_LIBRARY_PATH=./lib
./rknn_yolov5_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn model/bus.jpg
```
Note: If librga.so is not found in the lib folder, locate it and add its directory to LD_LIBRARY_PATH with the following command:
```sh
export LD_LIBRARY_PATH=./lib:<LOCATION_LIBRGA.SO>
```
## Reference commands for the video stream demo:
- H.264 video
```
./rknn_yolov5_video_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn xxx.h264 264
```
The input must be an H.264 elementary stream; you can convert a video with ffmpeg:
```
ffmpeg -i xxx.mp4 -vcodec h264 out.h264
```
- H.265 video
```
./rknn_yolov5_video_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn xxx.hevc 265
```
The input must be an H.265 elementary stream; you can convert a video with ffmpeg:
```
ffmpeg -i xxx.mp4 -vcodec hevc out.hevc
```
- RTSP stream
```
./rknn_yolov5_video_demo model/<TARGET_PLATFORM>/yolov5s-640-640.rknn <RTSP_URL> 265
```
### Notes
- Choose the librga library that matches your system's RGA driver; refer to https://github.com/airockchip/librga for the specific dependencies.
- **RK3562 currently supports only H.264 video streams**
- **The RTSP video stream demo is supported only on Linux; it is not yet supported on Android**

17
examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/README.md

@@ -1,16 +1,17 @@
# onnx model
* onnx_models/yolov5s_relu.onnx
The model comes from https://github.com/airockchip/rknn_model_zoo
# convert rknn model
1. Modify the following parameter of onnx2rknn.py for the target platform; for example, for an RK3566_RK3568 model:
```python
platform="rk3566"
```
2. Run python onnx2rknn.py
3. The rknn model is generated in the rknn_models directory

18
examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/README_CN.md

@@ -0,0 +1,18 @@
# onnx model
* onnx_models/yolov5s_relu.onnx
Model source: https://github.com/airockchip/rknn_model_zoo
# convert rknn model
1. Modify the following parameter of onnx2rknn.py for the target platform; for example, for an RK3566_RK3568 model:
```python
platform="rk3566"
```
2. Run python onnx2rknn.py
3. The rknn model is generated in the rknn_models directory

6
examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx2rknn.py

@@ -11,7 +11,8 @@ if __name__ == '__main__':
exp = 'yolov5s'
Width = 640
Height = 640
# Model from https://github.com/airockchip/rknn_model_zoo
MODEL_PATH = './onnx_models/yolov5s_relu.onnx'
NEED_BUILD_MODEL = True
# NEED_BUILD_MODEL = False
im_file = './dog_bike_car_640x640.jpg'
@@ -20,7 +21,7 @@ if __name__ == '__main__':
rknn = RKNN()
OUT_DIR = "rknn_models"
RKNN_MODEL_PATH = './{}/{}_{}.rknn'.format(
OUT_DIR, exp+'-'+str(Width)+'-'+str(Height), platform)
if NEED_BUILD_MODEL:
DATASET = './dataset.txt'
@@ -55,4 +56,3 @@ if __name__ == '__main__':
ret = rknn.load_rknn(RKNN_MODEL_PATH)
rknn.release()

BIN
examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx_models/yolov5s_rm_transpose.onnx → examples/rknn_yolov5_demo/convert_rknn_demo/yolov5/onnx_models/yolov5s_relu.onnx

Binary file not shown.

BIN
examples/rknn_yolov5_demo/model/RK3562/yolov5s-640-640.rknn

Binary file not shown.

BIN
examples/rknn_yolov5_demo/model/RK3566_RK3568/yolov5s-640-640.rknn

Binary file not shown.

BIN
examples/rknn_yolov5_demo/model/RK3588/yolov5s-640-640.rknn

Binary file not shown.

BIN
examples/rknn_yolov5_demo/model/RV110X/yolov5s-640-640.rknn

Binary file not shown.

2
examples/rknn_yolov5_demo/src/main_video.cc

@@ -222,7 +222,7 @@ static int init_model(const char* model_path, rknn_app_context_t* app_ctx) {
}
static int release_model(rknn_app_context_t* app_ctx) {
if (app_ctx->rknn_ctx != 0) {
rknn_destroy(app_ctx->rknn_ctx);
}
free(app_ctx->input_attrs);
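The fix above reflects that rknn_context is an integer handle rather than a pointer, so the idiomatic "empty" check is against 0, not NULL. A hedged sketch of a complete teardown path; the struct fields are assumed to mirror the demo's rknn_app_context_t:
```c
#include <stdlib.h>
#include "rknn_api.h"

// Assumed shape of the demo's context struct; the real definition lives in
// the demo sources and may contain more fields.
typedef struct {
    rknn_context      rknn_ctx;
    rknn_tensor_attr* input_attrs;
    rknn_tensor_attr* output_attrs;
} rknn_app_context_t;

static int release_model(rknn_app_context_t* app_ctx)
{
    if (app_ctx->rknn_ctx != 0) {
        rknn_destroy(app_ctx->rknn_ctx); // release the runtime context
        app_ctx->rknn_ctx = 0;           // guard against double-destroy
    }
    free(app_ctx->input_attrs);          // attrs were heap-allocated at init
    free(app_ctx->output_attrs);
    return 0;
}
```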

13
examples/rknn_yolov5_demo/src/postprocess.cc

@@ -192,8 +192,7 @@ static int process(int8_t* input, int* anchor, int grid_h, int grid_w, int heigh
{
int validCount = 0;
int grid_len = grid_h * grid_w;
int8_t thres_i8 = qnt_f32_to_affine(threshold, zp, scale);
for (int a = 0; a < 3; a++) {
for (int i = 0; i < grid_h; i++) {
for (int j = 0; j < grid_w; j++) {
@@ -201,10 +200,10 @@ static int process(int8_t* input, int* anchor, int grid_h, int grid_w, int heigh
if (box_confidence >= thres_i8) {
int offset = (PROP_BOX_SIZE * a) * grid_len + i * grid_w + j;
int8_t* in_ptr = input + offset;
float box_x = (deqnt_affine_to_f32(*in_ptr, zp, scale)) * 2.0 - 0.5;
float box_y = (deqnt_affine_to_f32(in_ptr[grid_len], zp, scale)) * 2.0 - 0.5;
float box_w = (deqnt_affine_to_f32(in_ptr[2 * grid_len], zp, scale)) * 2.0;
float box_h = (deqnt_affine_to_f32(in_ptr[3 * grid_len], zp, scale)) * 2.0;
box_x = (box_x + j) * (float)stride;
box_y = (box_y + i) * (float)stride;
box_w = box_w * box_w * (float)anchor[a * 2];
@@ -222,7 +221,7 @@ static int process(int8_t* input, int* anchor, int grid_h, int grid_w, int heigh
}
}
if (maxClassProbs>thres_i8){
objProbs.push_back((deqnt_affine_to_f32(maxClassProbs, zp, scale))* (deqnt_affine_to_f32(box_confidence, zp, scale)));
classId.push_back(maxClassId);
validCount++;
boxes.push_back(box_x);
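Dropping sigmoid/unsigmoid here is consistent with the new model from rknn_model_zoo applying sigmoid inside the graph, so scores and thresholds can be compared directly in the quantized domain. For reference, a minimal sketch of the affine (de)quantization helpers this code relies on, assuming the standard int8 affine scheme; the demo's real implementations in postprocess.cc may round or clamp differently:
```c
#include <math.h>
#include <stdint.h>

// q = round(f / scale) + zp, clamped to the int8 range.
static int8_t qnt_f32_to_affine(float f32, int32_t zp, float scale)
{
    float dst = (f32 / scale) + (float)zp; // map into the quantized domain
    if (dst < -128.0f) dst = -128.0f;      // clamp to int8 limits
    if (dst > 127.0f)  dst = 127.0f;
    return (int8_t)roundf(dst);
}

// f = (q - zp) * scale, the inverse affine mapping.
static float deqnt_affine_to_f32(int8_t qnt, int32_t zp, float scale)
{
    return ((float)qnt - (float)zp) * scale;
}
```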

16
examples/rknn_yolov5_demo/utils/mpp_decoder.cpp

@@ -6,8 +6,8 @@
#include <pthread.h>
#include <sys/syscall.h>
#define LOGD printf
// #define LOGD
static unsigned long GetCurrentTimeMS() {
struct timeval tv;
@@ -124,12 +124,12 @@ int MppDecoder::Decode(uint8_t* pkt_data, int pkt_size, int pkt_eos)
MPP_RET ret = MPP_OK;
MppCtx ctx = data->ctx;
MppApi *mpi = data->mpi;
size_t read_size = 0;
size_t packet_size = data->packet_size;
LOGD("receive packet size=%d ", pkt_size);
if (packet == NULL) {
ret = mpp_packet_init(&packet, NULL, 0);
}
@@ -144,7 +144,7 @@ int MppDecoder::Decode(uint8_t* pkt_data, int pkt_size, int pkt_eos)
if (pkt_eos)
mpp_packet_set_eos(packet);
do {
RK_S32 times = 5;
// send the packet first if packet is not done
if (!pkt_done) {
@@ -167,12 +167,12 @@ int MppDecoder::Decode(uint8_t* pkt_data, int pkt_size, int pkt_eos)
}
LOGD("decode_get_frame failed too much time ");
}
if (MPP_OK != ret) {
LOGD("decode_get_frame failed ret %d ", ret);
break;
}
if (frame) {
RK_U32 hor_stride = mpp_frame_get_hor_stride(frame);
RK_U32 ver_stride = mpp_frame_get_ver_stride(frame);
@@ -182,7 +182,7 @@ int MppDecoder::Decode(uint8_t* pkt_data, int pkt_size, int pkt_eos)
RK_S64 pts = mpp_frame_get_pts(frame);
RK_S64 dts = mpp_frame_get_dts(frame);
LOGD("decoder require buffer w:h [%d:%d] stride [%d:%d] buf_size %d pts=%ld dts=%ld ",
LOGD("decoder require buffer w:h [%d:%d] stride [%d:%d] buf_size %d pts=%lld dts=%lld ",
hor_width, ver_height, hor_stride, ver_stride, buf_size, pts, dts);
if (mpp_frame_get_info_change(frame)) {

3
examples/rknn_yolov5_demo/utils/mpp_encoder.cpp

@@ -7,7 +7,7 @@
#define MPP_ALIGN(x, a) (((x)+(a)-1)&~((a)-1))
#define SZ_4K 4096
#define LOGD printf
// #define LOGD
#define LOGE printf
int MppEncoder::InitParams(MppEncoderParams& params)
@@ -455,6 +455,7 @@ MPP_TEST_OUT:
int MppEncoder::SetCallback(MppEncoderFrameCallback callback) {
this->callback = callback;
return 0;
}
int MppEncoder::GetHeader(char* enc_buf, int max_size) {

BIN
runtime/RK356X/Android/librknn_api/arm64-v8a/librknnrt.so

Binary file not shown.

BIN
runtime/RK356X/Android/librknn_api/armeabi-v7a/librknnrt.so

Binary file not shown.

29
runtime/RK356X/Android/librknn_api/include/rknn_api.h

@@ -59,6 +59,12 @@ extern "C" {
/* dummy init flag: could only get total_weight_size and total_internal_size by rknn_query*/
#define RKNN_FLAG_COLLECT_MODEL_INFO_ONLY 0x00000100
/* set GPU as the preferred execution backend when the operator is not supported by the NPU */
#define RKNN_FLAG_EXECUTE_FALLBACK_PRIOR_DEVICE_GPU 0x00000400
/* allocate the internal memory outside the runtime */
#define RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE 0x00000200
/*
Error code returned by the RKNN API.
*/
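For context, init flags such as RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE are OR-ed into the flag argument of rknn_init. A hedged sketch; the helper name and model buffer variables are illustrative assumptions:
```c
#include <stdio.h>
#include "rknn_api.h"

// Sketch: create a context whose internal buffers the caller will supply
// later via rknn_set_internal_mem(); model_data/model_size are assumed to
// hold the loaded .rknn file.
static rknn_context init_with_external_internal_mem(void* model_data,
                                                    uint32_t model_size)
{
    rknn_context ctx = 0;
    int ret = rknn_init(&ctx, model_data, model_size,
                        RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE, NULL);
    if (ret < 0) {
        printf("rknn_init failed, ret=%d\n", ret);
        return 0;
    }
    return ctx;
}
```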
@@ -125,6 +131,10 @@ typedef enum _rknn_query_cmd {
RKNN_QUERY_CURRENT_INPUT_ATTR = 14, /* query the current shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_OUTPUT_ATTR = 15, /* query the current shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_INPUT_ATTR = 16, /* query the current native shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_OUTPUT_ATTR = 17, /* query the current native shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CMD_MAX
} rknn_query_cmd;
@@ -204,8 +214,8 @@ typedef enum _rknn_core_mask {
RKNN_NPU_CORE_0 = 1, /* run on NPU core 0. */
RKNN_NPU_CORE_1 = 2, /* run on NPU core 1. */
RKNN_NPU_CORE_2 = 4, /* run on NPU core 2. */
RKNN_NPU_CORE_0_1 = RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, /* run on NPU core 0 and core 1. */
RKNN_NPU_CORE_0_1_2 = RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, /* run on NPU core 0 and core 1 and core 2. */
RKNN_NPU_CORE_UNDEFINED,
} rknn_core_mask;
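The corrected comments align the mask names with the actual core numbering. A usage sketch, assuming a multi-core NPU target such as RK3588 and a context from rknn_init; rknn_set_core_mask is the runtime call that consumes this enum:
```c
#include <stdio.h>
#include "rknn_api.h"

// Sketch: bind the context to NPU cores 0 and 1.
static int bind_cores_0_1(rknn_context ctx)
{
    int ret = rknn_set_core_mask(ctx, RKNN_NPU_CORE_0_1);
    if (ret < 0) {
        printf("rknn_set_core_mask failed, ret=%d\n", ret);
    }
    return ret;
}
```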
@@ -678,7 +688,7 @@ int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem);
*/
int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr);
/* rknn_set_input_shape(deprecated)
set the input tensor shape (only valid for dynamic shape rknn model).
@@ -690,6 +700,19 @@ int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *at
*/
int rknn_set_input_shape(rknn_context ctx, rknn_tensor_attr* attr);
/* rknn_set_input_shapes
set all the input tensor shapes. The graph will run under the current set of input shapes after rknn_set_input_shapes. (Only valid for dynamic-shape rknn models.)
input:
rknn_context ctx the handle of context.
uint32_t n_inputs the number of inputs.
rknn_tensor_attr attr[] the attribute array of all input tensors.
return:
int error code.
*/
int rknn_set_input_shapes(rknn_context ctx, uint32_t n_inputs, rknn_tensor_attr attr[]);
#ifdef __cplusplus
} //extern "C"
#endif
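A hedged usage sketch for the new rknn_set_input_shapes: a single-input dynamic-shape model is switched to one of the shape candidates it was built with before inference. The 1x3x224x224 shape and the helper name are illustrative assumptions:
```c
#include <string.h>
#include "rknn_api.h"

// Sketch: select a 1x3x224x224 input shape on a dynamic-shape model.
static int set_dynamic_shape_224(rknn_context ctx)
{
    rknn_tensor_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.index = 0;
    // Query the default attribute first so type/format fields stay valid.
    rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &attr, sizeof(attr));
    attr.n_dims  = 4;
    attr.dims[0] = 1;   // N
    attr.dims[1] = 3;   // C
    attr.dims[2] = 224; // H
    attr.dims[3] = 224; // W
    return rknn_set_input_shapes(ctx, 1, &attr);
}
```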

BIN
runtime/RK356X/Android/rknn_server/arm/rknn_server

Binary file not shown.

BIN
runtime/RK356X/Android/rknn_server/arm64/rknn_server

Binary file not shown.

BIN
runtime/RK356X/Linux/librknn_api/aarch64/librknnrt.so

Binary file not shown.

BIN
runtime/RK356X/Linux/librknn_api/armhf/librknnrt.so

Binary file not shown.

29
runtime/RK356X/Linux/librknn_api/include/rknn_api.h

@@ -59,6 +59,12 @@ extern "C" {
/* dummy init flag: could only get total_weight_size and total_internal_size by rknn_query*/
#define RKNN_FLAG_COLLECT_MODEL_INFO_ONLY 0x00000100
/* set GPU as the preferred execution backend when the operator is not supported by the NPU */
#define RKNN_FLAG_EXECUTE_FALLBACK_PRIOR_DEVICE_GPU 0x00000400
/* allocate the internal memory outside the runtime */
#define RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE 0x00000200
/*
Error code returned by the RKNN API.
*/
@@ -125,6 +131,10 @@ typedef enum _rknn_query_cmd {
RKNN_QUERY_CURRENT_INPUT_ATTR = 14, /* query the current shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_OUTPUT_ATTR = 15, /* query the current shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_INPUT_ATTR = 16, /* query the current native shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_OUTPUT_ATTR = 17, /* query the current native shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CMD_MAX
} rknn_query_cmd;
@@ -204,8 +214,8 @@ typedef enum _rknn_core_mask {
RKNN_NPU_CORE_0 = 1, /* run on NPU core 0. */
RKNN_NPU_CORE_1 = 2, /* run on NPU core 1. */
RKNN_NPU_CORE_2 = 4, /* run on NPU core 2. */
RKNN_NPU_CORE_0_1 = RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, /* run on NPU core 0 and core 1. */
RKNN_NPU_CORE_0_1_2 = RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, /* run on NPU core 0 and core 1 and core 2. */
RKNN_NPU_CORE_UNDEFINED,
} rknn_core_mask;
@@ -678,7 +688,7 @@ int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem);
*/
int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr);
/* rknn_set_input_shape(deprecated)
set the input tensor shape (only valid for dynamic shape rknn model).
@@ -690,6 +700,19 @@ int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *at
*/
int rknn_set_input_shape(rknn_context ctx, rknn_tensor_attr* attr);
/* rknn_set_input_shapes
set all the input tensor shapes. The graph will run under the current set of input shapes after rknn_set_input_shapes. (Only valid for dynamic-shape rknn models.)
input:
rknn_context ctx the handle of context.
uint32_t n_inputs the number of inputs.
rknn_tensor_attr attr[] the attribute array of all input tensors.
return:
int error code.
*/
int rknn_set_input_shapes(rknn_context ctx, uint32_t n_inputs, rknn_tensor_attr attr[]);
#ifdef __cplusplus
} //extern "C"
#endif

BIN
runtime/RK356X/Linux/rknn_server/aarch64/usr/bin/rknn_server

Binary file not shown.

BIN
runtime/RK356X/Linux/rknn_server/armhf/usr/bin/rknn_server

Binary file not shown.

BIN
runtime/RK3588/Android/librknn_api/arm64-v8a/librknnrt.so

Binary file not shown.

BIN
runtime/RK3588/Android/librknn_api/armeabi-v7a/librknnrt.so

Binary file not shown.

29
runtime/RK3588/Android/librknn_api/include/rknn_api.h

@@ -59,6 +59,12 @@ extern "C" {
/* dummy init flag: could only get total_weight_size and total_internal_size by rknn_query*/
#define RKNN_FLAG_COLLECT_MODEL_INFO_ONLY 0x00000100
/* set GPU as the preferred execution backend when the operator is not supported by the NPU */
#define RKNN_FLAG_EXECUTE_FALLBACK_PRIOR_DEVICE_GPU 0x00000400
/* allocate the internal memory outside the runtime */
#define RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE 0x00000200
/*
Error code returned by the RKNN API.
*/
@@ -125,6 +131,10 @@ typedef enum _rknn_query_cmd {
RKNN_QUERY_CURRENT_INPUT_ATTR = 14, /* query the current shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_OUTPUT_ATTR = 15, /* query the current shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_INPUT_ATTR = 16, /* query the current native shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_OUTPUT_ATTR = 17, /* query the current native shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CMD_MAX
} rknn_query_cmd;
@@ -204,8 +214,8 @@ typedef enum _rknn_core_mask {
RKNN_NPU_CORE_0 = 1, /* run on NPU core 0. */
RKNN_NPU_CORE_1 = 2, /* run on NPU core 1. */
RKNN_NPU_CORE_2 = 4, /* run on NPU core 2. */
RKNN_NPU_CORE_0_1 = RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, /* run on NPU core 0 and core 1. */
RKNN_NPU_CORE_0_1_2 = RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, /* run on NPU core 0 and core 1 and core 2. */
RKNN_NPU_CORE_UNDEFINED,
} rknn_core_mask;
@@ -678,7 +688,7 @@ int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem);
*/
int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr);
/* rknn_set_input_shape(deprecated)
set the input tensor shape (only valid for dynamic shape rknn model).
@@ -690,6 +700,19 @@ int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *at
*/
int rknn_set_input_shape(rknn_context ctx, rknn_tensor_attr* attr);
/* rknn_set_input_shapes
set all the input tensor shapes. The graph will run under the current set of input shapes after rknn_set_input_shapes. (Only valid for dynamic-shape rknn models.)
input:
rknn_context ctx the handle of context.
uint32_t n_inputs the number of inputs.
rknn_tensor_attr attr[] the attribute array of all input tensors.
return:
int error code.
*/
int rknn_set_input_shapes(rknn_context ctx, uint32_t n_inputs, rknn_tensor_attr attr[]);
#ifdef __cplusplus
} //extern "C"
#endif

BIN
runtime/RK3588/Android/rknn_server/arm/rknn_server

Binary file not shown.

BIN
runtime/RK3588/Android/rknn_server/arm64/rknn_server

Binary file not shown.

BIN
runtime/RK3588/Linux/librknn_api/aarch64/librknnrt.so

Binary file not shown.

BIN
runtime/RK3588/Linux/librknn_api/armhf/librknnrt.so

Binary file not shown.

29
runtime/RK3588/Linux/librknn_api/include/rknn_api.h

@@ -59,6 +59,12 @@ extern "C" {
/* dummy init flag: could only get total_weight_size and total_internal_size by rknn_query*/
#define RKNN_FLAG_COLLECT_MODEL_INFO_ONLY 0x00000100
/* set GPU as the preferred execution backend when the operator is not supported by the NPU */
#define RKNN_FLAG_EXECUTE_FALLBACK_PRIOR_DEVICE_GPU 0x00000400
/* allocate the internal memory outside the runtime */
#define RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE 0x00000200
/*
Error code returned by the RKNN API.
*/
@@ -125,6 +131,10 @@ typedef enum _rknn_query_cmd {
RKNN_QUERY_CURRENT_INPUT_ATTR = 14, /* query the current shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_OUTPUT_ATTR = 15, /* query the current shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_INPUT_ATTR = 16, /* query the current native shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_OUTPUT_ATTR = 17, /* query the current native shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CMD_MAX
} rknn_query_cmd;
@@ -204,8 +214,8 @@ typedef enum _rknn_core_mask {
RKNN_NPU_CORE_0 = 1, /* run on NPU core 0. */
RKNN_NPU_CORE_1 = 2, /* run on NPU core 1. */
RKNN_NPU_CORE_2 = 4, /* run on NPU core 2. */
RKNN_NPU_CORE_0_1 = RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, /* run on NPU core 0 and core 1. */
RKNN_NPU_CORE_0_1_2 = RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, /* run on NPU core 0 and core 1 and core 2. */
RKNN_NPU_CORE_UNDEFINED,
} rknn_core_mask;
@@ -678,7 +688,7 @@ int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem);
*/
int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr);
/* rknn_set_input_shape(deprecated)
set the input tensor shape (only valid for dynamic shape rknn model).
@@ -690,6 +700,19 @@ int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *at
*/
int rknn_set_input_shape(rknn_context ctx, rknn_tensor_attr* attr);
/* rknn_set_input_shapes
set all the input tensor shapes. The graph will run under the current set of input shapes after rknn_set_input_shapes. (Only valid for dynamic-shape rknn models.)
input:
rknn_context ctx the handle of context.
uint32_t n_inputs the number of inputs.
rknn_tensor_attr attr[] the attribute array of all input tensors.
return:
int error code.
*/
int rknn_set_input_shapes(rknn_context ctx, uint32_t n_inputs, rknn_tensor_attr attr[]);
#ifdef __cplusplus
} //extern "C"
#endif

BIN
runtime/RK3588/Linux/rknn_server/aarch64/usr/bin/rknn_server

Binary file not shown.

BIN
runtime/RK3588/Linux/rknn_server/armhf/usr/bin/rknn_server

Binary file not shown.

BIN
runtime/RV1106/Linux/librknn_api/armhf/librknnmrt.a

Binary file not shown.

BIN
runtime/RV1106/Linux/librknn_api/armhf/librknnmrt.so

Binary file not shown.

29
runtime/RV1106/Linux/librknn_api/include/rknn_api.h

@@ -59,6 +59,12 @@ extern "C" {
/* dummy init flag: could only get total_weight_size and total_internal_size by rknn_query*/
#define RKNN_FLAG_COLLECT_MODEL_INFO_ONLY 0x00000100
/* set GPU as the preferred execution backend when the operator is not supported by the NPU */
#define RKNN_FLAG_EXECUTE_FALLBACK_PRIOR_DEVICE_GPU 0x00000400
/* allocate the internal memory outside the runtime */
#define RKNN_FLAG_INTERNAL_ALLOC_OUTSIDE 0x00000200
/*
Error code returned by the RKNN API.
*/
@@ -125,6 +131,10 @@ typedef enum _rknn_query_cmd {
RKNN_QUERY_CURRENT_INPUT_ATTR = 14, /* query the current shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_OUTPUT_ATTR = 15, /* query the current shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_INPUT_ATTR = 16, /* query the current native shape of rknn input tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CURRENT_NATIVE_OUTPUT_ATTR = 17, /* query the current native shape of rknn output tensor, only valid for dynamic rknn model*/
RKNN_QUERY_CMD_MAX
} rknn_query_cmd;
@@ -204,8 +214,8 @@ typedef enum _rknn_core_mask {
RKNN_NPU_CORE_0 = 1, /* run on NPU core 0. */
RKNN_NPU_CORE_1 = 2, /* run on NPU core 1. */
RKNN_NPU_CORE_2 = 4, /* run on NPU core 2. */
RKNN_NPU_CORE_0_1 = RKNN_NPU_CORE_0 | RKNN_NPU_CORE_1, /* run on NPU core 0 and core 1. */
RKNN_NPU_CORE_0_1_2 = RKNN_NPU_CORE_0_1 | RKNN_NPU_CORE_2, /* run on NPU core 0 and core 1 and core 2. */
RKNN_NPU_CORE_UNDEFINED,
} rknn_core_mask;
@@ -678,7 +688,7 @@ int rknn_set_internal_mem(rknn_context ctx, rknn_tensor_mem *mem);
*/
int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *attr);
/* rknn_set_input_shape(deprecated)
set the input tensor shape (only valid for dynamic shape rknn model).
@@ -690,6 +700,19 @@ int rknn_set_io_mem(rknn_context ctx, rknn_tensor_mem *mem, rknn_tensor_attr *at
*/
int rknn_set_input_shape(rknn_context ctx, rknn_tensor_attr* attr);
/* rknn_set_input_shapes
set all the input tensor shapes. The graph will run under the current set of input shapes after rknn_set_input_shapes. (Only valid for dynamic-shape rknn models.)
input:
rknn_context ctx the handle of context.
uint32_t n_inputs the number of inputs.
rknn_tensor_attr attr[] the attribute array of all input tensors.
return:
int error code.
*/
int rknn_set_input_shapes(rknn_context ctx, uint32_t n_inputs, rknn_tensor_attr attr[]);
#ifdef __cplusplus
} //extern "C"
#endif
