I spent a short while on monocular depth estimation with the goal of deploying it on an embedded board. However, since everything was based on open-source data and open-source models, converting the model's relative depth estimates into absolute depth produced very large errors (perhaps the conversion method itself was at fault), and I did not yet have a concrete idea of how to actually use it in the project, so it has been shelved for now. This post is a brief record of the deployment workflow.
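For reference, the usual way to map a relative (affine-invariant) prediction to metric depth is to fit a per-image scale and shift against a handful of known metric measurements by least squares, as in MiDaS-style evaluation. The sketch below is only an illustration of that idea, not code from this project; the function name, the use of sparse sensor ground truth, and the variable names are all my own assumptions, and note that MiDaS actually predicts inverse depth, so the fit is normally done in inverse-depth space before inverting.

import numpy as np

def align_scale_shift(pred_rel, gt_metric, mask):
    """Least-squares fit of scale s and shift t so that s * pred_rel + t ~ gt_metric.

    pred_rel : relative depth (or inverse depth) predicted by the network, HxW
    gt_metric: sparse metric measurements (e.g. from a depth sensor), HxW
    mask     : boolean HxW, True where gt_metric is valid
    """
    x = pred_rel[mask].astype(np.float64)
    y = gt_metric[mask].astype(np.float64)
    A = np.stack([x, np.ones_like(x)], axis=1)       # [N, 2] design matrix
    (s, t), *_ = np.linalg.lstsq(A, y, rcond=None)   # minimize ||A @ [s, t] - y||
    return s * pred_rel + t, s, t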
I tried MiDaS's midas_v21_384 model and an AdelaiDepth model. Neither contains operators unsupported by the RK platform, so the flow is simply: export ONNX, convert to RKNN, and test on the board.
1 Export ONNX
There is not much to say about this step; just add the ONNX-saving code to the test script.
print(torch.onnx.ir_version)
print("=========== onnx =========== ")

# Dummy input in NCHW layout; 224x384 must match the input size used on the board
dummy_input1 = torch.randn(1, 3, 224, 384).cuda()
input_names = ["data"]
output_names = ["output"]

model.eval()
torch.onnx.export(model, dummy_input1, "./weights/midas_v21_384.onnx",
                  verbose=True, input_names=input_names,
                  output_names=output_names, opset_version=12)
print("======================== convert onnx Finished! .... ")
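Before moving on to RKNN it is worth sanity-checking the exported ONNX against the PyTorch output. A minimal check with onnxruntime could look like the following; it reuses `model` and `torch` from the export snippet above, the input name "data" matches the export, and the "small diff" threshold is just my own rough expectation, not a value from this project.

import numpy as np
import onnxruntime as ort

# Run the same random input through both the ONNX model and the original PyTorch model
sess = ort.InferenceSession("./weights/midas_v21_384.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.randn(1, 3, 224, 384).astype(np.float32)

onnx_out = sess.run(["output"], {"data": dummy})[0]
torch_out = model(torch.from_numpy(dummy).cuda()).detach().cpu().numpy()

print("max abs diff:", np.abs(onnx_out - torch_out).max())  # expect a small value, e.g. < 1e-3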
2 Convert to RKNN
import os
import urllib
import traceback
import time
import sys
import numpy as np
import cv2
from rknn.api import RKNN
import math

ONNX_MODEL = './midas_v21_384.onnx'
RKNN_MODEL = './midas_v21_384.rknn'
DATASET = './images_list.txt'

QUANTIZE_ON = True
input_height = 224
input_width = 384


def export_rknn_inference(img):
    # Create RKNN object
    rknn = RKNN(verbose=False)

    # pre-process config
    print('--> Config model')
    rknn.config(mean_values=[[123.675, 116.28, 103.53]], std_values=[[58.395, 57.12, 57.375]],
                quantized_algorithm='normal', quantized_method='channel', target_platform='rk3588')
    print('done')

    # Load ONNX model
    print('--> Loading model')
    ret = rknn.load_onnx(model=ONNX_MODEL, outputs=["output"])
    if ret != 0:
        print('Load model failed!')
        exit(ret)
    print('done')

    # Build model
    print('--> Building model')
    ret = rknn.build(do_quantization=QUANTIZE_ON, dataset=DATASET, rknn_batch_size=1)
    if ret != 0:
        print('Build model failed!')
        exit(ret)
    print('done')

    # Export RKNN model
    print('--> Export rknn model')
    ret = rknn.export_rknn(RKNN_MODEL)
    if ret != 0:
        print('Export rknn model failed!')
        exit(ret)
    print('done')

    # Init runtime environment
    print('--> Init runtime environment')
    ret = rknn.init_runtime()
    # ret = rknn.init_runtime(target='rk3566')
    if ret != 0:
        print('Init runtime environment failed!')
        exit(ret)
    print('done')

    # Inference
    print('--> Running model')
    outputs = rknn.inference(inputs=[img])
    rknn.release()
    print('done')

    return outputs


if __name__ == '__main__':
    print('This is main ...')

    image_path = './test.jpg'
    origin_image = cv2.imread(image_path)
    image_height, image_width = origin_image.shape[:2]

    image = cv2.resize(origin_image, (input_width, input_height), interpolation=cv2.INTER_LINEAR)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = np.expand_dims(image, 0)

    outputs = export_rknn_inference(image)

    depth = outputs[0][0]
    depth_min = depth.min()
    depth_max = depth.max()
    max_val = 255

    if depth_max - depth_min > np.finfo("float").eps:
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
        out = cv2.applyColorMap(np.uint8(out), cv2.COLORMAP_INFERNO)
        result = cv2.resize(out, (image_width, image_height))
        cv2.imwrite("./test_depth.png", result)
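Since QUANTIZE_ON is enabled, the DATASET file (images_list.txt) supplies the calibration images for int8 quantization; in the rknn-toolkit2 convention it is just a text file with one image path per line. A small helper along these lines can generate it; the './calib_images/' directory and the cap of 100 images are placeholders of my own, not part of this project.

import glob

# Collect a representative set of calibration images for quantization
image_paths = sorted(glob.glob('./calib_images/*.jpg'))[:100]

with open('./images_list.txt', 'w') as f:
    f.write('\n'.join(image_paths) + '\n')

The more representative these images are of the deployment scenes, the better the int8 model tends to hold up against the float original.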
3 Test Results
MiDaS results
On-board results
The inference code has already been posted several times in the repositories accompanying earlier posts, and the post-processing for depth estimation is fairly simple, so the full C++ code is not included here; only the post-processing part is shown.
int MiDaS::GetResult(int8_t **PtrBlob, std::vector<int> &qnt_zp, std::vector<float> &qnt_scale, cv::Mat &SegMask)
{
    int ret = 0;
    int8_t *Output = (int8_t *)PtrBlob[0];
    int QuantZp = qnt_zp[0];
    float QuantScale = qnt_scale[0];
    float DepthMin = 0;
    float DepthMax = 0;
    float DeQntValue = 0;

    // Dequantize every output value and track the min/max depth on the fly
    for (int h = 0; h < InputHeight_; h++)
    {
        for (int w = 0; w < InputWidth_; w++)
        {
            DeQntValue = DeQnt2F32(Output[h * InputWidth_ + w], QuantZp, QuantScale);
            DepthMap_[h][w] = DeQntValue;

            if (h == 0 && w == 0)
            {
                DepthMin = DeQntValue;
                DepthMax = DeQntValue;
            }
            else
            {
                if (DeQntValue < DepthMin)
                {
                    DepthMin = DeQntValue;
                }
                if (DeQntValue > DepthMax)
                {
                    DepthMax = DeQntValue;
                }
            }
        }
    }

    // Normalize to [0, MaxValue_] and write the 8-bit gray depth map
    for (int h = 0; h < InputHeight_; h++)
    {
        for (int w = 0; w < InputWidth_; w++)
        {
            DeQntValue = DepthMap_[h][w];
            DepthMap_[h][w] = MaxValue_ * (DeQntValue - DepthMin) / (DepthMax - DepthMin);
            DepthMapGray_.at<uchar>(h, w) = static_cast<uchar>(int(DepthMap_[h][w]));
        }
    }

    cv::applyColorMap(DepthMapGray_, SegMask, cv::COLORMAP_AUTUMN);

    return ret;
}
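DeQnt2F32 itself is not shown above; for the asymmetric int8 quantization that RKNN uses, dequantization is the usual affine mapping from the quantized value, zero point, and scale. Expressed in Python (function name is mine, used here only to spell out the formula):

def deqnt_to_f32(q, zero_point, scale):
    # Affine int8 dequantization: real_value = (quantized - zero_point) * scale
    return (float(q) - zero_point) * scale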
AdelaiDepth results
The two models above run on the RK board without any changes to the model structure. The others I tried (bts, ZoeDepth, DiverseDepth, Metric3D, monodepth2, DSINE) require modifications to the model structure before they can run on the board, so they are not covered here.