K230 1.1.2 nncase 2.9.0
import os
import ujson
import aicube
from libs.PipeLine import ScopedTiming
from libs.Utils import *
from media.sensor import *
from media.display import *
from media.media import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import gc
root_path="/sdcard/mp_deployment_source/" # root_path要以/结尾
config_path=root_path+"deploy_config.json"
deploy_conf={}
debug_mode=1
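# read_img: decode a JPEG from disk and convert it from HWC RGB888 to the CHW
# layout fed to ai2d: flatten to (H*W, C), transpose to (C, H*W), copy the
# transposed view so it can be reshaped, then reshape to (C, H, W).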
def read_img(img_path):
    img_data = image.Image(img_path)
    img_data_rgb888 = img_data.to_rgb888()
    img_hwc = img_data_rgb888.to_numpy_ref()
    shape = img_hwc.shape
    img_tmp = img_hwc.reshape((shape[0] * shape[1], shape[2]))
    img_tmp_trans = img_tmp.transpose()
    img_res = img_tmp_trans.copy()
    img_return = img_res.reshape((shape[2], shape[0], shape[1]))
    return img_return
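# Example (file name follows the naming scheme used in __init__ below):
#   chw = read_img("/sdcard/Origin/143322_0.jpg")
#   print(chw.shape)  # (3, 1080, 1920) for a 1920x1080 source image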
# Read the deploy_config.json file
def read_deploy_config(config_path):
    config = None
    with open(config_path, 'r') as json_file:
        try:
            config = ujson.load(json_file)
        except ValueError as e:
            print("JSON parse error:", e)
    return config
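# deploy_config.json is expected to provide at least the keys read in __init__ below;
# the values here are only illustrative (a typical anchor-based detection config):
#   {
#     "kmodel_path": "yolov5n.kmodel",
#     "categories": ["person", "car"],
#     "confidence_threshold": 0.5,
#     "nms_threshold": 0.45,
#     "num_classes": 2,
#     "nms_option": false,
#     "anchors": [[10,13, 16,30, 33,23], [30,61, 62,45, 59,119], [116,90, 156,198, 373,326]]
#   }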
class AI_Detector():
    def __init__(self, origin_dir, machine_code, batch_size):
        self.bs = batch_size
        self.mc = machine_code
        self.od = origin_dir
        # Initialize deployment variables from the JSON config
        deploy_conf = read_deploy_config(config_path)
        kmodel_name = deploy_conf["kmodel_path"]
        self.labels = deploy_conf["categories"]
        self.confidence_threshold = deploy_conf["confidence_threshold"]
        self.nms_threshold = deploy_conf["nms_threshold"]
        self.model_input_size = [640, 640]
        self.num_classes = deploy_conf["num_classes"]
        self.color_four = get_colors(self.num_classes)
        self.nms_option = deploy_conf["nms_option"]
        self.anchors = deploy_conf["anchors"][0] + deploy_conf["anchors"][1] + deploy_conf["anchors"][2]
        self.strides = [8, 16, 32]
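        # The three anchor groups from deploy_config.json are flattened into one list,
        # and strides [8, 16, 32] correspond to the three output feature maps consumed
        # by aicube.anchorbasedet_post_process in get_result.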
        # Initialize ai2d input/output tensors
        input_array = np.empty((self.bs, 3, 1080, 1920), dtype=np.uint8)
        for i in range(0, batch_size):
            image_partial_path = self.od + self.mc + f'_{i}.jpg'
            image_partial = read_img(image_partial_path).reshape((1, 3, 1080, 1920))
            input_array[i] = image_partial
        print(input_array.shape)
        self.frame_size = [1920, 1080]
        self.ai2d_input_tensor = nn.from_numpy(input_array)
        ai2d_input_shape = input_array.shape
        data = np.ones((self.bs, 3, 640, 640), dtype=np.uint8)
        self.ai2d_output_tensor = nn.from_numpy(data)
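        # Note: both ai2d tensors are created with a leading batch dimension of self.bs.
        # Whether the kmodel itself accepts a batch larger than 1 depends on the input
        # shape it was compiled with (nncase kmodels typically have a fixed input shape).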
        # Initialize the KPU and load the kmodel
        self.kpu = nn.kpu()
        self.kpu.load_kmodel(root_path + kmodel_name)
        # Configure ai2d preprocessing: pad 140 px (value 114) above and below the frame,
        # then bilinear-resize the padded batch to the 640x640 model input
        ai2d = nn.ai2d()
        ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)
        ai2d.set_pad_param(True, [0, 0, 0, 0, 140, 140, 0, 0], 0, [114, 114, 114])
        ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
        self.ai2d_builder = ai2d.build([self.bs, 3, 1080, 1920], [self.bs, 3, 640, 640])
    def get_result(self, origin_path, score_threshold, pos_threshold):
        with ScopedTiming("total", debug_mode > 0):
            # Run preprocessing with ai2d
            self.ai2d_builder.run(self.ai2d_input_tensor, self.ai2d_output_tensor)
            # Set the model input tensor
            self.kpu.set_input_tensor(0, self.ai2d_output_tensor)
            # Model inference
            print('ready to start')
            self.kpu.run()
            # Fetch the model outputs
            results = []
            print('done', self.kpu.outputs_size())
            output_numpy = self.ai2d_output_tensor.to_numpy()
            print(output_numpy.shape)
            for i in range(3):
                data = self.kpu.get_output_tensor(i)
                result = data.to_numpy()
                print(result.shape)
                del data
                results.append(result)
            # Post-processing
            det_boxes = aicube.anchorbasedet_post_process(results[0], results[1], results[2],
                self.model_input_size, self.frame_size, self.strides, self.num_classes,
                self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option)
            # Draw results
            print(det_boxes)
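            # Each entry of det_boxes is consumed below as
            # [class_index, score, x1, y1, x2, y2] in frame coordinates.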
            # image_draw = image.Image(image_path).to_rgb565()
            # if det_boxes:
            #     for det_boxe in det_boxes:
            #         x1, y1, x2, y2 = det_boxe[2], det_boxe[3], det_boxe[4], det_boxe[5]
            #         x, y, w, h = int(x1), int(y1), int(x2 - x1), int(y2 - y1)
            #         image_draw.draw_rectangle(x, y, w, h, color=self.color_four[det_boxe[0]][1:])
            #         label = self.labels[det_boxe[0]]
            #         score = str(round(det_boxe[1], 2))
            #         image_draw.draw_string_advanced(int(x1), int(y1) - 30, 20, label + " " + score, color=self.color_four[det_boxe[0]][1:])
            #         print("\n" + label + " " + score)
            #     image_draw.compress_for_ide()
            #     image_draw.save(root_path + "det_result.jpg")
            # else:
            #     print("No objects were identified.")
            input_numpy = self.ai2d_input_tensor.to_numpy()
            output_numpy = self.ai2d_output_tensor.to_numpy()
            print(input_numpy.shape)
            print(output_numpy.shape)
            del self.ai2d_input_tensor
            del self.ai2d_output_tensor
            print("---------------end------------------")
            nn.shrink_memory_pool()
            gc.collect()
if __name__=="__main__":
detecter = AI_Detector('/sdcard/Origin/','143322',13)
detecter.get_result(1,1,1)
Can this inference pipeline support multiple images at once? Why is the output still single-channel (one image's worth) after I change the batch size? Can anyone help with this? The official documentation doesn't say anything about it either.
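To make the question concrete: the only workaround I can come up with is running one image per inference, roughly as below (just a sketch; it assumes the kmodel was compiled for batch 1, and ai2d_builder / kpu stand for instances rebuilt for a [1, 3, 1080, 1920] input), but I'd rather get real batching to work.

# Hypothetical per-image fallback, not the batched behavior I'm asking about
for i in range(13):
    chw = read_img('/sdcard/Origin/143322' + f'_{i}.jpg').reshape((1, 3, 1080, 1920))
    in_tensor = nn.from_numpy(chw)
    out_tensor = nn.from_numpy(np.ones((1, 3, 640, 640), dtype=np.uint8))
    ai2d_builder.run(in_tensor, out_tensor)   # builder made with [1,3,1080,1920] -> [1,3,640,640]
    kpu.set_input_tensor(0, out_tensor)
    kpu.run()
    outputs = [kpu.get_output_tensor(j).to_numpy() for j in range(3)]
    del in_tensor, out_tensor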