K230 switched to CSI0: RuntimeError: MediaManager, vb config failed (-1610317806)


Steps to reproduce

[screenshot of the error attached]

Sometimes pressing the K230's reset button a few times, or closing and restarting the CanMV IDE, makes the problem go away; then switching to run another demo program can trigger it again, and switching back triggers it once more.

The code is as follows:

'''
Demo: object detection (YOLOv8n)
Platform: 01Studio CanMV K230
Tutorial: wiki.01studio.cc
'''

from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d

from media.sensor import *  # import the sensor module for camera-related interfaces

import os
import ujson
from media.media import *
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import time
import utime
import image
import random
import gc
import sys
import aidemo

# Custom YOLOv8 detection class

class ObjectDetectionApp(AIBase):
    def __init__(self,kmodel_path,labels,model_input_size,max_boxes_num,confidence_threshold=0.5,nms_threshold=0.2,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0):
        super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode)
        self.kmodel_path=kmodel_path
        self.labels=labels
        # model input resolution
        self.model_input_size=model_input_size
        # threshold settings
        self.confidence_threshold=confidence_threshold
        self.nms_threshold=nms_threshold
        self.max_boxes_num=max_boxes_num
        # resolution of the image the sensor hands to the AI
        self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]]
        # display resolution
        self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]]
        self.debug_mode=debug_mode
        # preset colors for detection boxes
        self.color_four=[(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230),
                         (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70),
                         (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0),
                         (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255),
                         (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157)]
        # width/height scaling factors
        self.x_factor = float(self.rgb888p_size[0])/self.model_input_size[0]
        self.y_factor = float(self.rgb888p_size[1])/self.model_input_size[1]
        # Ai2d instance used for model preprocessing
        self.ai2d=Ai2d(debug_mode)
        # set the Ai2d input/output format and data type
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8)

    # Configure preprocessing. resize is used here; Ai2d supports crop/shift/pad/resize/affine, see /sdcard/app/libs/AI2D.py for details.
    def config_preprocess(self,input_image_size=None):
        with ScopedTiming("set preprocess config",self.debug_mode > 0):
            # Initialize the ai2d preprocessing config; defaults to the size the sensor gives the AI, set input_image_size to override it.
            ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]])

    # Task-specific postprocessing
    def postprocess(self,results):
        with ScopedTiming("postprocess",self.debug_mode > 0):
            result=results[0]
            result = result.reshape((result.shape[0] * result.shape[1], result.shape[2]))
            output_data = result.transpose()
            boxes_ori = output_data[:,0:4]
            scores_ori = output_data[:,4:]
            confs_ori = np.max(scores_ori,axis=-1)
            inds_ori = np.argmax(scores_ori,axis=-1)
            boxes,scores,inds = [],[],[]
            for i in range(len(boxes_ori)):
                if confs_ori[i] > self.confidence_threshold:
                    scores.append(confs_ori[i])
                    inds.append(inds_ori[i])
                    x = boxes_ori[i,0]
                    y = boxes_ori[i,1]
                    w = boxes_ori[i,2]
                    h = boxes_ori[i,3]
                    left = int((x - 0.5 * w) * self.x_factor)
                    top = int((y - 0.5 * h) * self.y_factor)
                    right = int((x + 0.5 * w) * self.x_factor)
                    bottom = int((y + 0.5 * h) * self.y_factor)
                    boxes.append([left,top,right,bottom])
            if len(boxes)==0:
                return []
            boxes = np.array(boxes)
            scores = np.array(scores)
            inds = np.array(inds)
            # non-maximum suppression
            keep = self.nms(boxes,scores,self.nms_threshold)
            dets = np.concatenate((boxes, scores.reshape((len(boxes),1)), inds.reshape((len(boxes),1))), axis=1)
            dets_out = []
            for keep_i in keep:
                dets_out.append(dets[keep_i])
            dets_out = np.array(dets_out)
            dets_out = dets_out[:self.max_boxes_num, :]
            return dets_out

    # Draw the detection results
    def draw_result(self,pl,dets):
        with ScopedTiming("display_draw",self.debug_mode >0):
            if dets:
                pl.osd_img.clear()
                for det in dets:
                    x1, y1, x2, y2 = map(lambda x: int(round(x, 0)), det[:4])
                    x = x1*self.display_size[0] // self.rgb888p_size[0]
                    y = y1*self.display_size[1] // self.rgb888p_size[1]
                    w = (x2 - x1) * self.display_size[0] // self.rgb888p_size[0]
                    h = (y2 - y1) * self.display_size[1] // self.rgb888p_size[1]
                    pl.osd_img.draw_rectangle(x,y, w, h, color=self.get_color(int(det[5])),thickness=4)
                    pl.osd_img.draw_string_advanced(x, y-50, 32, " " + self.labels[int(det[5])] + " " + str(round(det[4],2)), color=self.get_color(int(det[5])))
            else:
                pl.osd_img.clear()


    # Non-maximum suppression for multi-object detection
    def nms(self,boxes,scores,thresh):
        """Pure Python NMS baseline."""
        x1,y1,x2,y2 = boxes[:, 0],boxes[:, 1],boxes[:, 2],boxes[:, 3]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = np.argsort(scores,axis = 0)[::-1]
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            new_x1,new_y1,new_x2,new_y2,new_areas = [],[],[],[],[]
            for order_i in order:
                new_x1.append(x1[order_i])
                new_x2.append(x2[order_i])
                new_y1.append(y1[order_i])
                new_y2.append(y2[order_i])
                new_areas.append(areas[order_i])
            new_x1 = np.array(new_x1)
            new_x2 = np.array(new_x2)
            new_y1 = np.array(new_y1)
            new_y2 = np.array(new_y2)
            xx1 = np.maximum(x1[i], new_x1)
            yy1 = np.maximum(y1[i], new_y1)
            xx2 = np.minimum(x2[i], new_x2)
            yy2 = np.minimum(y2[i], new_y2)
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            new_areas = np.array(new_areas)
            ovr = inter / (areas[i] + new_areas - inter)
            new_order = []
            for ovr_i,ind in enumerate(ovr):
                if ind < thresh:
                    new_order.append(order[ovr_i])
            order = np.array(new_order,dtype=np.uint8)
        return keep

    # Get the box color for the given class index
    def get_color(self, x):
        idx=x%len(self.color_four)
        return self.color_four[idx]

if __name__=="__main__":
    # Display mode: default is "hdmi"; options are "hdmi" and "lcd"
    display_mode="lcd"
    if display_mode=="hdmi":
        display_size=[1920,1080]
    else:
        display_size=[800,480]
    # model path
    kmodel_path="/sdcard/app/tests/kmodel/yolov8n_320.kmodel"
    labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
    # other parameter settings
    confidence_threshold = 0.2
    nms_threshold = 0.2
    max_boxes_num = 50
    rgb888p_size=[320,320]

    # initialize the PipeLine
    pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode)
    sensor = Sensor(id=0)  # construct the camera object (CSI0)
    pl.create(sensor)
    # initialize the custom object detection instance
    ob_det=ObjectDetectionApp(kmodel_path,labels=labels,model_input_size=[320,320],max_boxes_num=max_boxes_num,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0)
    ob_det.config_preprocess()

    clock = time.clock()

    try:
        while True:

            os.exitpoint()

            clock.tick()

            img=pl.get_frame()          # get the current frame
            res=ob_det.run(img)         # run inference on the current frame
            ob_det.draw_result(pl,res)  # draw the results onto the PipeLine's OSD image
            print(res)                  # print the current results
            pl.show_image()             # show the drawn results
            gc.collect()

            print(clock.fps())          # print the frame rate

    except Exception as e:
        sys.print_exception(e)
    finally:
        ob_det.deinit()
        pl.destroy()


Expected vs. actual results

Hardware and software versions

Error log

Troubleshooting attempted

Additional materials

Please edit your post and format the code with markdown:

    # your code

Wrap the code in a code block; as posted, it can't be run.

2 Answers

This is a current limitation of CanMV; you need to use

    try:
        xxxx
    catch:
        xxxx

to make sure memory is released correctly. This will be optimized in a later release.
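In Python the keywords are actually `try`/`except`/`finally` (there is no `catch`). Below is a minimal sketch of the cleanup pattern this answer appears to describe, reusing only names that already exist in the posted demo (`pl`, `ob_det`, `os.exitpoint()`); it is an illustration under that assumption, not official CanMV guidance. The key point is that `deinit()`/`destroy()` live in `finally`, so the model and media buffers are released no matter how the loop exits.

```python
# Sketch only: assumes `pl` (PipeLine) and `ob_det` (ObjectDetectionApp)
# have been created exactly as in the demo posted above.
import os
import sys
import gc

try:
    while True:
        os.exitpoint()               # checkpoint so the script can be stopped cleanly
        img = pl.get_frame()         # grab the current frame
        res = ob_det.run(img)        # run inference
        ob_det.draw_result(pl, res)  # draw boxes onto the OSD layer
        pl.show_image()              # push the OSD layer to the display
        gc.collect()
except Exception as e:
    sys.print_exception(e)           # report the error instead of letting it escape
finally:
    ob_det.deinit()                  # release model-side (KPU) resources
    pl.destroy()                     # tear down the display / media pipeline
```

If these `finally` calls never run (for example, the previous script is killed before cleanup), the next script may be unable to configure the video buffers, which would be consistent with the `vb config failed` error above until the board is reset.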

Could you please give a concrete, modified version of the code for this specific example? Thank you!

You can refer to our example programs.

The code I posted above is one of the example programs, and the other examples look much the same. I don't see the try:xxx catch:xxx you mention in any of them, and I don't know what should be caught or what to do after catching it.

Even after clicking close and restarting, the error still appears (screenshots attached). It turned out that the cause of these baffling problems is that the board cannot supply enough power to meet the camera's requirements.