K230使用两个摄像头的速度很慢是怎么回事

Viewed 221

1 使用东山派k230板子(MicroPython v1.1 on 2024-11-20; k230_canmv_dongshanpi with K230),csi0和csi2都是gc2093的摄像头,打算分别跑颜色识别和二维码识别的python(CanMV IDE)程序
2 单独只使用一个摄像头的时候帧率接近50
3 可是只要使用两个摄像头,sensor1.run()后帧率只有11帧左右了,不应该影响那么大吧?什么算法都没有跑呢
4 还有就是一个摄像头很稳定,两个摄像头同时跑容易出现snapshot fail,加了sleep好点,但是时间一长还是会出现,电源应该没有问题
5 另外二维码识别比AprilTag识别速度快,但在同样环境下识别成功率却更低,这是为什么?

代码(以下是屏蔽了一个摄像头的,打开帧率就下降为之前的1/4左右了):

import time, os, gc, sys, math
from machine import UART
from machine import FPIOA

from media.sensor import *
from media.display import *
from media.media import *
# AprilTag families to search for, combined as a bitmask.
# Each additional family increases detection cost, so only the two
# families actually used are enabled. To enable more, OR in e.g.
# image.TAG16H5, image.TAG25H7, image.TAG25H9, or image.TAG36H10.
tag_families = image.TAG36H11 | image.ARTOOLKIT  # TAG36H11 is the default family
def family_name(tag):
    """Return the human-readable name of *tag*'s AprilTag family.

    Returns None when tag.family() is not one of the known constants
    (same as the implicit fall-through of the original if-chain).
    """
    names = {
        image.TAG16H5: "TAG16H5",
        image.TAG25H7: "TAG25H7",
        image.TAG25H9: "TAG25H9",
        image.TAG36H10: "TAG36H10",
        image.TAG36H11: "TAG36H11",
        image.ARTOOLKIT: "ARTOOLKIT",
    }
    return names.get(tag.family())

# Pin-mux setup: route UART2 onto the board's header pins.
fpioa = FPIOA()
# Pin 44 -> UART2 TX
fpioa.set_function(44, fpioa.UART2_TXD)
# Pin 45 -> UART2 RX
fpioa.set_function(45, fpioa.UART2_RXD)
# UART: baudrate 9600, 8 data bits, no parity, 1 stop bit
# (NOTE(review): the original comment said 115200, but the code sets 9600 —
# confirm which rate the peer device expects.)
uart = UART(UART.UART2, baudrate=9600, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE)
# Print the UART configuration for debugging.
print(uart)
# UART write smoke test.
r = uart.write("UART test wwwwwwwwwwwww")
print(r)
# UART read: nothing may be pending at startup.
r = uart.read()
print(r)
# UART readline
r = uart.readline()
print(r)
# UART readinto: read into a preallocated 8-byte buffer.
b = bytearray(8)
r = uart.readinto(b)
print(r) 
count = 0

# Detection resolutions for the two camera pipelines.
DETECT_WIDTH1 = 640
DETECT_HEIGHT1 = 480
DETECT_WIDTH2 = 320
DETECT_HEIGHT2 = 240

# Grayscale threshold used for line-following blob detection.
GRAYSCALE_THRESHOLD = [(0, 100)]

# Regions of interest: (x, y, w, h, weight).
# Rows lower in the image carry larger weights, so the line position
# closest to the robot dominates the weighted centroid. Tweak the
# weights for your setup.
ROIS = [
    (0, 50, 240, 20, 0.1),
    (0, 100, 240, 20, 0.3),
    (0, 150, 240, 20, 0.7),
]

# Total ROI weight, used to normalize the weighted centroid.
weight_sum = sum(roi[4] for roi in ROIS)

sensor1 = None
sensor2 = None

try:
    # Camera 1 (CSI2, GC2093) is disabled here. The author reported that
    # running both cameras drops the frame rate from ~50 to ~11 fps.
#    sensor1 = Sensor(id=2, width=DETECT_WIDTH1, height=DETECT_HEIGHT1)
#    sensor1.reset()
    sensor2 = Sensor(id=0, width=DETECT_WIDTH2, height=DETECT_HEIGHT2)
    sensor2.reset()
#    sensor1.set_framesize(width=DETECT_WIDTH1, height=DETECT_HEIGHT1)
    sensor2.set_framesize(width=DETECT_WIDTH2, height=DETECT_HEIGHT2)
#    sensor1.set_pixformat(Sensor.GRAYSCALE)
    sensor2.set_pixformat(Sensor.GRAYSCALE)
    #sensor2.set_pixformat(Sensor.RGB565)  # RGB565 for QR-code detection
    Display.init(Display.VIRT, width=DETECT_WIDTH1, height=DETECT_HEIGHT1, fps=100)
    MediaManager.init()
#    sensor1.run()
    sensor2.run()  # run() is only needed once
    fps = time.clock()

    while True:
        fps.tick()

        # Allow the IDE / runtime to request an exit.
        os.exitpoint()
#        time.sleep(0.001)
#        img1 = sensor1.snapshot()
        # Short delay reportedly reduces (but does not eliminate)
        # "snapshot fail" when both cameras run.
        time.sleep(0.001)
        img2 = sensor2.snapshot()

        # BUG FIX: this assignment was indented with 7 spaces in the
        # original paste, which raises IndentationError in Python.
        count = 0
        centroid_sum = 0
        for r in ROIS:
            # r[0:4] is the (x, y, w, h) ROI rectangle.
            blobs = img2.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True)

            if blobs:
                # Track the blob with the most pixels in this ROI.
                largest_blob = max(blobs, key=lambda b: b.pixels())
                # Only accept narrow blobs (presumably the line itself,
                # not wide dark regions).
                if largest_blob.w() < 40:
                    count = count + 1
                    img2.draw_rectangle([v for v in largest_blob.rect()])
                    img2.draw_cross(largest_blob.cx(), largest_blob.cy())
                    # r[4] is the ROI weight.
                    centroid_sum += largest_blob.cx() * r[4]

        # Weighted average of blob centers -> estimated line position.
        center_pos = (centroid_sum / weight_sum)
        deflection_angle = -math.atan((center_pos - 80) / 60)
        deflection_angle = math.degrees(deflection_angle)
        center_pos = int(center_pos)
        if count < 3:
            # Require a detection in all three ROIs before trusting the
            # estimate; otherwise report 0.
            center_pos = 0
        print("Turn Angle: %f" % center_pos)  # deflection_angle

#        for code in img2.find_qrcodes():
#            rect = code.rect()
#            img2.draw_rectangle([v for v in rect], color=(255, 0, 0), thickness = 5)
#            img2.draw_string_advanced(rect[0], rect[1], 32, code.payload())
#            print(code)
        for tag in img2.find_apriltags(families=tag_families):
            img2.draw_rectangle([v for v in tag.rect()], color=(255, 0, 0))
            img2.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0))
            print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi)
            print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args)

        #uart.write("i={0:0>3}\r\n".format(center_pos))  # deflection_angle

        # Draw the annotated frame to the virtual display.
        Display.show_image(img2)
        gc.collect()

        print(int(fps.fps()))
except KeyboardInterrupt as e:
    print("user stop")
except BaseException as e:
    print(f"Exception '{e}'")
finally:
    # Stop any sensor that was actually started.
    if isinstance(sensor1, Sensor):
        sensor1.stop()
    if isinstance(sensor2, Sensor):
        sensor2.stop()
    # Deinitialize the display.
    Display.deinit()

    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
    time.sleep_ms(100)

    # Release the media buffers last.
    MediaManager.deinit()


1 Answer

你好,可以提供一下测试代码吗?

已经提交了测试代码

你好,请使用 https://markdown.land/markdown-code-block 来贴代码,现在的格式全部不正确的,无法测试。

加了几个标点符号就格式正常了?