How to increase the K230 image-processing frame rate


I am using the K230 CanMV to detect a laser spot and several colored balls at the same time.
I have already pushed the frame rate up as far as I can by tuning the functions' parameters.
Is there any other way to increase the frame rate, for example multithreading?

# Single Color Code Tracking Example
#
# This example shows off single color code tracking using the CanMV Cam.
#
# A color code is a blob composed of two or more colors. The example below will
# only track colored objects which have both the colors below in them.
import time, os, gc, sys, math

from media.sensor import *
from media.display import *
from media.media import *
import _thread
DETECT_WIDTH = 320
DETECT_HEIGHT = 240

# Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max)
# The below thresholds track in general red/green things. You may wish to tune them...
thresholds = [(30, 100, 4, 73, -7, 15), (42, 100, -19, 40, 41, 65)]  # LAB thresholds: the full list is used in red_light_thread, thresholds[1] in red_ball_thread
# Codes are or'ed together when "merge=True" for "find_blobs"

# Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are
# returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the
# camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes.

sensor = None

lock = _thread.allocate_lock()

stop_flag = False


def red_light_thread():
    global sensor, stop_flag, fps
    while not stop_flag:
        fps.tick()
        # check if we should exit.
        os.exitpoint()
        with lock:
            img = sensor.snapshot()

        for c in img.find_circles(
                threshold=1500,  # Hough transform threshold: higher values keep only stronger circles; tune for your scene.
                x_margin=10,     # merge tolerance in x; larger values merge nearby circles.
                y_margin=10,     # merge tolerance in y; larger values merge nearby circles.
                r_margin=10,     # merge tolerance in radius; larger values merge circles of similar radius.
                r_min=1,         # minimum circle radius to detect, in pixels.
                r_max=10,        # maximum circle radius to detect, in pixels; how many mm one pixel covers depends on the lens FOV, resolution and object distance.
                r_step=1         # radius search step; smaller steps are more precise but cost more.
            ):
            # Bounding box of the circle, used as the ROI for the color check.
            area = (c.x() - c.r(), c.y() - c.r(), 2 * c.r(), 2 * c.r())  # (x, y, width, height)

            # Color statistics (blobs) inside that ROI.
            blob_red = img.find_blobs(thresholds, roi=area)

            if blob_red:
                img.draw_circle(c.x(), c.y(), c.r(), color=(0, 0, 0), thickness=2, fill=True)  # filled black circle over the detected spot

        if stop_flag:
            break
        with lock:
            Display.show_image(img)
            print(fps.fps())

        gc.collect()
    # MediaManager.deinit() is handled in the main thread's finally block.




def red_ball_thread():
    global sensor, stop_flag, fps
    while not stop_flag:
        # fps.tick()
        # check if we should exit.
        os.exitpoint()
        with lock:
            img = sensor.snapshot()

        for c in img.find_circles(
                threshold=5000,  # Hough transform threshold: higher values keep only stronger circles; tune for your scene.
                x_margin=10,     # merge tolerance in x; larger values merge nearby circles.
                y_margin=10,     # merge tolerance in y; larger values merge nearby circles.
                r_margin=10,     # merge tolerance in radius; larger values merge circles of similar radius.
                r_min=10,        # minimum circle radius to detect, in pixels.
                r_max=40,        # maximum circle radius to detect, in pixels.
                r_step=2         # radius search step; smaller steps are more precise but cost more.
            ):
            # Bounding box of the circle, used as the ROI for the color check.
            area = (c.x() - c.r(), c.y() - c.r(), 2 * c.r(), 2 * c.r())  # (x, y, width, height)
            yellow_ball = img.find_blobs([thresholds[1]], roi=area)  # color statistics inside that ROI

            if yellow_ball:
                img.draw_circle(c.x(), c.y(), c.r(), color=(0, 0, 255), thickness=2, fill=False)  # blue circle outline around the ball

        if stop_flag:
            break
        with lock:
            Display.show_image(img)
            print(fps.fps())

        gc.collect()




if __name__ == "__main__":
    # construct a Sensor object with the default configuration
    sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT)
    # sensor reset
    sensor.reset()
    # set hmirror
    # sensor.set_hmirror(False)
    # sensor vflip
    # sensor.set_vflip(False)
    # set chn0 output size
    sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT)
    # set chn0 output format
    sensor.set_pixformat(Sensor.RGB565)

    # use hdmi as display output, set to VGA
    # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True)

    # use hdmi as display output, set to 1080P
    # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True)

    # use lcd as display output
    # Display.init(Display.ST7701, to_ide = True)

    # use IDE as output
    Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100)

    # init media manager
    MediaManager.init()
    # sensor start run
    sensor.run()


    fps = time.clock()

    _thread.start_new_thread(red_ball_thread,())
    _thread.start_new_thread(red_light_thread,())
    try:
        while True:

            time.sleep_ms(50)

    except KeyboardInterrupt as e:
        print("user stop")
    except BaseException as e:
        print(f"Exception '{e}'")
    finally:
        # signal both worker threads to stop before tearing down
        stop_flag = True
        # sensor stop run
        if isinstance(sensor, Sensor):
            sensor.stop()
        # deinit display
        Display.deinit()

        os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
        time.sleep_ms(100)
        # release media buffer
        MediaManager.deinit()
        gc.collect()

1 Answer

Hi, please post your code. If you are running several detection algorithms inside a single while loop, you can use multithreading to speed things up, or increase the frame rate by lowering the resolution.
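
For the lower-resolution option, a minimal sketch built from the same Sensor/Display calls as the posted code (the 160x120 size is only an assumed example; circle radii, thresholds and expected fps all have to be re-tuned for the smaller image):

# Assumed example: halving each dimension quarters the pixel count, which is
# roughly what the find_circles/find_blobs cost scales with.
DETECT_WIDTH = 160
DETECT_HEIGHT = 120

sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT)
sensor.reset()
sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT)
sensor.set_pixformat(Sensor.RGB565)
Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100)
MediaManager.init()
sensor.run()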

Hi, I have posted the code again. After switching to multithreading, the detection quality got much worse, and the multithreaded version only reaches about 10 fps, while the serial version reaches 20 fps.
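
One thing worth noting about the posted threaded version: each thread grabs its own frame under the same lock and then runs its own full find_circles pass, so the sensor is read twice per displayed frame and the two heavy Hough searches still compete for the same CPU (MicroPython's _thread generally does not give true parallelism for this kind of work). For comparison, a sketch of the serial alternative, one snapshot per iteration with both detections run on that same frame, using only the calls and parameter values already in the posted code:

fps = time.clock()
while True:
    fps.tick()
    os.exitpoint()
    img = sensor.snapshot()          # one frame, shared by both detections

    # small, strong circles -> laser spot
    for c in img.find_circles(threshold=1500, x_margin=10, y_margin=10,
                              r_margin=10, r_min=1, r_max=10, r_step=1):
        area = (c.x() - c.r(), c.y() - c.r(), 2 * c.r(), 2 * c.r())
        if img.find_blobs(thresholds, roi=area):
            img.draw_circle(c.x(), c.y(), c.r(), color=(0, 0, 0), thickness=2, fill=True)

    # larger circles -> colored balls
    for c in img.find_circles(threshold=5000, x_margin=10, y_margin=10,
                              r_margin=10, r_min=10, r_max=40, r_step=2):
        area = (c.x() - c.r(), c.y() - c.r(), 2 * c.r(), 2 * c.r())
        if img.find_blobs([thresholds[1]], roi=area):
            img.draw_circle(c.x(), c.y(), c.r(), color=(0, 0, 255), thickness=2, fill=False)

    Display.show_image(img)
    print(fps.fps())
    gc.collect()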

In the threads' while loops it is best to add a sleep so that they yield the CPU.
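
A minimal sketch of that suggestion applied to one of the posted worker loops (the 5 ms value is only an example to tune):

def red_ball_thread():
    global sensor, stop_flag, fps
    while not stop_flag:
        os.exitpoint()
        with lock:
            img = sensor.snapshot()
        # ... circle and blob detection exactly as in the posted code ...
        with lock:
            Display.show_image(img)
        gc.collect()
        time.sleep_ms(5)  # yield the CPU so the other thread and the main loop get scheduled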