# encoding: utf-8
import copy
import time
from queue import Empty

import cv2
import numpy as np

from .base_render import BaseRender


class VideoRender(BaseRender):
    """Consumes inferred face frames from the render queue, composites them
    onto the cycled background frames, synchronizes against the audio play
    clock, and pushes the final RGB image to the human renderer."""

    def __init__(self, play_clock, context, human_render):
        """
        Args:
            play_clock: shared audio/video clock used for A/V sync
                (passed to BaseRender together with a 0.02 s step interval).
            context: provider of ``frame_list_cycle`` (background frames)
                and ``coord_list_cycle`` (per-frame face bounding boxes).
            human_render: sink with a ``put_image(image)`` method, or None.
        """
        super().__init__(play_clock, 0.02)
        self._context = context
        self._human_render = human_render

    def _run_step(self):
        """Process one queued frame: composite the face (if any), apply
        audio/video synchronization, and emit the frame.

        Queue items are ``((res_frame, idx, type_), ps)`` where ``ps`` is the
        frame's presentation timestamp. ``type_ == 0`` means "no generated
        face" — the background frame is shown unmodified.
        """
        try:
            frame, ps = self._queue.get(block=True, timeout=0.01)
            res_frame, idx, type_ = frame
            print('video render queue size', self._queue.qsize())
        except Empty:
            # Nothing to render this step.
            return

        if type_ == 0:
            # No generated face: show the cycled background frame as-is.
            combine_frame = self._context.frame_list_cycle[idx]
        else:
            print('get face', self._queue.qsize())
            bbox = self._context.coord_list_cycle[idx]
            # Deep-copy so pasting the face does not mutate the shared
            # background frame cycle.
            combine_frame = copy.deepcopy(self._context.frame_list_cycle[idx])
            y1, y2, x1, x2 = bbox
            try:
                res_frame = cv2.resize(res_frame.astype(np.uint8),
                                       (x2 - x1, y2 - y1))
            except cv2.error:
                # Was a bare `except:`; narrowed to the resize failure
                # (e.g. a degenerate/empty bounding box).
                print('resize error')
                return
            combine_frame[y1:y2, x1:x2] = res_frame

        # Positive difference: the frame's timestamp is behind the audio
        # clock (video is late). Negative: the frame is ahead of the audio.
        clock_time = self._play_clock.clock_time()
        time_difference = clock_time - ps
        print('video render:', ps, ' ', clock_time, ' ', time_difference)

        if time_difference < -0.01:
            # Video is ahead of audio by more than 10 ms: sleep the excess,
            # then skip this frame and keep waiting.
            sleep_time = abs(time_difference + 0.01)
            print("Video frame waiting to catch up with audio", sleep_time)
            if sleep_time > 0:
                time.sleep(sleep_time)
            return
        elif time_difference > 0.01:
            # Video is behind audio by more than 10 ms: drop the frame.
            # BUGFIX: the original repeated the `< -0.01` test here, which
            # made this drop branch unreachable.
            print("Video frame dropped to catch up with audio")
            return

        # Within the ±10 ms sync window: convert BGR -> RGB and emit.
        image = cv2.cvtColor(combine_frame, cv2.COLOR_BGR2RGB)
        if self._human_render is not None:
            self._human_render.put_image(image)