human/render/video_render.py

# coding: utf-8
import copy
import time
from queue import Empty

import cv2
import numpy as np

from .base_render import BaseRender


class VideoRender(BaseRender):
    """Composites inferred face patches into cycled base frames and hands the
    result to the human render, in step with the audio play clock."""

    def __init__(self, play_clock, context, human_render):
        # 0.038 s per-frame pacing passed to BaseRender (~26 fps).
        super().__init__(play_clock, context, 'Video', 0.038, "VideoRenderThread")
        self._human_render = human_render
        self._diff_avg_count = 0  # frames in a row beyond the A/V drift threshold

    def _run_step(self):
        # Render while the exit event is set (presumably cleared on shutdown).
        while self._exit_event.is_set():
            try:
                # Assumes the queue offers queue.Queue-style get(); without a
                # timeout, get() blocks forever and Empty could never be raised.
                value = self._queue.get(timeout=1)
                if value is None:
                    return
                frame, ps = value  # ps: presentation timestamp of this frame
                res_frame, idx, type_ = frame
            except Empty:
                return
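
            # A/V sync: `ps` is this frame's presentation timestamp and
            # clock_time() is the shared play clock, so a negative difference
            # means the frame arrived early and a positive one means it is late.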
            clock_time = self._play_clock.clock_time()
            time_difference = clock_time - ps
            if abs(time_difference) > self._play_clock.audio_diff_threshold:
                # React only after a few consecutive out-of-sync frames to
                # avoid chasing transient jitter.
                if self._diff_avg_count < 3:
                    self._diff_avg_count += 1
                else:
                    if time_difference < -self._play_clock.audio_diff_threshold:
                        # Frame is early relative to the audio clock: hold it.
                        sleep_time = abs(time_difference)
                        print("Video frame waiting to catch up with audio", sleep_time)
                        if sleep_time <= 1.0:
                            time.sleep(sleep_time)
                    # elif time_difference > self._play_clock.audio_diff_threshold:
                    #     # video is ahead of audio by more than 10 ms
                    #     print("Video frame dropped to catch up with audio")
                    #     continue
            else:
                self._diff_avg_count = 0
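
            # Drift math example: clock_time = 1.000 s and ps = 1.050 s give
            # time_difference = -0.050 s; after three such frames the renderer
            # sleeps 0.050 s so the audio clock can reach this timestamp
            # (audio_diff_threshold is ~10 ms per the comment above).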

            print('video render:',
                  'get face', self._queue.size(),
                  'audio queue', self._human_render.get_audio_queue_size())

            if type_ == 0:
                # No inferred face for this frame: use the cycled base frame as-is.
                combine_frame = self._context.frame_list_cycle[idx]
            else:
                # Paste the inferred face patch back into its bounding box on a
                # copy of the cycled base frame.
                bbox = self._context.coord_list_cycle[idx]
                combine_frame = copy.deepcopy(self._context.frame_list_cycle[idx])
                y1, y2, x1, x2 = bbox
                try:
                    res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
                except Exception:
                    print('resize error')
                    return
                combine_frame[y1:y2, x1:x2] = res_frame

            image = combine_frame
            # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if self._human_render is not None:
                self._human_render.put_image(image)
            # One frame per step: hand off the image and return to the caller.
            return
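

# --- Usage sketch -----------------------------------------------------------
# A minimal sketch of how this renderer is wired up, assuming BaseRender
# exposes start()/stop() and that the play clock, render context, and
# human_render object (with put_image() / get_audio_queue_size()) come from
# elsewhere in this project; all names outside this file are assumptions.
#
#     render = VideoRender(play_clock, context, human_render)
#     render.start()      # BaseRender presumably spawns VideoRenderThread
#     ...                 # frames are fed through the render queue
#     render.stop()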