# human/render/video_render.py
#encoding = utf8
import copy
import time
from queue import Empty

import cv2
import numpy as np

from .base_render import BaseRender
2024-10-12 11:57:24 +00:00
2024-10-22 11:57:30 +00:00
class VideoRender(BaseRender):
    """Render video frames in lockstep with the audio play clock.

    Pulls ``(frame, timestamp)`` items off the inherited render queue,
    composites the inferred face patch back into the cached background
    frame when required, then waits or drops frames so that video stays
    within 10 ms of the audio clock before handing the image on.
    """

    def __init__(self, play_clock, context, human_render):
        # 0.02 s is the per-step interval handed to BaseRender.
        super().__init__(play_clock, 0.02)
        self._context = context
        self._human_render = human_render

    def _run_step(self):
        """Process one queued frame; returns early on timeout or A/V skew."""
        try:
            frame, ps = self._queue.get(block=True, timeout=0.01)
            res_frame, idx, type_ = frame
            print('video render queue size', self._queue.qsize())
        except Empty:
            # Nothing queued within the timeout; try again next step.
            return

        if type_ == 0:
            # Silent frame: reuse the cached background frame unchanged.
            combine_frame = self._context.frame_list_cycle[idx]
        else:
            print('get face', self._queue.qsize())
            bbox = self._context.coord_list_cycle[idx]
            # Deep-copy so the shared cycle frame is never mutated in place.
            combine_frame = copy.deepcopy(self._context.frame_list_cycle[idx])
            y1, y2, x1, x2 = bbox
            try:
                res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
            except (cv2.error, Exception):  # narrowed from bare except: best-effort skip
                print('resize error')
                return
            # combine_frame = get_image(ori_frame,res_frame,bbox)
            # t=time.perf_counter()
            combine_frame[y1:y2, x1:x2] = res_frame

        clock_time = self._play_clock.clock_time()
        time_difference = clock_time - ps
        print('video render:', ps, ' ', clock_time, ' ', time_difference)
        if time_difference < -0.01:  # video frame is ahead of audio by more than 10 ms
            sleep_time = abs(time_difference + 0.01)
            print("Video frame waiting to catch up with audio", sleep_time)
            if sleep_time > 0:
                time.sleep(sleep_time)  # only sleep for positive durations
            return  # keep waiting; this frame is not shown
        elif time_difference > 0.01:  # video frame lags audio by more than 10 ms
            # BUGFIX: condition was `time_difference < -0.01`, identical to the
            # branch above and therefore unreachable; the drop-frame path never
            # ran.  Matches the commented-out audio_diff_threshold check below.
            print("Video frame dropped to catch up with audio")
            return  # drop the frame
        # if time_difference > self._play_clock.audio_diff_threshold:
        #     # print('video is slow')
        #     return
        # elif time_difference < self._play_clock.audio_diff_threshold:

        image = combine_frame
        # OpenCV frames are BGR; downstream consumer expects RGB.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self._human_render is not None:
            self._human_render.put_image(image)