# -*- coding: utf-8 -*-
import copy
from queue import Empty

import cv2
import numpy as np

from base_render import BaseRender


class VideoRender(BaseRender):
    """Composite result frames onto the cycled full frames and hand them to
    the display sink, keeping pace with the play clock."""

    def __init__(self, play_clock, context, human_render):
        super().__init__(play_clock)
        self._context = context
        self._human_render = human_render

    def __run_step(self):
        # Each queue item is a (res_frame, idx, type_, ps) tuple: the result
        # frame, an index into the context's cycle lists, a frame type flag,
        # and the frame's presentation timestamp.
        try:
            res_frame, idx, type_, ps = self._queue.get(block=True, timeout=0.01)
        except Empty:
            return

        if type_ != 0:
            # Non-zero type: show the cycled frame unchanged.
            combine_frame = self._context.frame_list_cycle[idx]
        else:
            # Type 0: paste the result frame back into its bounding box on a
            # copy of the cycled frame.
            bbox = self._context.coord_list_cycle[idx]
            combine_frame = copy.deepcopy(self._context.frame_list_cycle[idx])
            y1, y2, x1, x2 = bbox
            try:
                res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
            except Exception:
                # Skip frames that cannot be resized to the target box.
                return
            # combine_frame = get_image(ori_frame,res_frame,bbox)
            # t=time.perf_counter()
            combine_frame[y1:y2, x1:x2] = res_frame

        clock_time = self._play_clock.clock_time()
        time_difference = abs(clock_time - ps)
        if time_difference > self._play_clock.audio_diff_threshold:
            # Too far out of sync with the audio clock: drop this frame.
            print('video is slow')
            return
        # elif time_difference < self._play_clock.audio_diff_threshold:

        # OpenCV frames are BGR; convert to RGB before handing off for display.
        image = cv2.cvtColor(combine_frame, cv2.COLOR_BGR2RGB)
        if self._human_render is not None:
            self._human_render.put_image(image)
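

# Usage sketch (hypothetical, not part of the original module). The concrete
# collaborator classes are assumptions; VideoRender only requires a play clock
# exposing clock_time() and audio_diff_threshold, a context exposing
# frame_list_cycle and coord_list_cycle, and a display sink exposing
# put_image(). BaseRender is assumed to provide self._queue and the loop that
# drives each render step.
#
#   play_clock = PlayClock()                      # assumed clock implementation
#   context = RenderContext(frames, coords)       # assumed container for the cycle lists
#   renderer = VideoRender(play_clock, context, HumanRender())
#   renderer._queue.put((res_frame, idx, 0, presentation_ts))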