diff --git a/human/human_render.py b/human/human_render.py
index 5334b35..cc8061e 100644
--- a/human/human_render.py
+++ b/human/human_render.py
@@ -47,13 +47,8 @@ class HumanRender(AudioHandler):
     def _on_run(self):
         logging.info('human render run')
         while self._exit_event.is_set() and self._is_running:
-            # t = time.time()
             self._run_step()
-            # delay = time.time() - t
-            delay = 0.04 # - delay
-            # print(delay)
-            # if delay <= 0.0:
-            #     continue
+            delay = 0.03
             time.sleep(delay)
 
         logging.info('human render exit')
@@ -64,11 +59,9 @@ class HumanRender(AudioHandler):
             if value is None:
                 return
             res_frame, idx, audio_frames = value
-            # print('render queue size', self._queue.size())
             if not self._empty_log:
                 self._empty_log = True
                 logging.info('render render:')
-            # print('voice render queue size', self._queue.size())
         except Empty:
             if self._empty_log:
                 self._empty_log = False
diff --git a/render/video_render.py b/render/video_render.py
index 8831180..605859f 100644
--- a/render/video_render.py
+++ b/render/video_render.py
@@ -1,8 +1,7 @@
 #encoding = utf8
 import copy
+import logging
 import time
-from queue import Empty
-from enum import Enum
 
 import cv2
 import numpy as np
@@ -12,12 +11,18 @@ from .base_render import BaseRender
 
 
 def img_warp_back_inv_m(img, img_to, inv_m):
     h_up, w_up, c = img_to.shape
-
+    t = time.perf_counter()
     mask = np.ones_like(img).astype(np.float32)
     inv_mask = cv2.warpAffine(mask, inv_m, (w_up, h_up))
     inv_img = cv2.warpAffine(img, inv_m, (w_up, h_up))
-
-    img_to[inv_mask == 1] = inv_img[inv_mask == 1]
+    mask_indices = inv_mask == 1
+    print(f'time1: {time.perf_counter() - t}')
+    if 4 == c:
+        t = time.perf_counter()
+        img_to[:, :, :3][mask_indices] = inv_img[mask_indices]
+        print(f'time2: {time.perf_counter() - t}')
+    else:
+        img_to[inv_mask == 1] = inv_img[inv_mask == 1]
 
     return img_to
@@ -35,22 +40,20 @@ class VideoRender(BaseRender):
         else:
             bbox = self._context.coord_list_cycle[idx]
             combine_frame = copy.deepcopy(self._context.frame_list_cycle[idx])
-            af = copy.deepcopy(self._context.align_frames[idx])
+            af = self._context.align_frames[idx]
             inv_m = self._context.inv_m_frames[idx]
             y1, y2, x1, x2 = bbox
             try:
+                t = time.perf_counter()
                 res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
                 af[y1:y2, x1:x2] = res_frame
                 combine_frame = img_warp_back_inv_m(af, combine_frame, inv_m)
+                print(time.perf_counter() - t)
             except Exception as e:
-                print('resize error', e)
+                logging.error(f'resize error:{e}')
                 return
 
-            # cv2.imwrite(f'./images/res_frame_{ self.index }.png', res_frame)
-            # combine_frame[y1:y2, x1:x2] = res_frame
-            # cv2.imwrite(f'/combine_frame_{self.index}.png', combine_frame)
-            # self.index = self.index + 1
             image = combine_frame
-            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            # image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
             if self._human_render is not None:
                 self._human_render.put_image(image)
diff --git a/ui/pygame_ui.py b/ui/pygame_ui.py
index de4e5dd..f3890fe 100644
--- a/ui/pygame_ui.py
+++ b/ui/pygame_ui.py
@@ -3,6 +3,8 @@ import logging
 import os
 from queue import Queue
 
+import cv2
+import numpy as np
 import pygame
 from pygame.locals import *
 
@@ -46,7 +48,10 @@ class PyGameUI:
             self.screen_.blit(self.background_display_, (0, 0))
             self._update_human()
             if self._human_image is not None:
-                self.screen_.blit(self._human_image, (0, 0))
+                self.screen_.blit(self._human_image, (0, -200))
+
+            fps = self.clock.get_fps()
+            pygame.display.set_caption('fps:{:.2f}'.format(fps))
             pygame.display.flip()
         self.stop()
         pygame.quit()
@@ -58,6 +63,9 @@ class PyGameUI:
         color_format = "RGB"
         if 4 == image.shape[2]:
            color_format = "RGBA"
+            image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
+        else:
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
         self._human_image = pygame.image.frombuffer(image.tobytes(), image.shape[1::-1], color_format)
 
 
diff --git a/utils/utils.py b/utils/utils.py
index 5d99a7a..eb307ba 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -210,6 +210,21 @@ def load_avatar_from_processed(base_path, avatar_name):
     return frame_list_cycle, face_list_cycle, coord_list_frames
 
 
+def jpeg_to_png(image):
+    min_green = np.array([50, 100, 100])
+    max_green = np.array([70, 255, 255])
+
+    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
+    mask = cv2.inRange(hsv, min_green, max_green)
+    mask_not = cv2.bitwise_not(mask)
+    green_not = cv2.bitwise_and(image, image, mask=mask_not)
+    b, g, r = cv2.split(green_not)
+
+    # TODO: merge into a four-channel image
+    image = cv2.merge([b, g, r, mask_not])
+    return image
+
+
 def load_avatar_from_256_processed(base_path, avatar_name, pkl):
     avatar_path = os.path.join(base_path, 'data', 'avatars', avatar_name, pkl)
     print(f'load avatar from processed:{avatar_path}')
@@ -225,9 +240,9 @@ def load_avatar_from_256_processed(base_path, avatar_name, pkl):
 
     inv_m_frames = []
     frame_info_list = avatar_data['frame_info_list']
-    for frame_info in frame_info_list:
+    for frame_info in tqdm(frame_info_list):
         face_list_cycle.append(frame_info['img'])
-        frame_list_cycle.append(frame_info['frame'])
+        frame_list_cycle.append(jpeg_to_png(frame_info['frame']))
         coord_list_frames.append(frame_info['coords'])
         align_frames.append(frame_info['align_frame'])
         m_frames.append(frame_info['m'])
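
Note on the new four-channel branch in `img_warp_back_inv_m`: when the destination frame carries an alpha channel (`c == 4`), only the three colour planes are overwritten, so the alpha produced by the green-screen keying in `jpeg_to_png` survives the warp-back. A minimal sketch of that indexing pattern, assuming uint8 BGR/BGRA arrays; `paste_back_bgra` is an illustrative name, not part of the patch:

```python
import cv2
import numpy as np

def paste_back_bgra(patch, frame_bgra, inv_m):
    # Hypothetical mirror of the patched img_warp_back_inv_m fast path.
    h, w = frame_bgra.shape[:2]
    mask = np.ones_like(patch, dtype=np.float32)
    inv_mask = cv2.warpAffine(mask, inv_m, (w, h))  # 1.0 where the patch lands
    inv_img = cv2.warpAffine(patch, inv_m, (w, h))
    idx = inv_mask == 1                             # boolean index, computed once
    # Write only B, G, R; the alpha plane of the destination is preserved.
    frame_bgra[:, :, :3][idx] = inv_img[idx]
    return frame_bgra
```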
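
`jpeg_to_png` keys out the green screen: pixels whose HSV hue falls in the 50-70 band become transparent, everything else keeps full alpha. (The patch converts with `cv2.COLOR_RGB2HSV`; pure green still keys correctly because green is the middle channel in both RGB and BGR order.) A hedged usage sketch with placeholder file names:

```python
import cv2
from utils.utils import jpeg_to_png

frame = cv2.imread('frame.jpg')  # 3-channel frame shot against a green screen
bgra = jpeg_to_png(frame)        # 4-channel result, green keyed to alpha 0
cv2.imwrite('frame.png', bgra)   # PNG keeps the alpha channel
```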
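
The pygame change moves colour conversion into `_update_human`: `pygame.image.frombuffer` interprets raw bytes as "RGB"/"RGBA", while OpenCV frames arrive as BGR/BGRA, hence the per-branch `cv2.cvtColor`. A standalone sketch of the same idea (window size and frame source are placeholders):

```python
import cv2
import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))

frame = cv2.imread('frame.png', cv2.IMREAD_UNCHANGED)  # BGR or BGRA from OpenCV
if frame.shape[2] == 4:
    frame, fmt = cv2.cvtColor(frame, cv2.COLOR_BGRA2RGBA), 'RGBA'
else:
    frame, fmt = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), 'RGB'

# frombuffer takes (width, height); shape[1::-1] flips numpy's (h, w) order.
surface = pygame.image.frombuffer(frame.tobytes(), frame.shape[1::-1], fmt)
screen.blit(surface, (0, 0))
pygame.display.flip()
```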