add main ipc render
This commit is contained in:
parent a23a33af1c
commit e9691dbc81
@@ -5,7 +5,7 @@ import queue
 import time
 from threading import Event, Thread
 
-from gfpgan import GFPGANer
+# from gfpgan import GFPGANer
 from eventbus import EventBus
 from human_handler import AudioHandler
 from utils import load_model, mirror_index, get_device, SyncQueue
@@ -16,25 +16,26 @@ current_file_path = os.path.dirname(os.path.abspath(__file__))
 
 def load_gfpgan_model(model_path):
     logger.info(f'load_gfpgan_model, path:{model_path}')
-    model = GFPGANer(
-        model_path=model_path,
-        upscale=1,
-        arch='clean',
-        channel_multiplier=2,
-        bg_upsampler=None,
-    )
-    return model
+    # model = GFPGANer(
+    #     model_path=model_path,
+    #     upscale=1,
+    #     arch='clean',
+    #     channel_multiplier=2,
+    #     bg_upsampler=None,
+    # )
+    return None
 #model
 
 
 def load_model(model_path):
-    import onnxruntime as ort
-
-    sess_opt = ort.SessionOptions()
-    sess_opt.intra_op_num_threads = 8
-    sess = ort.InferenceSession(model_path, sess_options=sess_opt, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
-
-    return sess
+    # import onnxruntime as ort
+
+    # sess_opt = ort.SessionOptions()
+    # sess_opt.intra_op_num_threads = 8
+    # sess = ort.InferenceSession(model_path, sess_options=sess_opt, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+    #
+    # return sess
+    return None
 
 class AudioInferenceOnnxHandler(AudioHandler):
     def __init__(self, context, handler):
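For reference, a minimal sketch of the ONNX Runtime path that this hunk comments out, using the same session options and providers; the model path, input name, and tensor shape in the usage lines are placeholder assumptions, not part of this commit.

import onnxruntime as ort
import numpy as np

def load_onnx_session(model_path):
    # Same setup as the commented-out code: 8 intra-op threads, CUDA first, CPU fallback.
    sess_opt = ort.SessionOptions()
    sess_opt.intra_op_num_threads = 8
    return ort.InferenceSession(
        model_path,
        sess_options=sess_opt,
        providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
    )

# Hypothetical usage; the input name and shape depend on the exported model.
# sess = load_onnx_session('./model.onnx')
# name = sess.get_inputs()[0].name
# out = sess.run(None, {name: np.zeros((1, 1, 80, 16), dtype=np.float32)})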
@@ -48,7 +48,7 @@ class HumanRender(AudioHandler):
         logging.info('human render run')
         while self._exit_event.is_set() and self._is_running:
             self._run_step()
-            delay = 0.02
+            delay = 0.04
             time.sleep(delay)
 
         logging.info('human render exit')
17  main.py  Normal file
@@ -0,0 +1,17 @@
+#encoding = utf8
+
+import logging
+import os
+
+from utils import config_logging
+
+logger = logging.getLogger(__name__)
+current_file_path = os.path.dirname(os.path.abspath(__file__))
+
+if __name__ == '__main__':
+    config_logging('./logs/info.log', logging.INFO, logging.INFO)
+
+    logger.info('------------start------------')
+    ui = PyGameUI()
+    ui.run()
+    logger.info('------------finish------------')
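Note that main.py instantiates PyGameUI without importing it, so the script as committed fails with a NameError. A minimal working entry point could look like the sketch below; the import path for PyGameUI is an assumption and should point at wherever the class is actually defined in the ui package.

#encoding = utf8
import logging

from utils import config_logging
# Assumed import path; PyGameUI is the class defined in the UI module touched later in this commit.
from ui import PyGameUI

logger = logging.getLogger(__name__)

if __name__ == '__main__':
    config_logging('./logs/info.log', logging.INFO, logging.INFO)

    logger.info('------------start------------')
    ui = PyGameUI()
    ui.run()
    logger.info('------------finish------------')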
49  ui/ipc_render.py  Normal file
@@ -0,0 +1,49 @@
+#encoding = utf8
+
+import os
+import logging
+from queue import Queue
+
+from human import HumanContext
+from ipc import IPCUtil
+
+logger = logging.getLogger(__name__)
+current_file_path = os.path.dirname(os.path.abspath(__file__))
+
+
+class IpcRender:
+    def __init__(self):
+        self._human_context = None
+        self._queue = None
+        self._ipc = IPCUtil('ipc_sender', 'ipc_sender')
+
+    def run(self):
+        self._queue = Queue()
+        self._human_context = HumanContext()
+        self._human_context.build()
+        render = self._human_context.render_handler
+        render.set_image_render(self)
+
+    def stop(self):
+        pass
+
+    def get_image(self):
+        pass
+
+    def get_audio(self):
+        pass
+
+    def send_audio(self, audio):
+        pass
+
+    def send_image(self, image):
+        pass
+
+    def send_text(self, text):
+        pass
+
+    def send_command(self, command):
+        pass
+
+    def send_binary(self, data, length):
+        pass
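IpcRender's send_* methods are all stubs in this commit. A hypothetical sketch of how send_image could serialize a frame, reusing the one-byte identifier plus 4-byte little-endian width/height header that the PyGame UI's send_image builds later in this diff; the actual IPCUtil transmit call is omitted because its API is not shown in this change.

import numpy as np

def pack_image_message(image: np.ndarray, identifier: bytes = b'\x01') -> bytes:
    # Header: 1-byte identifier, then 4-byte little-endian width and height,
    # followed by the raw pixel payload.
    height, width, channels = image.shape
    width_bytes = width.to_bytes(4, byteorder='little')
    height_bytes = height.to_bytes(4, byteorder='little')
    return identifier + width_bytes + height_bytes + image.tobytes()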
@@ -1,18 +1,15 @@
 #encoding = utf8
 import copy
 import logging
 import os
 import time
 from queue import Queue
 
 import cv2
 import numpy as np
 import pygame
 from pygame.locals import *
 
 from human import HumanContext
 from ipc import IPCUtil
-from utils import config_logging
+from utils import config_logging, render_image
 
 logger = logging.getLogger(__name__)
 current_file_path = os.path.dirname(os.path.abspath(__file__))
@@ -20,9 +17,7 @@ current_file_path = os.path.dirname(os.path.abspath(__file__))
 ipc = IPCUtil('ipc_sender', 'ipc_sender')
 
 
-def send_image(image):
-    identifier = b'\x01'
-
+def send_image(identifier, image):
     height, width, channels = image.shape
 
     width_bytes = width.to_bytes(4, byteorder='little')
@@ -40,80 +35,6 @@ def cal_box(inv_m, p):
     return x, y
 
 
-def img_warp_back_inv_m(img, img_to, inv_m):
-    h_up, w_up, c = img_to.shape
-    mask = np.ones_like(img).astype(np.float32)
-    inv_mask = cv2.warpAffine(mask, inv_m, (w_up, h_up))
-    inv_img = cv2.warpAffine(img, inv_m, (w_up, h_up))
-    mask_indices = inv_mask == 1
-    if 4 == c:
-        send_image(img_to)
-        img_to[:, :, :3][mask_indices] = inv_img[mask_indices]
-    else:
-        img_to[inv_mask == 1] = inv_img[inv_mask == 1]
-    cv2.imwrite('./full.png', img_to)
-    return img_to
-    # h_up, w_up, _ = img.shape
-    # _, _, c = img_to.shape
-    # # cv2.imwrite('./face.png', img)
-    # tx = int(inv_m[0][2])
-    # ty = int(inv_m[1][2])
-    #
-    # inv_m[0][2] = 0
-    # inv_m[1][2] = 0
-    #
-    # p0 = cal_box(inv_m, (0, 0))
-    # p1 = cal_box(inv_m, (w_up, 0))
-    # p2 = cal_box(inv_m, (w_up, h_up))
-    # p3 = cal_box(inv_m, (0, h_up))
-    # lp = (min(p0[0], p3[0]), min(p0[1], p1[1]))
-    # rp = (max(p2[0], p1[0]), min(p2[1], p3[1]))
-    #
-    # w_up = int(rp[0] - lp[0])
-    # h_up = int(rp[1] - lp[1])
-    #
-    # # print(f'src_x:{w_up}, src_y:{h_up}')
-    # inv_m[0][2] = 0
-    # inv_m[1][2] = abs(lp[1])
-    #
-    # mask = np.ones_like(img, dtype=np.float32)
-    # inv_mask = cv2.warpAffine(mask, inv_m, (w_up, h_up))
-    # inv_img = cv2.warpAffine(img, inv_m, (w_up, h_up))
-    #
-    # if c == 4:
-    #     # img_to[30:h, 30:w][:, :, :3] = img
-    #     img_to[ty:(h_up + ty), tx:(w_up + tx)][:, :, :3][inv_mask == 1] = inv_img[inv_mask == 1]
-    # else:
-    #     img_to[inv_mask == 1] = inv_img[inv_mask == 1]
-    #
-    # # cv2.imwrite('./full1.png', img_to)
-    # return img_to
-
-
-def render_image(context, frame):
-    res_frame, idx, type_ = frame
-
-    if type_ == 0:
-        combine_frame = context.frame_list_cycle[idx]
-    else:
-        bbox = context.coord_list_cycle[idx]
-        combine_frame = copy.deepcopy(context.frame_list_cycle[idx])
-        af = context.align_frames[idx]
-        inv_m = context.inv_m_frames[idx]
-        y1, y2, x1, x2 = bbox
-        try:
-            t = time.perf_counter()
-            res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
-            af[y1:y2, x1:x2] = res_frame
-            combine_frame = img_warp_back_inv_m(af, combine_frame, inv_m)
-        except Exception as e:
-            logging.error(f'resize error:{e}')
-            return
-
-    image = combine_frame
-    return image
-
-
 class PyGameUI:
     def __init__(self):
         self._human_context = None
@@ -5,4 +5,5 @@ from .sync_queue import SyncQueue
 from .utils import mirror_index, load_model, get_device, load_avatar, config_logging
 from .utils import read_image, object_stop
 from .utils import load_avatar_from_processed, load_avatar_from_256_processed
+from .utils import render_image
 from .audio_utils import melspectrogram, save_wav
@@ -1,4 +1,5 @@
 #encoding = utf8
+import copy
 import glob
 import logging
 import os
@@ -274,3 +275,38 @@ def config_logging(file_name: str, console_level: int = logging.INFO, file_level
 def object_stop(obj):
     if obj is not None:
         obj.stop()
+
+
+def img_warp_back_inv_m(img, img_to, inv_m):
+    h_up, w_up, c = img_to.shape
+    mask = np.ones_like(img).astype(np.float32)
+    inv_mask = cv2.warpAffine(mask, inv_m, (w_up, h_up))
+    inv_img = cv2.warpAffine(img, inv_m, (w_up, h_up))
+    mask_indices = inv_mask == 1
+    if 4 == c:
+        img_to[:, :, :3][mask_indices] = inv_img[mask_indices]
+    else:
+        img_to[inv_mask == 1] = inv_img[inv_mask == 1]
+    return img_to
+
+
+def render_image(context, frame):
+    res_frame, idx, type_ = frame
+
+    if type_ == 0:
+        combine_frame = context.frame_list_cycle[idx]
+    else:
+        bbox = context.coord_list_cycle[idx]
+        combine_frame = copy.deepcopy(context.frame_list_cycle[idx])
+        af = context.align_frames[idx]
+        inv_m = context.inv_m_frames[idx]
+        y1, y2, x1, x2 = bbox
+        try:
+            res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
+            af[y1:y2, x1:x2] = res_frame
+            combine_frame = img_warp_back_inv_m(af, combine_frame, inv_m)
+        except Exception as e:
+            logging.error(f'resize error:{e}')
+            return None
+
+    return combine_frame
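As a usage illustration (not part of the commit), img_warp_back_inv_m expects the inverse of the affine matrix that produced the aligned face crop and pastes the warped crop back into the full frame. A minimal sketch, assuming the function lives in utils/utils.py as the __init__ change suggests, and using a dummy alignment matrix in place of context.inv_m_frames[idx]:

import cv2
import numpy as np

from utils.utils import img_warp_back_inv_m  # added in the hunk above

# Dummy forward alignment matrix; the real one comes from the avatar preprocessing.
m = cv2.getRotationMatrix2D((128, 128), 0, 1.0)
inv_m = cv2.invertAffineTransform(m)

face = np.zeros((256, 256, 3), dtype=np.uint8)          # aligned face crop (e.g. model output)
full_frame = np.zeros((720, 1280, 3), dtype=np.uint8)   # original full-size frame

# Warp the crop back into full-frame coordinates and overwrite the covered pixels.
composited = img_warp_back_inv_m(face, full_frame, inv_m)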