human/human/human_render.py


# encoding: utf-8
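"""Bridge between decoded frames and the playback renderers.

HumanRender pulls (res_frame, idx, audio_frames) tuples from a SyncQueue on a
background thread and forwards them to VoiceRender and VideoRender, which share
a single PlayClock so audio and video playback stay aligned.
"""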
import logging
import time
from queue import Empty
from threading import Event, Thread

from eventbus import EventBus
from human.message_type import MessageType
from human_handler import AudioHandler
from render import VoiceRender, VideoRender, PlayClock
from utils import SyncQueue

logger = logging.getLogger(__name__)

class HumanRender(AudioHandler):
    """Consumes rendered frames from a queue and drives the voice/video renderers."""

    def __init__(self, context, handler):
        super().__init__(context, handler)

        EventBus().register('stop', self._on_stop)
        EventBus().register('clear_cache', self.on_clear_cache)

        # Both renderers share one clock for audio/video synchronisation.
        play_clock = PlayClock()
        self._voice_render = VoiceRender(play_clock, context)
        self._video_render = VideoRender(play_clock, context, self)

        self._is_running = True
        self._queue = SyncQueue(context.batch_size, "HumanRender_queue")

        # The exit event stays set for as long as the worker thread should keep running.
        self._exit_event = Event()
        self._thread = Thread(target=self._on_run, name="AudioMalHandlerThread")
        self._exit_event.set()
        self._thread.start()

        self._image_render = None
        self._last_audio_ps = 0
        self._last_video_ps = 0
        self._empty_log = True

    def __del__(self):
        EventBus().unregister('stop', self._on_stop)
        EventBus().unregister('clear_cache', self.on_clear_cache)

    def _on_stop(self, *args, **kwargs):
        self.stop()

    def on_clear_cache(self, *args, **kwargs):
        self._queue.clear()

    def _on_run(self):
        logger.info('human render run')
        while self._exit_event.is_set() and self._is_running:
            self._run_step()
            delay = 0.075
            time.sleep(delay)
        logger.info('human render exit')

    def _run_step(self):
        try:
            value = self._queue.get(timeout=0.005)
            if value is None:
                return
            res_frame, idx, audio_frames = value
            if not self._empty_log:
                self._empty_log = True
                logger.info('render queue has data again')
        except Empty:
            if self._empty_log:
                self._empty_log = False
                logger.info('render queue is empty')
            return

        # If both audio frames carry a non-zero state flag, use type 0, otherwise type 1.
        type_ = 1
        if audio_frames[0][1] != 0 and audio_frames[1][1] != 0:
            type_ = 0

        if self._voice_render is not None:
            self._voice_render.render(audio_frames, self._last_audio_ps)
            self._last_audio_ps += 0.4
        if self._video_render is not None:
            self._video_render.render((res_frame, idx, type_), self._last_video_ps)
            self._last_video_ps += 0.4

    def set_image_render(self, render):
        self._image_render = render

    def put_image(self, image):
        if self._image_render is not None:
            self._image_render.on_render(image)

    def on_message(self, message):
        super().on_message(message)

    def on_handle(self, stream, index):
        if not self._is_running:
            return
        self._queue.put(stream)

    def pause_talk(self):
        logger.info('human pause_talk')
        # self._voice_render.pause_talk()
        # self._video_render.pause_talk()

    def stop(self):
        logger.info('human render stop')
        self._is_running = False
        if self._exit_event is None:
            return

        self._queue.clear()
        self._exit_event.clear()
        if self._thread.is_alive():
            self._thread.join()
        logger.info('human render stopped')

        # self._voice_render.stop()
        # self._video_render.stop()
        # self._exit_event.clear()
        # self._thread.join()
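
# Usage sketch (assumptions, not part of this module): `context` is expected to
# expose at least `batch_size`, and `handler` is the next AudioHandler in the
# chain (or None). A caller would typically wire the renderer up roughly like this:
#
#     render = HumanRender(context, handler)
#     render.set_image_render(window_render)  # hypothetical sink exposing on_render(image)
#     render.on_handle((res_frame, idx, audio_frames), 0)
#     ...
#     render.stop()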