try close infer

parent 6445b6ee05
commit aef7d3d499

Human.py (18 changed lines)
@@ -224,12 +224,13 @@ class Human:
        # self.coords_path = f"{self.avatar_path}/coords.pkl"
        # self.__loadavatar()

        self.stop = False
        self.res_render_queue = Queue(self._batch_size * 2)

        self.chunk_2_mal = Chunk2Mal(self)
        self._tts = TTSBase(self)
        self._infer = Infer(self)
-        self.chunk_2_mal.warm_up()
+        # self.chunk_2_mal.warm_up()

        self.audio_render = AudioRender()
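The constructor above hands inference results to the renderer through res_render_queue, a bounded queue.Queue sized to twice the batch size, so a fast producer blocks instead of piling frames up in memory. A minimal sketch of that backpressure behaviour; BATCH_SIZE and the producer/consumer functions are illustrative, not from the project:

import queue
import threading
import time

BATCH_SIZE = 4
res_render_queue = queue.Queue(BATCH_SIZE * 2)   # bounded: put() blocks once 8 items are waiting

def producer():
    for i in range(20):
        res_render_queue.put(i)                  # blocks here whenever the consumer falls behind
        print('queued', i)

def consumer():
    while True:
        try:
            item = res_render_queue.get(block=True, timeout=0.5)
        except queue.Empty:
            break                                # nothing arrived for a while: treat the producer as done
        time.sleep(0.05)                         # pretend rendering takes time
        print('rendered', item)

t = threading.Thread(target=producer)
t.start()
consumer()
t.join()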
@@ -249,6 +250,8 @@ class Human:
        # )).start()
        # self.render_event.set()

    def __del__(self):
        print('Human del')
    # def play_pcm(self):
    #     p = pyaudio.PyAudio()
    #     stream = p.open(format=p.get_format_from_width(2), channels=1, rate=16000, output=True)
@@ -413,8 +416,12 @@ class Human:
        return self._stride_right_size

    def on_destroy(self):
        self.stop = True
        # self.render_event.clear()
        # self._chunk_2_mal.stop()

        self.chunk_2_mal.stop()
        self._tts.stop()
        self._infer.stop()
        # if self._tts is not None:
        #     self._tts.stop()
        logging.info('human destroy')
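on_destroy flips the shared stop flag first, so producers stop feeding the queues, and only then stops each worker. A minimal sketch of that shutdown order, with hypothetical Stage and Pipeline classes standing in for the project's Chunk2Mal, TTSBase and Infer:

import logging
import threading
import time

class Stage:
    """Hypothetical pipeline stage: a daemon thread that polls until asked to stop."""
    def __init__(self, name):
        self.name = name
        self._running = threading.Event()
        self._running.set()                                  # set == keep running
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        while self._running.is_set():
            time.sleep(0.05)                                 # stand-in for real work

    def stop(self):
        self._running.clear()                                # ask the loop to exit ...
        self._thread.join()                                  # ... then wait until it actually has
        logging.info('%s stopped', self.name)

class Pipeline:
    def __init__(self):
        self.stop = False
        self.stages = [Stage('chunk2mal'), Stage('tts'), Stage('infer')]

    def on_destroy(self):
        self.stop = True               # 1. producers see the flag and stop feeding the queues
        for stage in self.stages:      # 2. stop the stages in pipeline order
            stage.stop()
        logging.info('pipeline destroyed')

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    Pipeline().on_destroy()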
@@ -459,13 +466,14 @@ class Human:
        self._test_image_queue.put(image)

    def push_res_frame(self, res_frame, idx, audio_frames):
        if self.stop:
            print("push_res_frame stop")
            return
        self.res_render_queue.put((res_frame, idx, audio_frames))

    def render(self):
        try:
            # img, aud = self._res_frame_queue.get(block=True, timeout=.3)
            # img = self._test_image_queue.get(block=True, timeout=.3)
-            res_frame, idx, audio_frames = self.res_render_queue.get(block=True, timeout=.3)
+            res_frame, idx, audio_frames = self.res_render_queue.get(block=True, timeout=.03)
        except queue.Empty:
            # print('render queue.Empty:')
            return None
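Dropping the get timeout in render from 0.3 s to 0.03 s keeps a render tick from stalling when no frame is ready and lets the loop re-check the stop flag far more often. A small sketch of that consume-or-return-None pattern; the queue, the frame tuple and render_once are illustrative, not the project's API:

import queue

res_render_queue = queue.Queue(8)
stop = False

def render_once():
    """Return the next frame if one arrives quickly, else None so the caller can carry on."""
    if stop:
        return None
    try:
        res_frame, idx, audio_frames = res_render_queue.get(block=True, timeout=0.03)
    except queue.Empty:
        return None                       # nothing ready: skip this tick instead of blocking
    return res_frame

res_render_queue.put(('frame-bytes', 0, []))
print(render_once())   # -> 'frame-bytes'
print(render_once())   # -> None after roughly 30 ms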
infer.py (34 changed lines)
@@ -348,18 +348,13 @@ class Infer:
        count = 0
        count_time = 0
        print('start inference')
-        #
-        # face_images_path = r'./face/'
-        # face_images_path = utils.read_files_path(face_images_path)
-        # face_list_cycle1 = read_images(face_images_path)
-        # face_det_results = face_detect(face_list_cycle1)

        while True:
            if self._exit_event.is_set():
                start_time = time.perf_counter()
                batch_size = self._human.get_batch_size()
                try:
-                    mel_batch = self._feat_queue.get(block=True, timeout=1)
+                    mel_batch = self._feat_queue.get(block=True, timeout=0.1)
                except queue.Empty:
                    continue
@@ -370,10 +365,8 @@ class Infer:
                    audio_frames.append((frame, type_))
                    if type_ == 0:
                        is_all_silence = False

                if is_all_silence:
                    for i in range(batch_size):
                        # res_frame_queue.put((None, mirror_index(length, index), audio_frames[i * 2:i * 2 + 2]))
                        self._human.push_res_frame(None, mirror_index(length, index), audio_frames[i * 2:i * 2 + 2])
                        index = index + 1
                else:
@@ -385,9 +378,6 @@ class Infer:
                        face = face_list_cycle[idx]
                        img_batch.append(face)

-                    # img_batch_1, mel_batch_1, frames, coords = datagen_signal(face_list_cycle1,
-                    #                                                            mel_batch, face_det_results)

                    img_batch = np.asarray(img_batch)
                    mel_batch = np.asarray(mel_batch)
                    img_masked = img_batch.copy()
@@ -402,7 +392,7 @@ class Infer:

                    with torch.no_grad():
                        pred = model(mel_batch, img_batch)
                        # pred = model(mel_batch, img_batch) * 255.0

                    pred = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.

                    count_time += (time.perf_counter() - t)
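For context, the pred.cpu().numpy().transpose(0, 2, 3, 1) * 255. line converts the model's N x C x H x W output in [0, 1] into N x H x W x C frames scaled to [0, 255]. A standalone illustration with a dummy tensor in place of the real model output; the batch size and the 96x96 face size are assumptions:

import torch

with torch.no_grad():
    pred = torch.rand(2, 3, 96, 96)                          # stand-in for model(mel_batch, img_batch)

frames = pred.cpu().numpy().transpose(0, 2, 3, 1) * 255.     # N x H x W x C, values now in [0, 255]
print(frames.shape)                                          # (2, 96, 96, 3); cast with .astype('uint8') before display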
@@ -412,18 +402,30 @@ class Infer:
                        print(f"------actual avg infer fps:{count / count_time:.4f}")
                        count = 0
                        count_time = 0

                    image_index = 0
                    for i, res_frame in enumerate(pred):
                        # self.__pushmedia(res_frame,loop,audio_track,video_track)
                        # res_frame_queue.put(
                        #     (res_frame, __mirror_index(length, index), audio_frames[i * 2:i * 2 + 2]))
                        self._human.push_res_frame(res_frame, mirror_index(length, index),
                                                   audio_frames[i * 2:i * 2 + 2])
                        index = index + 1
                        # print('total batch time:',time.perf_counter()-start_time)
                        image_index = image_index + 1
                    print('batch count', image_index)
                    print('total batch time:', time.perf_counter() - start_time)
            else:
                time.sleep(1)
                break
        print('musereal inference processor stop')

    def stop(self):
        if self._exit_event is None:
            return

        self.pause_talk()

        self._exit_event.clear()
        self._run_thread.join()
        logging.info('Infer stop')

    def pause_talk(self):
        self._feat_queue.queue.clear()
        self._audio_out_queue.queue.clear()
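Infer uses its exit event in the inverted sense: is_set() means keep running, and stop() clears it, then joins the worker thread; pause_talk empties the pending queues first so the join is not held up by queued work. A minimal sketch of that convention with an illustrative Worker class; it drains with get_nowait() rather than reaching into Queue.queue directly, which is the more conservative way to clear a queue.Queue:

import logging
import queue
import threading

class Worker:
    def __init__(self):
        self._feat_queue = queue.Queue()
        self._exit_event = threading.Event()
        self._exit_event.set()                               # set == keep running (as in Infer)
        self._run_thread = threading.Thread(target=self._run, daemon=True)
        self._run_thread.start()

    def _run(self):
        while True:
            if self._exit_event.is_set():
                try:
                    item = self._feat_queue.get(block=True, timeout=0.1)   # short timeout: notice a stop request quickly
                except queue.Empty:
                    continue
                logging.info('processed %s', item)
            else:
                break                                        # event cleared: leave the loop

    def pause_talk(self):
        while True:                                          # drain pending work without touching q.queue
            try:
                self._feat_queue.get_nowait()
            except queue.Empty:
                break

    def stop(self):
        if self._exit_event is None:
            return
        self.pause_talk()
        self._exit_event.clear()                             # request exit ...
        self._run_thread.join()                              # ... and wait for the loop to finish
        logging.info('worker stopped')

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    w = Worker()
    w._feat_queue.put('mel-chunk')
    w.stop()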
@@ -95,9 +95,8 @@ class TTSBase:
        pass

    def stop(self):
        self._pcm_stream.stop_stream()
        self._pcm_player.close(self._pcm_stream)
        self._pcm_player.terminate()
        self.input_stream.seek(0)
        self.input_stream.truncate()
        if self._exit_event is None:
            return
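The TTSBase.stop hunk tears the audio player down in the usual PyAudio order: stop the stream, close it, terminate the PyAudio instance, then reset the buffered input. A short sketch of the same teardown, assuming pyaudio and a working output device; it closes the stream via stream.close(), the more common spelling than closing it through the player object:

import io
import pyaudio

pcm_player = pyaudio.PyAudio()
pcm_stream = pcm_player.open(format=pcm_player.get_format_from_width(2),
                             channels=1, rate=16000, output=True)
input_stream = io.BytesIO(b'\x00\x00' * 16000)        # pretend this holds one second of buffered PCM

def stop():
    pcm_stream.stop_stream()      # stop playback first
    pcm_stream.close()            # release the stream ...
    pcm_player.terminate()        # ... then the PortAudio session
    input_stream.seek(0)          # rewind and empty the pending PCM buffer
    input_stream.truncate()

stop()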
ui.py (8 changed lines)
@@ -59,6 +59,10 @@ class App(customtkinter.CTk):
        self._render()
        # self.play_audio()

+    def destroy(self):
+        self.on_destroy()
+        super().destroy()

    def on_destroy(self):
        logger.info('------------App destroy------------')
        self._human.on_destroy()
@@ -173,5 +177,5 @@ if __name__ == "__main__":
    logger.info('------------start------------')
    app = App()
    app.mainloop()
-    app.on_destroy()
-    # logger.info('------------exit------------')
+    # app.on_destroy()
+    logger.info('------------exit------------')
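With App.destroy overridden to call on_destroy before super().destroy(), the explicit app.on_destroy() after mainloop() becomes redundant and is commented out. The same pattern shown with plain tkinter instead of customtkinter; the cleanup body is a placeholder:

import tkinter

class App(tkinter.Tk):
    def destroy(self):
        self.on_destroy()            # run cleanup before Tk tears the window down
        super().destroy()

    def on_destroy(self):
        print('App destroy')         # placeholder for self._human.on_destroy() and friends

if __name__ == '__main__':
    app = App()
    app.after(100, app.destroy)      # auto-close after 100 ms so the example exits on its own
    app.mainloop()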