diff --git a/human/audio_inference_handler.py b/human/audio_inference_handler.py
index 30e430b..99a0af1 100644
--- a/human/audio_inference_handler.py
+++ b/human/audio_inference_handler.py
@@ -59,7 +59,7 @@ class AudioInferenceHandler(AudioHandler):
         super().on_message(message)
 
     def __on_run(self):
-        wav2lip_path = os.path.join(current_file_path, '..', 'checkpoints', 'wav2lip_gan.pth')
+        wav2lip_path = os.path.join(current_file_path, '..', 'checkpoints', 'wav2lip.pth')
         logger.info(f'AudioInferenceHandler init, path:{wav2lip_path}')
         model = load_model(wav2lip_path)
         logger.info("Model loaded")
diff --git a/human/human_context.py b/human/human_context.py
index 75a6df2..4e0ee0f 100644
--- a/human/human_context.py
+++ b/human/human_context.py
@@ -37,9 +37,9 @@ class HumanContext:
         print(f'device:{self._device}')
         base_path = os.path.join(current_file_path, '..')
         logger.info(f'base path:{base_path}')
-        # full_images, face_frames, coord_frames = load_avatar(base_path, self._image_size, self._device)
-        full_images, face_frames, coord_frames = load_avatar_from_processed(base_path,
-                                                                            'wav2lip_avatar1')
+        full_images, face_frames, coord_frames = load_avatar(base_path, self._image_size, self._device)
+        # full_images, face_frames, coord_frames = load_avatar_from_processed(base_path,
+        #                                                                     'wav2lip_avatar1')
         self._frame_list_cycle = full_images
         self._face_list_cycle = face_frames
         self._coord_list_cycle = coord_frames
diff --git a/utils/utils.py b/utils/utils.py
index 8968245..0a64429 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -170,7 +170,7 @@ def load_model(path):
 
 
 def load_avatar(path, img_size, device):
     print(f'load avatar:{path}')
-    face_images_path = path
+    face_images_path = os.path.join(path, 'face')
     face_images_path = read_files_path(face_images_path)
     full_list_cycle = read_images(face_images_path)