Compare commits: 19f638ada3 ... c2871cac69

1 commit: c2871cac69
```diff
@@ -31,8 +31,6 @@ class AudioInferenceHandler(AudioHandler):
         self._is_running = True
         self.last_direction = 1
-        self.startfrom = 0
-        self.frame_indexes = []
         self._exit_event = Event()
         self._run_thread = Thread(target=self.__on_run, name="AudioInferenceHandlerThread")
         self._exit_event.set()
```
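This hunk drops `self.startfrom` and `self.frame_indexes` from the handler's per-instance state. Below is a minimal sketch of the trimmed state; only the attribute assignments come from the hunk, while the `AudioHandler` stub, the constructor signature, the placeholder `__on_run` body, and the `stop()` method are assumptions added just to make the sketch self-contained.

```python
from threading import Event, Thread


class AudioHandler:
    """Stand-in stub for the project's real base class (not part of the diff)."""


class AudioInferenceHandler(AudioHandler):
    def __init__(self):
        self._is_running = True
        self.last_direction = 1        # traversal direction stays on the handler
        # self.startfrom and self.frame_indexes are gone after this commit:
        # the handler no longer remembers where the previous batch stopped.
        self._exit_event = Event()
        self._run_thread = Thread(target=self.__on_run, name="AudioInferenceHandlerThread")

    def __on_run(self):
        # Placeholder loop; the real __on_run body is not shown in this hunk.
        while not self._exit_event.wait(0.1):
            pass

    def stop(self):
        # Assumed home of the `self._exit_event.set()` context line.
        self._exit_event.set()
```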
```diff
@@ -105,10 +103,9 @@ class AudioInferenceHandler(AudioHandler):
                 print('AudioInferenceHandler not running')
                 break
 
-            self.startfrom = self.frame_indexes[-1] if self.frame_indexes else 0
             if is_all_silence:
-                self.frame_indexes, self.last_direction = human_status.get_index_v2(self.startfrom, self.person_config["frame_config"], self.last_direction, batch_size) # [1,3,4]
-                for i, frame_idx in zip(range(batch_size), self.frame_indexes):
+                frame_indexes, self.last_direction = human_status.get_index_v2(self.person_config["frame_config"], self.last_direction, batch_size) # [1,3,4]
+                for i, frame_idx in zip(range(batch_size), frame_indexes):
                     if not self._is_running:
                         break
                     # self.on_next_handle((None, mirror_index(length, index),
```
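Before this commit, the handler refreshed `self.startfrom` from the last index of the previous batch on every pass; after it, that bookkeeping no longer lives in the handler, and the indexes returned by `get_index_v2` are bound to a local `frame_indexes`. The snippet below only illustrates what the deleted line computed, reusing the `[1,3,4]` example from the diff's own comment; it is not code from the project.

```python
# What the deleted line computed, on stand-in data: the start position for the
# next batch was the last index of the previous batch, falling back to 0 when
# no batch had been produced yet.
frame_indexes = []                                   # state before the first batch
startfrom = frame_indexes[-1] if frame_indexes else 0
print(startfrom)                                     # -> 0

frame_indexes = [1, 3, 4]                            # after a batch like the "# [1,3,4]" comment
startfrom = frame_indexes[-1] if frame_indexes else 0
print(startfrom)                                     # -> 4
```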
```diff
@@ -122,9 +119,9 @@ class AudioInferenceHandler(AudioHandler):
                 img_batch = []
                 index_list = []
                 # for i in range(batch_size):
-                self.frame_indexes,self.last_direction = human_status.get_index_v2(self.startfrom, self.person_config["frame_config"], self.last_direction, batch_size) # [1,3,4]
+                frame_indexes,self.last_direction = human_status.get_index_v2(self.person_config["frame_config"], self.last_direction, batch_size) # [1,3,4]
                 # TODO: logic for fetching loop frames while in the inference state
-                for i, frame_idx in zip(range(len(mel_batch)), self.frame_indexes):
+                for i, frame_idx in zip(range(len(mel_batch)), frame_indexes):
                     # idx = mirror_index(length, index + i)
                     index_list.append(frame_idx)
                     face = face_list_cycle[frame_idx]
```
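The inference branch changes the same way: the result of `get_index_v2` becomes a local `frame_indexes`, paired with the mel chunks via `zip`, which stops at the shorter of the two sequences. The sketch below mirrors that data flow on stand-in objects; `FakeHumanStatus`, the placeholder face list, and the dummy mel batch are invented for illustration and are not the project's code.

```python
# Stand-in data flow for the second call site after the change.
class FakeHumanStatus:
    def get_index_v2(self, frame_config, last_direction=1, batch_size=5):
        return list(range(batch_size)), last_direction   # dummy indexes


human_status = FakeHumanStatus()
person_config = {"frame_config": []}                     # placeholder frame ranges
face_list_cycle = [f"face_{i}" for i in range(10)]       # placeholder face frames
mel_batch = ["mel_0", "mel_1", "mel_2"]                  # placeholder mel chunks
last_direction, batch_size = 1, 5

frame_indexes, last_direction = human_status.get_index_v2(
    person_config["frame_config"], last_direction, batch_size)

index_list = []
for i, frame_idx in zip(range(len(mel_batch)), frame_indexes):
    index_list.append(frame_idx)
    face = face_list_cycle[frame_idx]                    # frame picked for this mel chunk

print(index_list)                                        # -> [0, 1, 2]: zip truncates to the mel batch
```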
```diff
@@ -52,7 +52,7 @@ class HumanStatus:
         self._current_frame = (self._current_frame + 1) % self._total_frames
         return index
 
-    def get_index_v2(self, startfrom, frame_config:list, last_direction:int=1, batch_size:int=5):
+    def get_index_v2(self, frame_config:list, last_direction:int=1, batch_size:int=5):
         """
         """
         audio_frame_length = batch_size
```
```diff
@@ -68,7 +68,7 @@ class HumanStatus:
             first_speak,   # just started speaking: move toward the speaking frames
             last_speak,    # speech finished: move toward the silent frames
         )
-        # startfrom = start_idx_list[-1]
+        startfrom = start_idx_list[-1]
         # return one batch of frame indexes at a time
         return start_idx_list, last_direction
 
```
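With `startfrom` removed from the signature and the `startfrom = start_idx_list[-1]` bookkeeping uncommented, `get_index_v2` now appears to track its own resume position. The diff only shows a local assignment, so whether and how that value is persisted is not visible; the sketch below is one plausible reading, and the `self._startfrom` attribute, the ping-pong traversal, and leaving `frame_config` unused are all assumptions rather than the project's actual implementation.

```python
# Hedged sketch of a get_index_v2 that manages its own start position.
class HumanStatus:
    def __init__(self, total_frames: int = 100):
        self._total_frames = total_frames
        self._startfrom = 0                       # assumed replacement for the removed parameter

    def get_index_v2(self, frame_config: list, last_direction: int = 1, batch_size: int = 5):
        # frame_config (per-state frame ranges) is not modeled in this sketch.
        start_idx_list = []
        idx = self._startfrom
        for _ in range(batch_size):
            idx += last_direction
            if idx <= 0 or idx >= self._total_frames - 1:
                last_direction = -last_direction  # bounce at the ends instead of wrapping
            start_idx_list.append(max(0, min(idx, self._total_frames - 1)))
        self._startfrom = start_idx_list[-1]      # remember where this batch ended
        return start_idx_list, last_direction
```

Under this reading, consecutive calls continue from where the previous batch ended, which is what the handler used to arrange itself via `self.startfrom = self.frame_indexes[-1] if self.frame_indexes else 0`.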