# -*- coding: utf-8 -*-
import copy
import time
from queue import Empty
from enum import Enum

import cv2
import numpy as np

from .base_render import BaseRender
from human.message_type import MessageType


class VideoRender(BaseRender):
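    """Renders video frames in sync with the audio play clock.

    Each step pulls a frame from the render queue, corrects for drift
    against the audio clock, composites the inferred face region back into
    the full frame and forwards the result to the human renderer.
    """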
    def __init__(self, play_clock, context, human_render):
        super().__init__(play_clock, context, 'Video')
        self._human_render = human_render
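        # counts consecutive frames whose A/V drift exceeds the threshold;
        # corrections only start once the drift has persisted for 10 frames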
        self._diff_avg_count = 0

    def _run_step(self):
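        """Consume at most one frame from the queue, sync it to the audio
        clock, composite it and hand it off for display."""
        # _exit_event is assumed to stay set while the render loop is running
        # (BaseRender semantics); the method returns after handling one frame.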
        while self._exit_event.is_set():
            try:
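                # each queue item is ((res_frame, idx, type_), presentation timestamp)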
                frame, ps = self._queue.get(block=True, timeout=0.02)
                res_frame, idx, type_ = frame
            except Empty:
                return

            clock_time = self._play_clock.clock_time()
            time_difference = clock_time - ps
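            # time_difference > 0: the frame is late relative to the audio clock;
            # time_difference < 0: the frame is early and may need to wait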
            if abs(time_difference) > self._play_clock.audio_diff_threshold:
                if self._diff_avg_count < 10:
                    self._diff_avg_count += 1
                else:
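                    # drift has persisted for 10 consecutive frames; start correcting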
                    if time_difference < -self._play_clock.audio_diff_threshold:
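                        # frame is ahead of the audio clock: wait for audio to
                        # catch up, but never block for more than one second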
                        sleep_time = abs(time_difference)
                        # print("Video frame waiting to catch up with audio", sleep_time)
                        if sleep_time <= 1.0:
                            time.sleep(sleep_time)

                    # elif time_difference > self._play_clock.audio_diff_threshold:  # frame is behind the audio clock by more than the threshold
                    #     print("Video frame dropped to catch up with audio")
                    #     continue

            else:
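                # back within the sync threshold: reset the drift counter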
                self._diff_avg_count = 0

            # debug trace: frame pts vs. audio clock, drift, queue depth and drift counter
            print('video render:', ps, '  ', clock_time, '  ', time_difference,
                  'get face', self._queue.qsize(), self._diff_avg_count)

            if type_ == 0:
                # no inferred face for this frame: show the original cycle frame as-is
                combine_frame = self._context.frame_list_cycle[idx]
            else:
                # paste the inferred face crop back into its bounding box on a
                # copy of the original cycle frame
                bbox = self._context.coord_list_cycle[idx]
                combine_frame = copy.deepcopy(self._context.frame_list_cycle[idx])
                y1, y2, x1, x2 = bbox
                try:
                    res_frame = cv2.resize(res_frame.astype(np.uint8), (x2 - x1, y2 - y1))
                except Exception as exc:
                    print('resize error:', exc)
                    return
                combine_frame[y1:y2, x1:x2] = res_frame

            # OpenCV frames are BGR; convert to RGB for display
            image = cv2.cvtColor(combine_frame, cv2.COLOR_BGR2RGB)
            if self._human_render is not None:
                self._human_render.put_image(image)
            return