# Source code for wed.chaptors.spread_tile

# coding: utf-8
"""This submodule contains a set of functions for editing the following image:

.. image:: _images/chaptors/freedom1.png
   :class: popup-img

"""
import math
import os
from typing import Any, Dict, List, Optional, Tuple, Union

import cv2
import numpy as np
import numpy.typing as npt
from PIL import Image
from pydub import AudioSegment

from ..utils._colorings import toBLUE, toGREEN
from ..utils._path import OPENING_TEMPLATE_PATH, SPREAD_TILE_VIDEO_PATH
from ..utils.audio_utils import overlay_audio, synthesize_audio
from ..utils.generic_utils import Cycler, handleKeyError
from ..utils.image_utils import (
    SUPPORTED_CONVERSION_METHODS,
    arr2pil,
    draw_text_in_pil,
    image_conversion,
    pil2arr,
)
from .base_editor import BaseWedOPEditor


class SpreadTileEditor(BaseWedOPEditor):
    """Editor which in charge of editing spread-tile.

    .. image:: _images/chaptors/freedom1.png
       :class: popup-img

    Args:
        video_path (str, optional)              : Path to the video file for tile image. Defaults to ``SPREAD_TILE_VIDEO_PATH``.
        switching_timings (List[int], optional) : When to change the tiles. Specify by the frame position of the template video at ``OPENING_TEMPLATE_PATH`` (FPS = ``30.``). Defaults to ``[25, 25, 33]``.
        tile_nums (List[int], optional)         : Number of tiles at each timing corresponding to ``switching_timings``. Defaults to ``[1, 2, 4]``.
        conversion_methods (List[str], optional): Image conversion method at each timing corresponding to ``switching_timings``. Defaults to ``["", "nega", ""]``.
    """

    def __init__(
        self,
        video_path: str = SPREAD_TILE_VIDEO_PATH,
        switching_timings: Optional[List[int]] = None,
        tile_nums: Optional[List[int]] = None,
        conversion_methods: Optional[List[str]] = None,
    ):
        # Use None sentinels instead of mutable list defaults (shared across calls).
        if switching_timings is None:
            switching_timings = [25, 25, 33]
        if tile_nums is None:
            tile_nums = [1, 2, 4]
        if conversion_methods is None:
            conversion_methods = ["", "nega", ""]
        super().__init__(
            positions=(250, 332),
        )
        # Probe the tile video's length so we can sanity-check the timings below.
        tile_video = cv2.VideoCapture(video_path)
        tile_video_frame_count: float = tile_video.get(cv2.CAP_PROP_FRAME_COUNT)
        tile_video_fps = tile_video.get(cv2.CAP_PROP_FPS)
        tile_video_length_sec: float = tile_video_frame_count / tile_video_fps
        self.tile_video_path = video_path
        self.tile_video = tile_video
        self.tile_video_fps = tile_video_fps
        if tile_video_length_sec < max(switching_timings) / BaseWedOPEditor.FPS:
            self.logger.warning(
                f"Video length should be greater than or equal to the maximum of {toGREEN('switching_timings')}, but {tile_video_length_sec:.2f}[s] < {max(switching_timings) / BaseWedOPEditor.FPS :.2f}[s]"
            )
        # Check the switching_timings.
        if sum(switching_timings) < self.duration:
            self.logger.warning(
                f"The sum of {toGREEN('switching_timings')} should be greater than or equal the {toGREEN('duration')}, but {sum(switching_timings)} < {self.duration}"
            )
        num_switching = len(switching_timings)
        self.switching_timings = switching_timings
        # Set a "tile_nums" attribute; fall back to powers of two when lengths mismatch.
        if num_switching != len(tile_nums):
            tile_nums = [2 ** i for i in range(len(switching_timings))]
        self.tile_nums = tile_nums
        # Set a "conversion_methods" attribute; validate each method name.
        if num_switching != len(conversion_methods):
            conversion_methods = [""] * num_switching
        for conversion_method in conversion_methods:
            handleKeyError(
                lst=SUPPORTED_CONVERSION_METHODS, conversion_method=conversion_method
            )
        self.conversion_methods = conversion_methods
[docs] def get_tile_info(self, pos: int) -> Tuple[npt.NDArray[np.uint8], int]: if not self.start_pos <= pos <= self.end_pos: self.logger.warning( f"This editor is in charge of frames from {toGREEN(self.start_pos)} to {toGREEN(self.end_pos)}, so specify {toGREEN('pos')} between them." ) return None curt_pos = self.start_pos for i, timing in enumerate(self.switching_timings): for j in range(timing): if curt_pos == pos: # ret = self.tile_video.set(cv2.CAP_PROP_FRAME_COUNT, j) is_ok = self.tile_video.set( cv2.CAP_PROP_POS_MSEC, 1000 * j / BaseWedOPEditor.FPS ) is_ok, frame = self.tile_video.read() return (frame, i) curt_pos += 1 if curt_pos > self.end_pos: return None return None
[docs] def edit( self, frame: npt.NDArray[np.uint8], pos: int, span: int = 20 ) -> npt.NDArray[np.uint8]: """Edit the image if it is an assigned chapter (``pos``) Args: frame (npt.NDArray[np.uint8]) : Current frame (BGR image) in the video. pos (int) : Current position in the video. Returns: npt.NDArray[np.uint8]: Edited frame. """ if self.start_pos <= pos <= self.end_pos: tile, idx = self.get_tile_info(pos) frame = self.spread_tile( frame=frame, pos=pos, tile=tile, tile_num=self.tile_nums[idx], conversion_method=self.conversion_methods[idx], ) return frame
[docs] def spread_tile( self, frame: npt.NDArray[np.uint8], pos: int, tile: npt.NDArray[np.uint8], tile_num: int, conversion_method: str = "", ) -> npt.NDArray[np.uint8]: """Spread tile. Args: frame (npt.NDArray[np.uint8]) : Input frame (BGR ndarray image). NOT USED. pos (int) : The position of the frame to get. tile (npt.NDArray[np.uint8]) : One tile image. tile_num (int) : How many tiles to spread. conversion_method (str, optional) : How to convert an ``tile``. Use :func:`wed.utils.image_utils.image_conversion` to convert image. Defaults to ``""``. Returns: npt.NDArray[np.uint8]: ``frame`` with ``text`` drawn. """ tile = image_conversion(frame=tile, method=conversion_method) tile_resized = cv2.resize( src=tile, dsize=( math.ceil(BaseWedOPEditor.FRAME_WIDTH / tile_num), math.ceil(BaseWedOPEditor.FRAME_HEIGHT / tile_num), ), ) spreaded_tile = np.tile(tile_resized, reps=(tile_num, tile_num, 1))[ : BaseWedOPEditor.FRAME_HEIGHT, : BaseWedOPEditor.FRAME_WIDTH ] return spreaded_tile
[docs] def audio_for_overlay_create( self, out_path: Optional[str] = None ) -> Tuple[int, str]: """Create a tile (repeated) audio for overlaying based on ``tile_video_path`` attribute. Args: out_path (Optional[str], optional) : The path to the created audio file. Defaults to ``None``. Returns: str: The path to the created audio file. """ tile_audio = AudioSegment.from_file(self.tile_video_path) audio = tile_audio[:0] for timing in self.switching_timings: audio += tile_audio[: int(timing / BaseWedOPEditor.FPS * 1000)] if out_path is None: root, ext = os.path.splitext(self.tile_video_path) out_path = f"{root}_repeated.mp3" audio.export(out_path) self.logger.info(f"Tile repeated audio file is created at {toBLUE(out_path)}") return (True, out_path)