unit
diff --git a/docker-compose.yaml b/docker-compose.yaml
index e56e2a4..18d47c5 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -6,8 +6,8 @@ services:
ports:
- "8100:8100"
volumes:
- - "B:/frames_evaluators/input_directory:/app/input_directory"
- - "B:/frames_evaluators/output_directory:/app/output_directory"
+ - "./input_directory:/app/input_directory"
+ - "./output_directory:/app/output_directory"
environment:
- NVIDIA_VISIBLE_DEVICES=all
- NVIDIA_DRIVER_CAPABILITIES=compute,video,utility
diff --git a/extractor_service/Dockerfile b/extractor_service/Dockerfile
index 683edd6..7782f5b 100644
--- a/extractor_service/Dockerfile
+++ b/extractor_service/Dockerfile
@@ -29,6 +29,7 @@ RUN pip install --no-cache-dir -r requirements.txt
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,video,utility
ENV TF_CPP_MIN_LOG_LEVEL 3
+ENV DOCKER_ENV=1
COPY . .
diff --git a/common/__init__.py b/extractor_service/__init__.py
similarity index 100%
rename from common/__init__.py
rename to extractor_service/__init__.py
diff --git a/extractor_service/app/dependencies.py b/extractor_service/app/dependencies.py
new file mode 100644
index 0000000..74c7605
--- /dev/null
+++ b/extractor_service/app/dependencies.py
@@ -0,0 +1,95 @@
+"""
+This module provides dependency management for extractors using FastAPI's dependency injection.
+LICENSE
+=======
+Copyright (C) 2024 Bartłomiej Flis
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+from dataclasses import dataclass
+from typing import Type
+
+from fastapi import Depends
+
+from .image_evaluators import InceptionResNetNIMA
+from .image_processors import OpenCVImage
+from .video_processors import OpenCVVideo
+
+
+@dataclass
+class ExtractorDependencies:
+ """
+ Data class to hold dependencies for the extractor.
+
+ Attributes:
+ image_processor (Type[OpenCVImage]): Processor for image processing.
+ video_processor (Type[OpenCVVideo]): Processor for video processing.
+ evaluator (Type[InceptionResNetNIMA]): Evaluator for image quality.
+ """
+ image_processor: Type[OpenCVImage]
+ video_processor: Type[OpenCVVideo]
+ evaluator: Type[InceptionResNetNIMA]
+
+
+def get_image_processor() -> Type[OpenCVImage]:
+ """
+ Provides the image processor dependency.
+
+ Returns:
+ Type[OpenCVImage]: The image processor class.
+ """
+ return OpenCVImage
+
+
+def get_video_processor() -> Type[OpenCVVideo]:
+ """
+ Provides the video processor dependency.
+
+ Returns:
+ Type[OpenCVVideo]: The video processor class.
+ """
+ return OpenCVVideo
+
+
+def get_evaluator() -> Type[InceptionResNetNIMA]:
+ """
+ Provides the image evaluator dependency.
+
+ Returns:
+ Type[InceptionResNetNIMA]: The image evaluator class.
+ """
+ return InceptionResNetNIMA
+
+
+def get_extractor_dependencies(
+ image_processor=Depends(get_image_processor),
+ video_processor=Depends(get_video_processor),
+ evaluator=Depends(get_evaluator)
+) -> ExtractorDependencies:
+ """
+ Provides the dependencies required for the extractor.
+
+ Args:
+ image_processor (Type[OpenCVImage], optional): Dependency injection for image processor.
+ video_processor (Type[OpenCVVideo], optional): Dependency injection for video processor.
+ evaluator (Type[InceptionResNetNIMA], optional): Dependency injection for image evaluator.
+
+ Returns:
+ ExtractorDependencies: All necessary dependencies for the extractor.
+ """
+ return ExtractorDependencies(
+ image_processor=image_processor,
+ video_processor=video_processor,
+ evaluator=evaluator
+ )
diff --git a/extractor_service/app/extractor_manager.py b/extractor_service/app/extractor_manager.py
index b4aeec0..da0de85 100644
--- a/extractor_service/app/extractor_manager.py
+++ b/extractor_service/app/extractor_manager.py
@@ -19,10 +19,10 @@
along with this program. If not, see .
"""
import logging
-from typing import Type
from fastapi import HTTPException, BackgroundTasks
+from .dependencies import ExtractorDependencies
from .extractors import Extractor, ExtractorFactory
from .schemas import ExtractorConfig
@@ -35,7 +35,6 @@ class ExtractorManager:
maintaining system stability.
"""
_active_extractor = None
- _config = None
@classmethod
def get_active_extractor(cls) -> str:
@@ -48,30 +47,28 @@ def get_active_extractor(cls) -> str:
return cls._active_extractor
@classmethod
- def start_extractor(cls, background_tasks: BackgroundTasks, config: ExtractorConfig,
- extractor_name: str) -> str:
+ def start_extractor(cls, extractor_name: str, background_tasks: BackgroundTasks,
+ config: ExtractorConfig, dependencies: ExtractorDependencies) -> str:
"""
Initializes the extractor class and runs the extraction process in the background.
Args:
- config (ExtractorConfig): A Pydantic model with configuration
- parameters for the extractor.
- background_tasks: A FastAPI tool for running tasks in background,
- which allows non-blocking operation of long-running tasks.
extractor_name (str): The name of the extractor that will be used.
+ background_tasks (BackgroundTasks): A FastAPI tool for running tasks in background.
+ config (ExtractorConfig): A Pydantic model with extractor configuration.
+ dependencies (ExtractorDependencies): Dependencies that will be used in extractor.
Returns:
str: Endpoint feedback message with started extractor name.
"""
- cls._config = config
cls._check_is_already_extracting()
- extractor_class = ExtractorFactory.create_extractor(extractor_name)
- background_tasks.add_task(cls.__run_extractor, extractor_class, extractor_name)
+ extractor = ExtractorFactory.create_extractor(extractor_name, config, dependencies)
+ background_tasks.add_task(cls.__run_extractor, extractor, extractor_name)
message = f"'{extractor_name}' started."
return message
@classmethod
- def __run_extractor(cls, extractor: Type[Extractor], extractor_name: str) -> None:
+ def __run_extractor(cls, extractor: Extractor, extractor_name: str) -> None:
"""
Run extraction process and clean after it's done.
@@ -81,10 +78,9 @@ def __run_extractor(cls, extractor: Type[Extractor], extractor_name: str) -> Non
"""
try:
cls._active_extractor = extractor_name
- extractor(cls._config).process()
+ extractor.process()
finally:
cls._active_extractor = None
- cls._config = None
@classmethod
def _check_is_already_extracting(cls) -> None:
diff --git a/extractor_service/app/extractors.py b/extractor_service/app/extractors.py
index 71227f1..4919fd0 100644
--- a/extractor_service/app/extractors.py
+++ b/extractor_service/app/extractors.py
@@ -16,11 +16,11 @@
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with this program. If not, see .
+along with this program. If not, see .
"""
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
@@ -31,35 +31,46 @@
import numpy as np
+from .dependencies import ExtractorDependencies
from .schemas import ExtractorConfig
-from .video_processors import OpenCVVideo
-from .image_processors import OpenCVImage
-from .image_evaluators import InceptionResNetNIMA
+from .video_processors import VideoProcessor
+from .image_processors import ImageProcessor
+from .image_evaluators import ImageEvaluator
logger = logging.getLogger(__name__)
class Extractor(ABC):
"""Abstract class for creating extractors."""
+
class EmptyInputDirectoryError(Exception):
"""Error appear when extractor can't get any input to extraction."""
- def __init__(self, config: ExtractorConfig) -> None:
+ def __init__(self, config: ExtractorConfig,
+ image_processor: Type[ImageProcessor],
+ video_processor: Type[VideoProcessor],
+ image_evaluator_class: Type[ImageEvaluator]) -> None:
"""
Initializes the manager with the given extractor configuration.
Args:
config (ExtractorConfig): A Pydantic model with configuration
parameters for the extractor.
+ image_processor (Type[ImageProcessor]): The class for processing images.
+ video_processor (Type[VideoProcessor]): The class for processing videos.
+ image_evaluator_class (Type[ImageEvaluator]): The class for evaluating images.
"""
self._config = config
+ self._image_processor = image_processor
+ self._video_processor = video_processor
+ self._image_evaluator_class = image_evaluator_class
self._image_evaluator = None
@abstractmethod
def process(self) -> None:
"""Abstract main method for extraction process implementation."""
- def _get_image_evaluator(self) -> InceptionResNetNIMA:
+ def _get_image_evaluator(self) -> ImageEvaluator:
"""
Initializes one of image evaluators (currently NIMA) and
adds it to extractor instance parameters.
@@ -67,10 +78,10 @@ def _get_image_evaluator(self) -> InceptionResNetNIMA:
Returns:
PyIQA: Image evaluator class instance for evaluating images.
"""
- self._image_evaluator = InceptionResNetNIMA(self._config)
+ self._image_evaluator = self._image_evaluator_class(self._config)
return self._image_evaluator
- def _list_input_directory_files(self, extensions: tuple[str],
+ def _list_input_directory_files(self, extensions: tuple[str, ...],
prefix: str | None = None) -> list[Path]:
"""
List all files with given extensions except files with given filename prefix form
@@ -88,8 +99,8 @@ def _list_input_directory_files(self, extensions: tuple[str],
files = [
entry for entry in entries
if entry.is_file()
- and entry.suffix in extensions
- and (prefix is None or not entry.name.startswith(prefix))
+ and entry.suffix in extensions
+ and (prefix is None or not entry.name.startswith(prefix))
]
if not files:
prefix = prefix if prefix else "Prefix not provided"
@@ -101,7 +112,7 @@ def _list_input_directory_files(self, extensions: tuple[str],
)
logger.error(error_massage)
raise self.EmptyInputDirectoryError(error_massage)
- logger.info(f"Directory '%s' files listed.", str(directory))
+ logger.info("Directory '%s' files listed.", str(directory))
logger.debug("Listed file paths: %s", files)
return files
@@ -110,7 +121,7 @@ def _evaluate_images(self, normalized_images: np.ndarray) -> np.array:
Rating all images in provided images batch using already initialized image evaluator.
Args:
- normalized_images (list[np.ndarray]): Already normalized images np.ndarray for evaluating.
+ normalized_images (list[np.ndarray]): Already normalized images for evaluating.
Returns:
np.array: Array with images scores in given images order.
@@ -118,8 +129,7 @@ def _evaluate_images(self, normalized_images: np.ndarray) -> np.array:
scores = np.array(self._image_evaluator.evaluate_images(normalized_images))
return scores
- @staticmethod
- def _read_images(paths: list[Path]) -> list[np.ndarray]:
+ def _read_images(self, paths: list[Path]) -> list[np.ndarray]:
"""
Read all images from given paths synonymously.
@@ -132,7 +142,7 @@ def _read_images(paths: list[Path]) -> list[np.ndarray]:
with ThreadPoolExecutor() as executor:
images = []
futures = [executor.submit(
- OpenCVImage.read_image, path,
+ self._image_processor.read_image, path,
) for path in paths]
for future in futures:
image = future.result()
@@ -149,15 +159,15 @@ def _save_images(self, images: list[np.ndarray]) -> None:
"""
with ThreadPoolExecutor() as executor:
futures = [executor.submit(
- OpenCVImage.save_image, image,
+ self._image_processor.save_image, image,
self._config.output_directory,
self._config.images_output_format
) for image in images]
for future in futures:
future.result()
- @staticmethod
- def _normalize_images(images: list[np.ndarray], target_size: tuple[int, int]) -> np.ndarray:
+ def _normalize_images(self, images: list[np.ndarray],
+ target_size: tuple[int, int]) -> np.ndarray:
"""
Normalize all images in given list to target size for further operations.
@@ -168,7 +178,7 @@ def _normalize_images(images: list[np.ndarray], target_size: tuple[int, int]) ->
Returns:
np.ndarray: All images as a one numpy array.
"""
- normalized_images = OpenCVImage.normalize_images(images, target_size)
+ normalized_images = self._image_processor.normalize_images(images, target_size)
return normalized_images
@staticmethod
@@ -200,22 +210,28 @@ def _signal_readiness_for_shutdown() -> None:
class ExtractorFactory:
"""Extractor factory for getting extractors class by their names."""
+
@staticmethod
- def create_extractor(extractor_name: str) -> Type[Extractor]:
+ def create_extractor(extractor_name: str, config: ExtractorConfig,
+ dependencies: ExtractorDependencies) -> Extractor:
"""
Match extractor class by its name and return its class.
Args:
- extractor_name (str): Name of the extractor that class will be returned.
+ extractor_name (str): Name of the extractor.
+ config (ExtractorConfig): A Pydantic model with extractor configuration.
+ dependencies (ExtractorDependencies): Dependencies that will be used in extractor.
Returns:
Extractor: Chosen extractor class.
"""
match extractor_name:
case "best_frames_extractor":
- return BestFramesExtractor
+ return BestFramesExtractor(config, dependencies.image_processor,
+ dependencies.video_processor, dependencies.evaluator)
case "top_images_extractor":
- return TopImagesExtractor
+ return TopImagesExtractor(config, dependencies.image_processor,
+ dependencies.video_processor, dependencies.evaluator)
case _:
error_massage = f"Provided unknown extractor name: {extractor_name}"
logger.error(error_massage)
@@ -224,6 +240,7 @@ def create_extractor(extractor_name: str) -> Type[Extractor]:
class BestFramesExtractor(Extractor):
"""Extractor for extracting best frames from videos in any input directory."""
+
def process(self) -> None:
"""
Rate all videos in given config input directory and
@@ -249,7 +266,9 @@ def _extract_best_frames(self, video_path: Path) -> None:
Args:
video_path (Path): Path of the video that will be extracted.
"""
- frames_batch_generator = OpenCVVideo.get_next_frames(video_path, self._config.batch_size)
+ frames_batch_generator = self._video_processor.get_next_frames(
+ video_path, self._config.batch_size
+ )
for frames in frames_batch_generator:
if not frames:
continue
@@ -286,7 +305,8 @@ def _get_best_frames(self, frames: list[np.ndarray]) -> list[np.ndarray]:
class TopImagesExtractor(Extractor):
- """Extractor for extracting images that are in top percent of images in config input directory."""
+ """Images extractor for extracting top percent of images in config input directory."""
+
def process(self) -> None:
"""
Rate all images in given config input directory and
diff --git a/extractor_service/app/image_evaluators.py b/extractor_service/app/image_evaluators.py
index 2ae2543..1a917a9 100644
--- a/extractor_service/app/image_evaluators.py
+++ b/extractor_service/app/image_evaluators.py
@@ -26,9 +26,9 @@
import requests
import numpy as np
from tensorflow import convert_to_tensor
-from tensorflow.keras.models import Model
+from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Dropout
-from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
+import tensorflow as tf
from .schemas import ExtractorConfig
@@ -189,35 +189,39 @@ def _get_model_weights(cls) -> Path:
Path: Path to the model weights.
"""
model_weights_directory = cls._config.weights_directory
- logger.info("Searching for model weights in weights directory: %s", model_weights_directory)
+ logger.info("Searching for model weights in weights directory: %s",
+ model_weights_directory)
model_weights_path = Path(model_weights_directory) / cls._config.weights_filename
if not model_weights_path.is_file():
- logger.debug("Can't find model weights in weights directory: %s", model_weights_directory)
+ logger.debug("Can't find model weights in weights directory: %s",
+ model_weights_directory)
cls._download_model_weights(model_weights_path)
else:
- logger.debug(f"Model weights loaded from: {model_weights_path}")
+ logger.debug("Model weights loaded from: %s", model_weights_path)
return model_weights_path
@classmethod
- def _download_model_weights(cls, weights_path: Path) -> None:
+ def _download_model_weights(cls, weights_path: Path, timeout: int = 10) -> None:
"""
Download the model weights from the specified URL.
Args:
weights_path (Path): Path to save the downloaded weights.
+ timeout (int): Timeout for the request in seconds.
Raises:
cls.DownloadingModelWeightsError: If there's an issue downloading the weights.
"""
url = f"{cls._config.weights_repo_url}{cls._config.weights_filename}"
logger.debug("Downloading model weights from ulr: %s", url)
- response = requests.get(url, allow_redirects=True)
+ response = requests.get(url, allow_redirects=True, timeout=timeout)
if response.status_code == 200:
weights_path.parent.mkdir(parents=True, exist_ok=True)
weights_path.write_bytes(response.content)
- logger.debug(f"Model weights downloaded and saved to %s", weights_path)
+ logger.debug("Model weights downloaded and saved to %s", weights_path)
else:
- error_message = f"Failed to download the weights: HTTP status code {response.status_code}"
+ error_message = (f"Failed to download the weights: HTTP status code "
+ f"{response.status_code}")
logger.error(error_message)
raise cls.DownloadingModelWeightsError(error_message)
@@ -251,7 +255,7 @@ def _create_model(cls, model_weights_path: Path) -> Model:
Returns:
Model: NIMA model instance.
"""
- base_model = InceptionResNetV2(
+ base_model = tf.keras.applications.InceptionResNetV2(
input_shape=cls._input_shape, include_top=False,
pooling="avg", weights=None
)
diff --git a/extractor_service/app/image_processors.py b/extractor_service/app/image_processors.py
index 5ea30fb..f75718a 100644
--- a/extractor_service/app/image_processors.py
+++ b/extractor_service/app/image_processors.py
@@ -62,7 +62,7 @@ def save_image(cls, image: np.ndarray, output_directory: Path, output_extension:
@staticmethod
@abstractmethod
- def normalize_images(images: list[np.ndarray], target_size: tuple[int]) -> np.array:
+ def normalize_images(images: list[np.ndarray], target_size: tuple[int, int]) -> np.array:
"""
Resize a batch of images and convert them to a normalized numpy array.
diff --git a/extractor_service/app/schemas.py b/extractor_service/app/schemas.py
index 9460969..df2f484 100644
--- a/extractor_service/app/schemas.py
+++ b/extractor_service/app/schemas.py
@@ -3,7 +3,7 @@
Models:
- ExtractorConfig: Model containing the extractors configuration parameters.
- Message: Model for encapsulating messages returned by the application.
- - ExtractorStatus: Model representing the status of the currently working extractor in the system.
+ - ExtractorStatus: Model representing the status of the working extractor in the system.
LICENSE
=======
Copyright (C) 2024 Bartłomiej Flis
@@ -36,14 +36,14 @@ class ExtractorConfig(BaseModel):
Attributes:
input_directory (DirectoryPath): Input directory path containing entries for extraction.
By default, it sets value for docker container volume.
- output_directory (DirectoryPath): Output directory path where extraction results will be saved.
+ output_directory (DirectoryPath): Output directory path for extraction results.
By default, it sets value for docker container volume.
video_extensions (tuple[str]): Supported videos' extensions in service for reading videos.
images_extensions (tuple[str]): Supported images' extensions in service for reading images.
- processed_video_prefix (str): Prefix that will be added to processed video filename after extraction.
+ processed_video_prefix (str): Prefix will be added to processed video after extraction.
batch_size (int): Maximum number of images processed in a single batch.
- compering_group_size (int): Maximum number of images in a group to compare for finding the best one.
- top_images_percent (float): Percentage threshold to determine the top images based on scores.
+ compering_group_size (int): Images group number to compare for finding the best one.
+ top_images_percent (float): Percentage threshold to determine the top images.
images_output_format (str): Format for saving output images, e.g., '.jpg', '.png'.
target_image_size (tuple[int, int]): Images will be normalized to this size.
weights_directory (Path | str): Directory path where model weights are stored.
diff --git a/extractor_service/app/tests/common.py b/extractor_service/app/tests/common.py
deleted file mode 100644
index 2773964..0000000
--- a/extractor_service/app/tests/common.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Common fixtures for all conftest files."""
-import sys
-from pathlib import Path
-
-import pytest
-
-from app.schemas import ExtractorConfig
-
-common_path = Path(__file__).parent.parent.parent.parent / "common"
-sys.path.insert(0, str(common_path))
-from common import (
- files_dir, best_frames_dir, top_images_dir,
- setup_top_images_extractor_env, setup_best_frames_extractor_env
-)
-
-
-@pytest.fixture(scope="package")
-def config(files_dir, best_frames_dir) -> ExtractorConfig:
- config = ExtractorConfig(
- input_directory=files_dir,
- output_directory=best_frames_dir,
- images_output_format=".jpg",
- video_extensions=(".mp4",),
- processed_video_prefix="done_"
- )
- return config
diff --git a/extractor_service/app/tests/integration/conftest.py b/extractor_service/app/tests/integration/conftest.py
deleted file mode 100644
index aa6207a..0000000
--- a/extractor_service/app/tests/integration/conftest.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import pytest
-
-from ..common import (
- config, files_dir, best_frames_dir, top_images_dir,
- setup_top_images_extractor_env, setup_best_frames_extractor_env
-) # import fixtures from common.py
-from ...extractors import BestFramesExtractor
-
-
-@pytest.fixture
-def extractor(config):
- extractor = BestFramesExtractor(config)
- return extractor
diff --git a/extractor_service/app/tests/unit/conftest.py b/extractor_service/app/tests/unit/conftest.py
deleted file mode 100644
index dbb3281..0000000
--- a/extractor_service/app/tests/unit/conftest.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from app.schemas import ExtractorConfig
-from ..common import config, files_dir, best_frames_dir
diff --git a/extractor_service/app/video_processors.py b/extractor_service/app/video_processors.py
index d270d2b..ee48bfa 100644
--- a/extractor_service/app/video_processors.py
+++ b/extractor_service/app/video_processors.py
@@ -33,8 +33,10 @@
class VideoProcessor(ABC):
"""Abstract class for creating video processors used for managing video operations."""
+ @classmethod
@abstractmethod
- def get_next_frames(self, video_path: Path, batch_size: int) -> Generator[list[np.ndarray], None, None]:
+ def get_next_frames(cls, video_path: Path,
+ batch_size: int) -> Generator[list[np.ndarray], None, None]:
"""
Abstract generator method to generate batches of frames from a video file.
@@ -51,6 +53,7 @@ def get_next_frames(self, video_path: Path, batch_size: int) -> Generator[list[n
class OpenCVVideo(VideoProcessor):
+ """Video processor based on OpenCV with FFMPEG extension."""
class CantOpenVideoCapture(Exception):
"""Exception raised when the video file cannot be opened."""
@@ -84,7 +87,8 @@ def _video_capture(video_path: Path) -> cv2.VideoCapture:
video_cap.release()
@classmethod
- def get_next_frames(cls, video_path: Path, batch_size: int) -> Generator[list[np.ndarray], None, None]:
+ def get_next_frames(cls, video_path: Path,
+ batch_size: int) -> Generator[list[np.ndarray], None, None]:
"""
Generates batches of frames from the specified video using OpenCV.
@@ -99,8 +103,10 @@ def get_next_frames(cls, video_path: Path, batch_size: int) -> Generator[list[np
list[np.ndarray]: A batch of video frames.
"""
with cls._video_capture(video_path) as video:
- frame_rate = cls._get_video_attribute(video, cv2.CAP_PROP_FPS, "frame rate")
- total_frames = cls._get_video_attribute(video, cv2.CAP_PROP_FRAME_COUNT, "total frames")
+ frame_rate = cls._get_video_attribute(
+ video, cv2.CAP_PROP_FPS, "frame rate")
+ total_frames = cls._get_video_attribute(
+ video, cv2.CAP_PROP_FRAME_COUNT, "total frames")
frames_batch = []
logger.info("Getting frames batch...")
for frame_index in range(0, total_frames, frame_rate):
@@ -136,7 +142,8 @@ def _read_next_frame(cls, video: cv2.VideoCapture, frame_index: int) -> np.ndarr
return frame
@classmethod
- def _get_video_attribute(cls, video: cv2.VideoCapture, attribute_id: int, display_name: str) -> int:
+ def _get_video_attribute(cls, video: cv2.VideoCapture,
+ attribute_id: int, display_name: str) -> int:
"""
Retrieves a specified attribute value from the video capture object and validates it.
diff --git a/extractor_service/main.py b/extractor_service/main.py
index 9b9559e..a3ec1b6 100644
--- a/extractor_service/main.py
+++ b/extractor_service/main.py
@@ -24,17 +24,24 @@
along with this program. If not, see .
"""
import logging
+import os
import sys
import uvicorn
-from fastapi import FastAPI, BackgroundTasks
-
-from app.schemas import ExtractorConfig, Message, ExtractorStatus
-from app.extractor_manager import ExtractorManager
-
-logging.basicConfig(level=logging.DEBUG,
- format='%(asctime)s - %(levelname)s - %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S',
+from fastapi import FastAPI, BackgroundTasks, Depends
+
+if os.getenv("DOCKER_ENV"):
+ from app.schemas import ExtractorConfig, Message, ExtractorStatus
+ from app.extractor_manager import ExtractorManager
+ from app.dependencies import ExtractorDependencies, get_extractor_dependencies
+else:
+ from .app.schemas import ExtractorConfig, Message, ExtractorStatus
+ from .app.extractor_manager import ExtractorManager
+ from .app.dependencies import ExtractorDependencies, get_extractor_dependencies
+
+logging.basicConfig(level=logging.INFO,
+ format="%(asctime)s - %(levelname)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)])
logger = logging.getLogger(__name__)
@@ -53,22 +60,26 @@ def get_extractors_status() -> ExtractorStatus:
@app.post("/v2/extractors/{extractor_name}")
-def run_extractor(background_tasks: BackgroundTasks, extractor_name: str,
- config: ExtractorConfig = ExtractorConfig()) -> Message:
+def run_extractor(
+ extractor_name: str,
+ background_tasks: BackgroundTasks,
+ config: ExtractorConfig = ExtractorConfig(),
+ dependencies: ExtractorDependencies = Depends(get_extractor_dependencies)
+) -> Message:
"""
Runs provided extractor.
Args:
- background_tasks (BackgroundTasks): A FastAPI tool for running tasks in background,
- which allows non-blocking operation of long-running tasks.
extractor_name (str): The name of the extractor that will be used.
- config (ExtractorConfig): A Pydantic model with configuration
- parameters for the extractor.
+ background_tasks (BackgroundTasks): A FastAPI tool for running tasks in background.
+ dependencies (ExtractorDependencies): Dependencies that will be used in extractor.
+ config (ExtractorConfig): A Pydantic model with extractor configuration.
Returns:
Message: Contains the operation status.
"""
- message = ExtractorManager.start_extractor(background_tasks, config, extractor_name)
+ message = ExtractorManager.start_extractor(extractor_name, background_tasks,
+ config, dependencies)
return Message(message=message)
diff --git a/poetry.lock b/poetry.lock
index 2e4da0e..250ed31 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -44,6 +44,20 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
trio = ["trio (>=0.23)"]
+[[package]]
+name = "astroid"
+version = "3.2.2"
+description = "An abstract syntax tree for Python with inference support."
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "astroid-3.2.2-py3-none-any.whl", hash = "sha256:e8a0083b4bb28fcffb6207a3bfc9e5d0a68be951dd7e336d5dcf639c682388c0"},
+ {file = "astroid-3.2.2.tar.gz", hash = "sha256:8ead48e31b92b2e217b6c9733a21afafe479d52d6e164dd25fb1a770c7c3cf94"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
+
[[package]]
name = "astunparse"
version = "1.6.3"
@@ -261,6 +275,21 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1
[package.extras]
toml = ["tomli"]
+[[package]]
+name = "dill"
+version = "0.3.8"
+description = "serialize all of Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"},
+ {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"},
+]
+
+[package.extras]
+graph = ["objgraph (>=1.7.2)"]
+profile = ["gprof2dot (>=2022.7.29)"]
+
[[package]]
name = "dnspython"
version = "2.6.1"
@@ -629,6 +658,20 @@ files = [
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
+[[package]]
+name = "isort"
+version = "5.13.2"
+description = "A Python utility / library to sort Python imports."
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"},
+ {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"},
+]
+
+[package.extras]
+colors = ["colorama (>=0.4.6)"]
+
[[package]]
name = "jinja2"
version = "3.1.4"
@@ -792,6 +835,17 @@ files = [
{file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
]
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -831,8 +885,8 @@ files = [
[package.dependencies]
numpy = [
- {version = ">=1.23.3", markers = "python_version >= \"3.11\""},
{version = ">=1.21.2", markers = "python_version >= \"3.10\" and python_version < \"3.11\""},
+ {version = ">=1.23.3", markers = "python_version >= \"3.11\""},
]
[package.extras]
@@ -912,9 +966,9 @@ files = [
[package.dependencies]
numpy = [
- {version = ">=1.23.5", markers = "python_version >= \"3.11\""},
{version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""},
{version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""},
+ {version = ">=1.23.5", markers = "python_version >= \"3.11\""},
]
[[package]]
@@ -1061,6 +1115,22 @@ files = [
{file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"},
]
+[[package]]
+name = "platformdirs"
+version = "4.2.2"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"},
+ {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
+type = ["mypy (>=1.8)"]
+
[[package]]
name = "pluggy"
version = "1.5.0"
@@ -1220,6 +1290,34 @@ files = [
[package.extras]
windows-terminal = ["colorama (>=0.4.6)"]
+[[package]]
+name = "pylint"
+version = "3.2.2"
+description = "python code static checker"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "pylint-3.2.2-py3-none-any.whl", hash = "sha256:3f8788ab20bb8383e06dd2233e50f8e08949cfd9574804564803441a4946eab4"},
+ {file = "pylint-3.2.2.tar.gz", hash = "sha256:d068ca1dfd735fb92a07d33cb8f288adc0f6bc1287a139ca2425366f7cbe38f8"},
+]
+
+[package.dependencies]
+astroid = ">=3.2.2,<=3.3.0-dev0"
+colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
+dill = [
+ {version = ">=0.2", markers = "python_version < \"3.11\""},
+ {version = ">=0.3.6", markers = "python_version >= \"3.11\""},
+]
+isort = ">=4.2.5,<5.13.0 || >5.13.0,<6"
+mccabe = ">=0.6,<0.8"
+platformdirs = ">=2.2.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomlkit = ">=0.10.1"
+
+[package.extras]
+spelling = ["pyenchant (>=3.2,<4.0)"]
+testutils = ["gitpython (>3)"]
+
[[package]]
name = "pytest"
version = "8.2.0"
@@ -1639,6 +1737,17 @@ files = [
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]
+[[package]]
+name = "tomlkit"
+version = "0.12.5"
+description = "Style preserving TOML library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomlkit-0.12.5-py3-none-any.whl", hash = "sha256:af914f5a9c59ed9d0762c7b64d3b5d5df007448eb9cd2edc8a46b1eafead172f"},
+ {file = "tomlkit-0.12.5.tar.gz", hash = "sha256:eef34fba39834d4d6b73c9ba7f3e4d1c417a4e56f89a7e96e090dd0d24b8fb3c"},
+]
+
[[package]]
name = "typer"
version = "0.12.3"
@@ -2109,4 +2218,4 @@ files = [
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<3.12"
-content-hash = "cbe22020ea3dea5c64425f8078184d478cd0fb0f99a06589b9174bcef8fa9f68"
+content-hash = "234f30d82590835a3828e9a898fac4bb6bfac0f00406e3849f6174d4fa19c537"
diff --git a/pyproject.toml b/pyproject.toml
index f0d5139..090e2ad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,6 +20,7 @@ requests = "^2.32.2"
tensorflow = "^2.16.1"
tensorflow-io-gcs-filesystem = "0.31.0"
docker = "^7.1.0"
+pylint = "^3.2.2"
[build-system]
requires = ["poetry-core"]
diff --git a/quick_demo_cpu.bat b/quick_demo_cpu.bat
new file mode 100644
index 0000000..ed63939
--- /dev/null
+++ b/quick_demo_cpu.bat
@@ -0,0 +1,4 @@
+@echo off
+echo Starting demo...
+python start.py best_frames_extractor --cpu
+pause
diff --git a/quick_demo.bat b/quick_demo_gpu.bat
similarity index 100%
rename from quick_demo.bat
rename to quick_demo_gpu.bat
diff --git a/service_manager/docker_manager.py b/service_manager/docker_manager.py
index ad83276..379465c 100644
--- a/service_manager/docker_manager.py
+++ b/service_manager/docker_manager.py
@@ -25,6 +25,7 @@
import subprocess
import sys
import logging
+from typing import Optional
logger = logging.getLogger(__name__)
@@ -38,7 +39,7 @@ class ServiceShutdownSignal(Exception):
"""Exception raised when the service signals it is ready to be shut down."""
def __init__(self, container_name: str, input_dir: str,
- output_dir: str, port: int, force_build: bool) -> None:
+ output_dir: str, port: int, force_build: bool, cpu_only: bool) -> None:
"""
Initialize the DockerManager with specific parameters for container and image management.
@@ -54,10 +55,17 @@ def __init__(self, container_name: str, input_dir: str,
self._output_directory = output_dir
self._port = port
self._force_build = force_build
+ self._cpu_only = cpu_only
self.__log_input()
@property
def image_name(self):
+ """
+ Returns the name of the image.
+
+ Returns:
+ str: The name of the image.
+ """
return self._image_name
def __log_input(self) -> None:
@@ -68,19 +76,31 @@ def __log_input(self) -> None:
logger.debug("Output directory from user: %s", self._output_directory)
logger.debug("Port from user: %s", self._port)
logger.debug("Force build: %s", self._force_build)
+ logger.debug("CPU only: %s", self._cpu_only)
@property
def docker_image_existence(self) -> bool:
+ """
+ Checks if the Docker image exists.
+
+ This property calls a method that checks for the existence of the Docker
+ image associated with this instance.
+
+ Returns:
+ bool: True if the Docker image exists, False otherwise.
+ """
return self._check_image_exists()
def _check_image_exists(self) -> bool:
- """Checks whether the Docker image already exists in the system.
+ """
+ Checks whether the Docker image already exists in the system.
Returns:
bool: True if the image exists, False otherwise.
"""
command = ["docker", "images", "-q", self._image_name]
- process_output = subprocess.run(command, capture_output=True, text=True).stdout.strip()
+ process_output = subprocess.run(command, capture_output=True,
+ text=True, check=True).stdout.strip()
is_exists = process_output != ""
return is_exists
@@ -94,7 +114,7 @@ def build_image(self, dockerfile_path: str) -> None:
if not self.docker_image_existence or self._force_build:
logging.info("Building Docker image...")
command = ["docker", "build", "-t", self._image_name, dockerfile_path]
- subprocess.run(command)
+ subprocess.run(command, check=True)
else:
logger.info("Image is already created. Using existing one.")
@@ -108,7 +128,7 @@ def container_status(self) -> str:
"""
return self._check_container_status()
- def _check_container_status(self) -> str:
+ def _check_container_status(self) -> Optional[str]:
"""
Check the status of the container.
@@ -116,9 +136,10 @@ def _check_container_status(self) -> str:
str: The status of the container.
"""
command = ["docker", "inspect", "--format='{{.State.Status}}'", self._container_name]
- result = subprocess.run(command, capture_output=True, text=True)
+ result = subprocess.run(command, capture_output=True, text=True, check=False)
if result.returncode == 0:
return result.stdout.strip().replace("'", "")
+ return None
def deploy_container(self, container_port: int, container_input_directory: str,
container_output_directory: str) -> None:
@@ -132,19 +153,21 @@ def deploy_container(self, container_port: int, container_input_directory: str,
status = self.container_status
if status is None:
logging.info("No existing container found. Running a new container.")
- self._run_container(container_port, container_input_directory, container_output_directory)
+ self._run_container(container_port, container_input_directory,
+ container_output_directory)
elif self._force_build:
logging.info("Force rebuild initiated.")
if status in ["running", "paused"]:
self._stop_container()
self._delete_container()
- self._run_container(container_port, container_input_directory, container_output_directory)
- elif status == "exited":
+ self._run_container(container_port, container_input_directory,
+ container_output_directory)
+ elif status in ["exited", "created"]:
self._start_container()
elif status == "running":
- logging.info(f"Container is already running.")
+ logging.info("Container is already running.")
else:
- logging.warning(f"Container in unsupported status: %s. Fix container on your own.",
+ logging.warning("Container in unsupported status: %s. Fix container on your own.",
status)
def _start_container(self) -> None:
@@ -165,13 +188,15 @@ def _run_container(self, container_port: int, container_input_directory: str,
"""
logging.info("Running a new container...")
command = [
- "docker", "run", "--name", self._container_name, "--gpus", "all",
+ "docker", "run", "--name", self._container_name,
"--restart", "unless-stopped", "-d",
"-p", f"{self._port}:{container_port}",
"-v", f"{self._input_directory}:{container_input_directory}",
- "-v", f"{self._output_directory}:{container_output_directory}",
- self._image_name
+ "-v", f"{self._output_directory}:{container_output_directory}"
]
+ if not self._cpu_only:
+ command.extend(["--gpus", "all"])
+ command.append(self._image_name)
subprocess.run(command, check=True)
def follow_container_logs(self) -> None:
@@ -195,7 +220,7 @@ def _run_log_process(self) -> subprocess.Popen:
Returns:
subprocess.Popen: The process object for the log following command.
"""
- logger.info(f"Following logs for {self._container_name}...")
+ logger.info("Following logs for %s...", self._container_name)
command = ["docker", "logs", "-f", "--since", "1s", self._container_name]
process = subprocess.Popen(
command, stdout=subprocess.PIPE,
@@ -216,14 +241,14 @@ def __stop_log_process(self, process: subprocess.Popen) -> None:
def _stop_container(self) -> None:
"""Stops the running Docker container."""
- logger.info(f"Stopping container %s...", self._container_name)
+ logger.info("Stopping container %s...", self._container_name)
command = ["docker", "stop", self._container_name]
subprocess.run(command, check=True, capture_output=True)
logger.info("Container stopped.")
def _delete_container(self) -> None:
"""Deletes the Docker container."""
- logger.info(f"Deleting container %s...", self._container_name)
+ logger.info("Deleting container %s...", self._container_name)
command = ["docker", "rm", self._container_name]
subprocess.run(command, check=True, capture_output=True)
logger.info("Container deleted.")
diff --git a/start.py b/start.py
index 24e186c..582bb6c 100644
--- a/start.py
+++ b/start.py
@@ -38,7 +38,8 @@ def main() -> None:
user_input.input_dir,
user_input.output_dir,
user_input.port,
- user_input.build
+ user_input.build,
+ user_input.cpu
)
docker.build_image(Config.dockerfile)
docker.deploy_container(
@@ -75,6 +76,8 @@ def parse_args() -> argparse.Namespace:
parser.add_argument("--all_frames", action="store_true",
help="Returning all frames every second without filtering. "
"For best_frames_extractor - does nothing with others.")
+ parser.add_argument("--cpu", action="store_true",
+ help="Turn on cpu-only mode.")
args = parser.parse_args()
return args
diff --git a/extractor_service/app/tests/__init__.py b/tests/__init__.py
similarity index 100%
rename from extractor_service/app/tests/__init__.py
rename to tests/__init__.py
diff --git a/common/common.py b/tests/common.py
similarity index 79%
rename from common/common.py
rename to tests/common.py
index 875abc2..5c713fe 100644
--- a/common/common.py
+++ b/tests/common.py
@@ -20,22 +20,23 @@ def top_images_dir(files_dir):
return files_dir / "top_images"
-@pytest.fixture(scope="function")
+@pytest.fixture
def setup_top_images_extractor_env(files_dir, top_images_dir) -> tuple[Path, Path]:
assert files_dir.is_dir()
- # found_files = [file for file in files_dir.iterdir() if file.suffix == ".jpg"]
- # assert len(found_files) > 0, "No JPG files found in test directory"
-
if top_images_dir.is_dir():
shutil.rmtree(top_images_dir)
assert not top_images_dir.is_dir(), "Output directory was not removed"
top_images_dir.mkdir()
- return files_dir, top_images_dir
+ yield files_dir, top_images_dir
+
+ gitkeep_file = top_images_dir / ".gitkeep"
+ gitkeep_file.touch()
+ assert gitkeep_file.exists()
-@pytest.fixture(scope="function")
+@pytest.fixture
def setup_best_frames_extractor_env(files_dir, best_frames_dir) -> tuple[Path, Path, Path]:
video_filename = "test_video.mp4"
expected_video_path = files_dir / f"frames_extracted_{video_filename}"
@@ -50,4 +51,8 @@ def setup_best_frames_extractor_env(files_dir, best_frames_dir) -> tuple[Path, P
best_frames_dir.mkdir()
assert best_frames_dir.is_dir(), "Output dir was not created after cleaning."
- return files_dir, best_frames_dir, expected_video_path
+ yield files_dir, best_frames_dir, expected_video_path
+
+ gitkeep_file = best_frames_dir / ".gitkeep"
+ gitkeep_file.touch()
+ assert gitkeep_file.exists()
\ No newline at end of file
diff --git a/extractor_service/app/tests/e2e/__init__.py b/tests/extractor_service/__init__.py
similarity index 100%
rename from extractor_service/app/tests/e2e/__init__.py
rename to tests/extractor_service/__init__.py
diff --git a/tests/extractor_service/common.py b/tests/extractor_service/common.py
new file mode 100644
index 0000000..7cf2dd7
--- /dev/null
+++ b/tests/extractor_service/common.py
@@ -0,0 +1,43 @@
+"""Common fixtures for all conftest files."""
+import pytest
+
+from extractor_service.app.extractors import BestFramesExtractor
+from extractor_service.app.schemas import ExtractorConfig
+from extractor_service.app.dependencies import (
+ ExtractorDependencies, get_image_processor,
+ get_video_processor, get_evaluator
+)
+
+
+@pytest.fixture(scope="package")
+def dependencies():
+ image_processor = get_image_processor()
+ video_processor = get_video_processor()
+ evaluator = get_evaluator()
+
+ return ExtractorDependencies(
+ image_processor=image_processor,
+ video_processor=video_processor,
+ evaluator=evaluator
+ )
+
+
+@pytest.fixture(scope="package")
+def extractor(config, dependencies):
+ extractor = BestFramesExtractor(
+ config, dependencies.image_processor,
+ dependencies.video_processor, dependencies.evaluator
+ )
+ return extractor
+
+
+@pytest.fixture(scope="package")
+def config(files_dir, best_frames_dir) -> ExtractorConfig:
+ config = ExtractorConfig(
+ input_directory=files_dir,
+ output_directory=best_frames_dir,
+ images_output_format=".jpg",
+ video_extensions=(".mp4",),
+ processed_video_prefix="done_"
+ )
+ return config
diff --git a/extractor_service/app/tests/integration/__init__.py b/tests/extractor_service/e2e/__init__.py
similarity index 100%
rename from extractor_service/app/tests/integration/__init__.py
rename to tests/extractor_service/e2e/__init__.py
diff --git a/extractor_service/app/tests/e2e/best_frames_extractor_api_test.py b/tests/extractor_service/e2e/best_frames_extractor_api_test.py
similarity index 100%
rename from extractor_service/app/tests/e2e/best_frames_extractor_api_test.py
rename to tests/extractor_service/e2e/best_frames_extractor_api_test.py
diff --git a/extractor_service/app/tests/e2e/conftest.py b/tests/extractor_service/e2e/conftest.py
similarity index 67%
rename from extractor_service/app/tests/e2e/conftest.py
rename to tests/extractor_service/e2e/conftest.py
index cfd4797..7d51d52 100644
--- a/extractor_service/app/tests/e2e/conftest.py
+++ b/tests/extractor_service/e2e/conftest.py
@@ -1,11 +1,12 @@
from fastapi.testclient import TestClient
import pytest
-from main import app, run_extractor
-from ..common import (
+from tests.extractor_service.common import config
+from tests.common import (
files_dir, best_frames_dir, top_images_dir,
setup_top_images_extractor_env, setup_best_frames_extractor_env
-) # import fixtures from common.py
+)
+from extractor_service.main import app, run_extractor
@pytest.fixture(scope="package")
diff --git a/extractor_service/app/tests/e2e/frames_extractor_test.py b/tests/extractor_service/e2e/frames_extractor_test.py
similarity index 100%
rename from extractor_service/app/tests/e2e/frames_extractor_test.py
rename to tests/extractor_service/e2e/frames_extractor_test.py
diff --git a/extractor_service/app/tests/e2e/top_images_extractor_api_test.py b/tests/extractor_service/e2e/top_images_extractor_api_test.py
similarity index 100%
rename from extractor_service/app/tests/e2e/top_images_extractor_api_test.py
rename to tests/extractor_service/e2e/top_images_extractor_api_test.py
diff --git a/extractor_service/app/tests/unit/__init__.py b/tests/extractor_service/integration/__init__.py
similarity index 100%
rename from extractor_service/app/tests/unit/__init__.py
rename to tests/extractor_service/integration/__init__.py
diff --git a/extractor_service/app/tests/integration/best_frames_extrator_test.py b/tests/extractor_service/integration/best_frames_extrator_test.py
similarity index 65%
rename from extractor_service/app/tests/integration/best_frames_extrator_test.py
rename to tests/extractor_service/integration/best_frames_extrator_test.py
index 57d6fcc..ae3d2ee 100644
--- a/extractor_service/app/tests/integration/best_frames_extrator_test.py
+++ b/tests/extractor_service/integration/best_frames_extrator_test.py
@@ -1,14 +1,15 @@
import pytest
-from app.extractors import BestFramesExtractor
-from app.schemas import ExtractorConfig
+from extractor_service.app.extractors import BestFramesExtractor
+from extractor_service.app.schemas import ExtractorConfig
# @pytest.mark.skip(reason="Test time-consuming and dependent on hardware performance")
-def test_best_frames_extractor(setup_best_frames_extractor_env):
+def test_best_frames_extractor(setup_best_frames_extractor_env, dependencies):
input_directory, output_directory, expected_video_path = setup_best_frames_extractor_env
config = ExtractorConfig(input_directory=input_directory, output_directory=output_directory)
- extractor = BestFramesExtractor(config)
+ extractor = BestFramesExtractor(config, dependencies.image_processor,
+ dependencies.video_processor, dependencies.evaluator)
extractor.process()
found_best_frame_files = [
diff --git a/tests/extractor_service/integration/conftest.py b/tests/extractor_service/integration/conftest.py
new file mode 100644
index 0000000..fc2e724
--- /dev/null
+++ b/tests/extractor_service/integration/conftest.py
@@ -0,0 +1,8 @@
+import pytest
+
+from tests.extractor_service.common import extractor, config, dependencies
+from tests.common import (
+ files_dir, best_frames_dir, top_images_dir,
+ setup_top_images_extractor_env, setup_best_frames_extractor_env
+)
+from extractor_service.app.extractors import BestFramesExtractor
diff --git a/extractor_service/app/tests/integration/extractor_and_evaluator_integration_test.py b/tests/extractor_service/integration/extractor_and_evaluator_integration_test.py
similarity index 80%
rename from extractor_service/app/tests/integration/extractor_and_evaluator_integration_test.py
rename to tests/extractor_service/integration/extractor_and_evaluator_integration_test.py
index 541e562..3b8b5c1 100644
--- a/extractor_service/app/tests/integration/extractor_and_evaluator_integration_test.py
+++ b/tests/extractor_service/integration/extractor_and_evaluator_integration_test.py
@@ -1,8 +1,8 @@
import numpy as np
import pytest
-from tensorflow.keras.models import Model
+from tensorflow.keras import Model
-from app.image_evaluators import InceptionResNetNIMA
+from extractor_service.app.image_evaluators import InceptionResNetNIMA
@pytest.mark.order(1) # this test must be first because of hugging face limitations
@@ -14,8 +14,8 @@ def test_get_image_evaluator_download_weights_and_create_model(extractor, config
evaluator = extractor._get_image_evaluator()
- isinstance(evaluator, InceptionResNetNIMA)
- isinstance(evaluator._model, Model)
+ assert isinstance(evaluator, InceptionResNetNIMA)
+ assert isinstance(evaluator._model, Model)
assert weights_path.exists()
diff --git a/extractor_service/app/tests/integration/extractor_and_image_processor_integration_test.py b/tests/extractor_service/integration/extractor_and_image_processor_integration_test.py
similarity index 100%
rename from extractor_service/app/tests/integration/extractor_and_image_processor_integration_test.py
rename to tests/extractor_service/integration/extractor_and_image_processor_integration_test.py
diff --git a/extractor_service/app/tests/integration/extractor_and_video_processor_integration_test.py b/tests/extractor_service/integration/extractor_and_video_processor_integration_test.py
similarity index 100%
rename from extractor_service/app/tests/integration/extractor_and_video_processor_integration_test.py
rename to tests/extractor_service/integration/extractor_and_video_processor_integration_test.py
diff --git a/extractor_service/app/tests/integration/manager_and_fastapi_integration_test.py b/tests/extractor_service/integration/manager_and_fastapi_integration_test.py
similarity index 50%
rename from extractor_service/app/tests/integration/manager_and_fastapi_integration_test.py
rename to tests/extractor_service/integration/manager_and_fastapi_integration_test.py
index 1bac042..0b72f71 100644
--- a/extractor_service/app/tests/integration/manager_and_fastapi_integration_test.py
+++ b/tests/extractor_service/integration/manager_and_fastapi_integration_test.py
@@ -1,19 +1,17 @@
from fastapi import BackgroundTasks
from starlette.testclient import TestClient
-from app.extractor_manager import ExtractorManager
-from app.schemas import ExtractorConfig
-from main import app
+from extractor_service.app.extractor_manager import ExtractorManager
+from extractor_service.main import app
client = TestClient(app)
-def test_extractor_start_and_stop():
+def test_extractor_start_and_stop(config, dependencies):
extractor_name = "best_frames_extractor"
background_tasks = BackgroundTasks()
- config = ExtractorConfig(parameters="example_parameters")
- response = ExtractorManager.start_extractor(background_tasks, config, extractor_name)
+ response = ExtractorManager.start_extractor(extractor_name, background_tasks, config, dependencies)
assert response == f"'{extractor_name}' started."
assert ExtractorManager.get_active_extractor() is None
diff --git a/extractor_service/app/tests/integration/top_images_extractor_test.py b/tests/extractor_service/integration/top_images_extractor_test.py
similarity index 60%
rename from extractor_service/app/tests/integration/top_images_extractor_test.py
rename to tests/extractor_service/integration/top_images_extractor_test.py
index b0dcdcb..5f0112e 100644
--- a/extractor_service/app/tests/integration/top_images_extractor_test.py
+++ b/tests/extractor_service/integration/top_images_extractor_test.py
@@ -1,13 +1,14 @@
-from app.extractors import TopImagesExtractor
-from app.schemas import ExtractorConfig
+from extractor_service.app.extractors import TopImagesExtractor
+from extractor_service.app.schemas import ExtractorConfig
# @pytest.mark.skip(reason="Test time-consuming and dependent on hardware performance")
-def test_top_frames_extractor(setup_top_images_extractor_env):
+def test_top_frames_extractor(setup_top_images_extractor_env, dependencies):
input_directory, output_directory = setup_top_images_extractor_env
config = ExtractorConfig(input_directory=input_directory, output_directory=output_directory)
- selector = TopImagesExtractor(config)
+ selector = TopImagesExtractor(config, dependencies.image_processor,
+ dependencies.video_processor, dependencies.evaluator)
selector.process()
found_top_frame_files = [
diff --git a/service_manager/tests/__init__.py b/tests/extractor_service/unit/__init__.py
similarity index 100%
rename from service_manager/tests/__init__.py
rename to tests/extractor_service/unit/__init__.py
diff --git a/extractor_service/app/tests/unit/best_frames_extractor_test.py b/tests/extractor_service/unit/best_frames_extractor_test.py
similarity index 92%
rename from extractor_service/app/tests/unit/best_frames_extractor_test.py
rename to tests/extractor_service/unit/best_frames_extractor_test.py
index 36d49cc..9881a72 100644
--- a/extractor_service/app/tests/unit/best_frames_extractor_test.py
+++ b/tests/extractor_service/unit/best_frames_extractor_test.py
@@ -5,8 +5,10 @@
import numpy as np
import pytest
-from app.extractors import BestFramesExtractor
-from app.video_processors import OpenCVVideo
+from extractor_service.app.extractors import BestFramesExtractor
+from extractor_service.app.image_evaluators import InceptionResNetNIMA
+from extractor_service.app.image_processors import OpenCVImage
+from extractor_service.app.video_processors import OpenCVVideo
@pytest.fixture
@@ -18,7 +20,9 @@ def all_frames_extractor(extractor):
@pytest.fixture(scope="function")
def extractor(config):
- extractor = BestFramesExtractor(config)
+ extractor = BestFramesExtractor(
+ config, OpenCVImage, OpenCVVideo, InceptionResNetNIMA
+ )
return extractor
@@ -73,7 +77,7 @@ def test_process_if_all_frames(extractor, caplog, config, all_frames_extractor):
assert f"Starting frames extraction process from '{config.input_directory}'." in caplog.text
-@patch("app.extractors.gc.collect")
+@patch("extractor_service.app.extractors.gc.collect")
@patch.object(BestFramesExtractor, "_get_best_frames")
@patch.object(BestFramesExtractor, "_save_images")
@patch.object(OpenCVVideo, "get_next_frames")
@@ -97,7 +101,7 @@ def test_extract_best_frames(mock_generator, mock_save, mock_get, mock_collect,
assert mock_collect.call_count == 2
-@patch("app.extractors.gc.collect")
+@patch("extractor_service.app.extractors.gc.collect")
@patch.object(BestFramesExtractor, "_get_best_frames")
@patch.object(BestFramesExtractor, "_save_images")
@patch.object(OpenCVVideo, "get_next_frames")
diff --git a/tests/extractor_service/unit/conftest.py b/tests/extractor_service/unit/conftest.py
new file mode 100644
index 0000000..4cecfdf
--- /dev/null
+++ b/tests/extractor_service/unit/conftest.py
@@ -0,0 +1,6 @@
+import pytest
+
+from extractor_service.app.extractors import BestFramesExtractor
+from extractor_service.app.schemas import ExtractorConfig
+from tests.extractor_service.common import extractor, config, dependencies
+from tests.common import files_dir, best_frames_dir
diff --git a/tests/extractor_service/unit/dependencies_test.py b/tests/extractor_service/unit/dependencies_test.py
new file mode 100644
index 0000000..77f3f4b
--- /dev/null
+++ b/tests/extractor_service/unit/dependencies_test.py
@@ -0,0 +1,32 @@
+from extractor_service.app.image_processors import OpenCVImage
+from extractor_service.app.video_processors import OpenCVVideo
+from extractor_service.app.image_evaluators import InceptionResNetNIMA
+from extractor_service.app.dependencies import (
+ get_image_processor, get_video_processor,
+ get_evaluator, get_extractor_dependencies, ExtractorDependencies
+)
+
+
+def test_get_image_processor():
+ assert get_image_processor() == OpenCVImage
+
+
+def test_get_video_processor():
+ assert get_video_processor() == OpenCVVideo
+
+
+def test_get_evaluator():
+ assert get_evaluator() == InceptionResNetNIMA
+
+
+def test_get_extractor_dependencies():
+ dependencies = get_extractor_dependencies(
+ image_processor=get_image_processor(),
+ video_processor=get_video_processor(),
+ evaluator=get_evaluator()
+ )
+
+ assert isinstance(dependencies, ExtractorDependencies)
+ assert dependencies.image_processor == OpenCVImage
+ assert dependencies.video_processor == OpenCVVideo
+ assert dependencies.evaluator == InceptionResNetNIMA
diff --git a/extractor_service/app/tests/unit/extractor_manager_test.py b/tests/extractor_service/unit/extractor_manager_test.py
similarity index 75%
rename from extractor_service/app/tests/unit/extractor_manager_test.py
rename to tests/extractor_service/unit/extractor_manager_test.py
index 69a84f2..d2d0449 100644
--- a/extractor_service/app/tests/unit/extractor_manager_test.py
+++ b/tests/extractor_service/unit/extractor_manager_test.py
@@ -3,8 +3,9 @@
import pytest
from fastapi import HTTPException, BackgroundTasks
-from app.extractor_manager import ExtractorManager
-from app.extractors import ExtractorFactory
+
+from extractor_service.app.extractor_manager import ExtractorManager
+from extractor_service.app.extractors import ExtractorFactory
def test_get_active_extractor():
@@ -13,34 +14,32 @@ def test_get_active_extractor():
@patch.object(ExtractorFactory, "create_extractor")
@patch.object(ExtractorManager, "_check_is_already_extracting")
-def test_start_extractor(mock_checking, mock_create_extractor, config):
+def test_start_extractor(mock_checking, mock_create_extractor, config, dependencies):
extractor_name = "some_extractor"
- mock_extractor_class = MagicMock()
+ mock_extractor = MagicMock()
mock_background_tasks = MagicMock(spec=BackgroundTasks)
- mock_create_extractor.return_value = mock_extractor_class
+ mock_create_extractor.return_value = mock_extractor
- message = ExtractorManager.start_extractor(mock_background_tasks, config, extractor_name)
+ message = ExtractorManager.start_extractor(extractor_name, mock_background_tasks, config, dependencies)
mock_checking.assert_called_once()
- mock_create_extractor.assert_called_once_with(extractor_name)
+ mock_create_extractor.assert_called_once_with(extractor_name, config, dependencies)
mock_background_tasks.add_task.assert_called_once_with(
ExtractorManager._ExtractorManager__run_extractor,
- mock_extractor_class,
+ mock_extractor,
extractor_name
)
expected_message = f"'{extractor_name}' started."
assert message == expected_message, "The return message does not match expected."
-@patch("app.extractors.BestFramesExtractor")
+@patch("extractor_service.app.extractors.BestFramesExtractor")
def test_run_extractor(mock_extractor):
extractor_name = "some_extractor"
- mock_extractor.return_value.process = MagicMock()
- mock_extractor.__name__ = MagicMock()
ExtractorManager._ExtractorManager__run_extractor(mock_extractor, extractor_name)
- mock_extractor.assert_called_once()
+ mock_extractor.process.assert_called_once()
def test_check_is_already_evaluating_true():
diff --git a/extractor_service/app/tests/unit/extractor_test.py b/tests/extractor_service/unit/extractor_test.py
similarity index 71%
rename from extractor_service/app/tests/unit/extractor_test.py
rename to tests/extractor_service/unit/extractor_test.py
index 3844226..ca86994 100644
--- a/extractor_service/app/tests/unit/extractor_test.py
+++ b/tests/extractor_service/unit/extractor_test.py
@@ -5,36 +5,35 @@
import numpy as np
import pytest
-from app.image_processors import OpenCVImage
-from app.extractors import (Extractor,
- ExtractorFactory,
- BestFramesExtractor,
- TopImagesExtractor)
-
-
-def test_extractor_initialization(config):
- extractor = BestFramesExtractor(config)
- assert extractor is not None
+from extractor_service.app.image_processors import OpenCVImage
+from extractor_service.app.video_processors import OpenCVVideo
+from extractor_service.app.image_evaluators import InceptionResNetNIMA
+from extractor_service.app.extractors import (ExtractorFactory,
+ BestFramesExtractor,
+ TopImagesExtractor)
+
+
+def test_extractor_initialization(config, dependencies):
+ extractor = BestFramesExtractor(
+ config, dependencies.image_processor,
+ dependencies.video_processor, dependencies.evaluator
+ )
+ assert extractor
assert extractor._config == config
assert extractor._image_evaluator is None
-@pytest.fixture
-def extractor(config):
- return BestFramesExtractor(config)
-
-
-@patch("app.extractors.InceptionResNetNIMA")
-def test_get_image_evaluator(mock_evaluator, extractor, config):
- expected_evaluator = MagicMock()
- mock_evaluator.return_value = expected_evaluator
+def test_get_image_evaluator(extractor, config):
+ expected = "value"
+ mock_class = MagicMock(return_value=expected)
+ extractor._image_evaluator_class = mock_class
result = extractor._get_image_evaluator()
- mock_evaluator.assert_called_once_with(config)
- assert result == expected_evaluator, \
+ mock_class.assert_called_once_with(config)
+ assert result == expected, \
"The method did not return the correct ImageEvaluator instance."
- assert extractor._image_evaluator == expected_evaluator, \
+ assert extractor._image_evaluator == expected, \
"The ImageEvaluator instance was not stored correctly in the extractor."
@@ -52,8 +51,8 @@ def test_evaluate_images(extractor):
@pytest.mark.parametrize("image", ("some_image", None))
-@patch("app.extractors.OpenCVImage.read_image", return_value=None)
-@patch("app.extractors.ThreadPoolExecutor")
+@patch.object(OpenCVImage, "read_image", return_value=None)
+@patch("extractor_service.app.extractors.ThreadPoolExecutor")
def test_read_images(mock_executor, mock_read_image, image, extractor):
mock_paths = [MagicMock(spec=Path) for _ in range(3)]
mock_executor.return_value.__enter__.return_value = mock_executor
@@ -74,14 +73,14 @@ def test_read_images(mock_executor, mock_read_image, image, extractor):
assert not result
-@patch("app.extractors.OpenCVImage.save_image", return_value=None)
-@patch("app.extractors.ThreadPoolExecutor")
+@patch.object(OpenCVImage, "read_image", return_value=None)
+@patch("extractor_service.app.extractors.ThreadPoolExecutor")
def test_save_images(mock_executor, mock_save_image, extractor, config):
images = [MagicMock(spec=np.ndarray) for _ in range(3)]
mock_executor.return_value.__enter__.return_value = mock_executor
mock_executor.submit.return_value.result.return_value = None
calls = [
- ((mock_save_image, image, config.output_directory, config.images_output_format),)
+ ((OpenCVImage.save_image, image, config.output_directory, config.images_output_format),)
for image in images
]
@@ -114,7 +113,7 @@ def test_list_input_directory_files(mock_is_file, mock_iterdir, extractor, caplo
assert result == mock_files
assert f"Directory '{config.input_directory}' files listed." in caplog.text
- assert f"Listed file paths: {mock_files}"
+ assert f"Listed file paths: {mock_files}" in caplog.text
@patch.object(Path, "iterdir")
@@ -138,8 +137,8 @@ def test_list_input_directory_files_no_files_found(mock_iterdir, extractor, capl
def test_add_prefix(extractor, caplog):
test_prefix = "prefix_"
- test_path = Path("test_path\\file.mp4")
- test_new_path = Path("test_path\\prefix_file.mp4")
+ test_path = Path("test_path/file.mp4")
+ test_new_path = Path("test_path/prefix_file.mp4")
expected_massage = f"Prefix '{test_prefix}' added to file '{test_path}'. New path: {test_new_path}"
with patch("pathlib.Path.rename") as mock_rename, \
@@ -157,17 +156,21 @@ def test_signal_readiness_for_shutdown(extractor, caplog):
assert "Service ready for shutdown" in caplog.text
-def test_create_extractor_known_extractors():
- assert ExtractorFactory.create_extractor("best_frames_extractor") is BestFramesExtractor
- assert ExtractorFactory.create_extractor("top_images_extractor") is TopImagesExtractor
+@pytest.mark.parametrize("extractor_name, extractor", (
+ ("best_frames_extractor", BestFramesExtractor),
+ ("top_images_extractor", TopImagesExtractor)
+))
+def test_create_extractor_known_extractors(extractor_name, extractor, config, dependencies):
+ extractor_instance = ExtractorFactory.create_extractor(extractor_name, config, dependencies)
+ assert isinstance(extractor_instance, extractor)
-def test_create_extractor_unknown_extractor_raises(caplog):
+def test_create_extractor_unknown_extractor_raises(caplog, config, dependencies):
unknown_extractor_name = "unknown_extractor"
expected_massage = f"Provided unknown extractor name: {unknown_extractor_name}"
with pytest.raises(ValueError, match=expected_massage), \
caplog.at_level(logging.ERROR):
- ExtractorFactory.create_extractor(unknown_extractor_name)
+ ExtractorFactory.create_extractor(unknown_extractor_name, config, dependencies)
assert expected_massage in caplog.text
diff --git a/extractor_service/app/tests/unit/image_evaluators_test.py b/tests/extractor_service/unit/image_evaluators_test.py
similarity index 95%
rename from extractor_service/app/tests/unit/image_evaluators_test.py
rename to tests/extractor_service/unit/image_evaluators_test.py
index 21a8c18..81c3f5f 100644
--- a/extractor_service/app/tests/unit/image_evaluators_test.py
+++ b/tests/extractor_service/unit/image_evaluators_test.py
@@ -4,8 +4,7 @@
import numpy as np
import pytest
-from app.image_evaluators import InceptionResNetNIMA, _ResNetModel
-from app.image_processors import OpenCVImage
+from extractor_service.app.image_evaluators import InceptionResNetNIMA, _ResNetModel
@pytest.fixture
@@ -26,7 +25,7 @@ def test_evaluator_initialization(mock_get_model, config):
assert instance._model == test_model
-@patch("app.image_evaluators.convert_to_tensor")
+@patch("extractor_service.app.image_evaluators.convert_to_tensor")
@patch.object(InceptionResNetNIMA, "_calculate_weighted_mean")
@patch.object(InceptionResNetNIMA, "_check_scores")
def test_evaluate_images(mock_check, mock_calculate, mock_convert_to_tensor, evaluator, caplog):
diff --git a/extractor_service/app/tests/unit/image_processors_test.py b/tests/extractor_service/unit/image_processors_test.py
similarity index 97%
rename from extractor_service/app/tests/unit/image_processors_test.py
rename to tests/extractor_service/unit/image_processors_test.py
index f22e30c..55a4e2c 100644
--- a/extractor_service/app/tests/unit/image_processors_test.py
+++ b/tests/extractor_service/unit/image_processors_test.py
@@ -6,7 +6,7 @@
import cv2
import numpy as np
-from app.image_processors import OpenCVImage
+from extractor_service.app.image_processors import OpenCVImage
@patch.object(cv2, "imread")
diff --git a/extractor_service/app/tests/unit/nima_models_test.py b/tests/extractor_service/unit/nima_models_test.py
similarity index 87%
rename from extractor_service/app/tests/unit/nima_models_test.py
rename to tests/extractor_service/unit/nima_models_test.py
index a1d4ec7..1f25ace 100644
--- a/extractor_service/app/tests/unit/nima_models_test.py
+++ b/tests/extractor_service/unit/nima_models_test.py
@@ -5,7 +5,7 @@
import numpy as np
import pytest
-from app.image_evaluators import _ResNetModel
+from extractor_service.app.image_evaluators import _ResNetModel
@pytest.fixture(autouse=True)
@@ -21,10 +21,10 @@ def test_get_prediction_weights():
assert result is _ResNetModel._prediction_weights
-@patch("app.image_evaluators.InceptionResNetV2")
-@patch("app.image_evaluators.Dropout")
-@patch("app.image_evaluators.Dense")
-@patch("app.image_evaluators.Model")
+@patch("extractor_service.app.image_evaluators.tf.keras.applications.InceptionResNetV2")
+@patch("extractor_service.app.image_evaluators.Dropout")
+@patch("extractor_service.app.image_evaluators.Dense")
+@patch("extractor_service.app.image_evaluators.Model")
def test_create_model(mock_model, mock_dense, mock_dropout, mock_resnet, caplog):
model_weights_path = Path("/fake/path/to/weights.h5")
model_inputs = "mock_input"
@@ -64,7 +64,7 @@ def test_class_arguments():
assert model._model is None
assert list(model._prediction_weights) == list(np.arange(1, 11))
assert model._input_shape == (224, 224, 3)
- assert model._dropout_rate == 0.75
+ assert np.isclose(model._dropout_rate, 0.75, rtol=1e-9)
assert model._num_classes == 10
@@ -133,13 +133,14 @@ def test_get_model_weights(mock_download, mock_is_file, file_exists, caplog):
@pytest.mark.parametrize("status_code", (200, 404))
@patch.object(Path, "write_bytes")
-@patch("app.image_evaluators.requests.get")
+@patch("extractor_service.app.image_evaluators.requests.get")
@patch.object(Path, "mkdir")
def test_download_model_weights_success(mock_mkdir, mock_get, mock_write_bytes, status_code, caplog):
- test_url = "http://example.com/weights.h5"
+ test_url = "https://example.com/weights.h5"
test_path = Path("/fake/path/to/weights.h5")
- _ResNetModel._config = MagicMock(weights_repo_url="http://example.com/", weights_filename="weights.h5")
+ _ResNetModel._config = MagicMock(weights_repo_url="https://example.com/", weights_filename="weights.h5")
weights_data = b"weights data"
+ timeout = 12
mock_response = MagicMock()
mock_response.status_code = status_code
@@ -148,7 +149,7 @@ def test_download_model_weights_success(mock_mkdir, mock_get, mock_write_bytes,
if status_code == 200:
with caplog.at_level(logging.DEBUG):
- _ResNetModel._download_model_weights(test_path)
+ _ResNetModel._download_model_weights(test_path, timeout)
mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
mock_write_bytes.assert_called_once_with(weights_data)
assert f"Model weights downloaded and saved to {test_path}" in caplog.text
@@ -156,7 +157,7 @@ def test_download_model_weights_success(mock_mkdir, mock_get, mock_write_bytes,
error_message = f"Failed to download the weights: HTTP status code {status_code}"
with caplog.at_level(logging.DEBUG), \
pytest.raises(_ResNetModel.DownloadingModelWeightsError, match=error_message):
- _ResNetModel._download_model_weights(test_path)
+ _ResNetModel._download_model_weights(test_path, timeout)
assert "Failed to download the weights: HTTP status code 404" in caplog.text
assert f"Downloading model weights from ulr: {test_url}" in caplog.text
- mock_get.assert_called_once_with(test_url, allow_redirects=True)
+ mock_get.assert_called_once_with(test_url, allow_redirects=True, timeout=timeout)
diff --git a/extractor_service/app/tests/unit/schemas_test.py b/tests/extractor_service/unit/schemas_test.py
similarity index 95%
rename from extractor_service/app/tests/unit/schemas_test.py
rename to tests/extractor_service/unit/schemas_test.py
index a8ffe3e..796e923 100644
--- a/extractor_service/app/tests/unit/schemas_test.py
+++ b/tests/extractor_service/unit/schemas_test.py
@@ -4,7 +4,7 @@
import pytest
from pydantic import ValidationError
-from app.schemas import ExtractorConfig, Message, ExtractorStatus
+from extractor_service.app.schemas import ExtractorConfig, Message, ExtractorStatus
def test_config_default():
diff --git a/extractor_service/app/tests/unit/top_images_extractor_test.py b/tests/extractor_service/unit/top_images_extractor_test.py
similarity index 87%
rename from extractor_service/app/tests/unit/top_images_extractor_test.py
rename to tests/extractor_service/unit/top_images_extractor_test.py
index b35bfed..f48f5ea 100644
--- a/extractor_service/app/tests/unit/top_images_extractor_test.py
+++ b/tests/extractor_service/unit/top_images_extractor_test.py
@@ -4,13 +4,17 @@
import numpy as np
import pytest
-from app.extractors import TopImagesExtractor
-from app.image_processors import OpenCVImage
+from extractor_service.app.extractors import TopImagesExtractor
+from extractor_service.app.image_evaluators import InceptionResNetNIMA
+from extractor_service.app.image_processors import OpenCVImage
+from extractor_service.app.video_processors import OpenCVVideo
@pytest.fixture()
def extractor(config):
- extractor = TopImagesExtractor(config)
+ extractor = TopImagesExtractor(
+ config, OpenCVImage, OpenCVVideo, InceptionResNetNIMA
+ )
return extractor
diff --git a/extractor_service/app/tests/unit/video_processors_test.py b/tests/extractor_service/unit/video_processors_test.py
similarity index 92%
rename from extractor_service/app/tests/unit/video_processors_test.py
rename to tests/extractor_service/unit/video_processors_test.py
index a76b69b..a7df3ea 100644
--- a/extractor_service/app/tests/unit/video_processors_test.py
+++ b/tests/extractor_service/unit/video_processors_test.py
@@ -5,7 +5,9 @@
import cv2
import pytest
-from app.video_processors import OpenCVVideo
+from extractor_service.app.video_processors import OpenCVVideo
+
+TOTAL_FRAMES_ATTR = "total frames"
@patch.object(cv2, "VideoCapture")
@@ -30,6 +32,7 @@ def test_get_video_capture_failure(mock_cap):
with pytest.raises(OpenCVVideo.CantOpenVideoCapture):
with OpenCVVideo._video_capture(test_path):
+ # No additional operations are needed here, we are just testing the exception
pass
mock_video.release.assert_called_once()
@@ -53,11 +56,12 @@ def mock_video():
@patch.object(OpenCVVideo, '_read_next_frame')
def test_get_next_video_frames(mock_read, mock_get_attribute, mock_video_cap,
batch_size, expected_num_batches, caplog):
+ frame_rate_attr = "frame rate"
video_path = MagicMock()
mock_video = MagicMock()
frames_number = 3
mock_get_attribute.side_effect = lambda video, attribute_id, value_name: \
- frames_number if "total frames" in value_name else 1
+ frames_number if TOTAL_FRAMES_ATTR in value_name else 1
mock_video_cap.return_value.__enter__.return_value = mock_video
mock_read.side_effect = lambda video, idx: f"frame{idx // 30}"
@@ -70,8 +74,8 @@ def test_get_next_video_frames(mock_read, mock_get_attribute, mock_video_cap,
assert len(batch) <= batch_size, "Batch size is larger than expected"
assert mock_video_cap.called
assert mock_get_attribute.call_count == 2
- mock_get_attribute.assert_any_call(mock_video, cv2.CAP_PROP_FPS, "frame rate")
- mock_get_attribute.assert_any_call(mock_video, cv2.CAP_PROP_FRAME_COUNT, "total frames")
+ mock_get_attribute.assert_any_call(mock_video, cv2.CAP_PROP_FPS, frame_rate_attr)
+ mock_get_attribute.assert_any_call(mock_video, cv2.CAP_PROP_FRAME_COUNT, TOTAL_FRAMES_ATTR)
assert mock_read.call_count == 3
assert "Frame appended to frames batch." in caplog.text
@@ -103,7 +107,7 @@ def test_read_next_frame(mock_check_cap, read_return, caplog):
def test_get_video_attribute(mock_check_cap, caplog):
mock_cap = MagicMock(spec=cv2.VideoCapture)
attribute_id = cv2.CAP_PROP_FRAME_COUNT
- value_name = "total frames"
+ value_name = TOTAL_FRAMES_ATTR
total_frames = 24.6
mock_cap.get.return_value = total_frames
@@ -119,7 +123,7 @@ def test_get_video_attribute(mock_check_cap, caplog):
def test_get_video_attribute_invalid(mock_check_cap, caplog):
mock_cap = MagicMock(spec=cv2.VideoCapture)
attribute_id = cv2.CAP_PROP_FRAME_COUNT
- value_name = "total frames"
+ value_name = TOTAL_FRAMES_ATTR
total_frames = -24.6
mock_cap.get.return_value = total_frames
expected_message = f"Invalid {value_name} retrieved: {total_frames}."
diff --git a/service_manager/tests/e2e/__init__.py b/tests/service_manager/__init__.py
similarity index 100%
rename from service_manager/tests/e2e/__init__.py
rename to tests/service_manager/__init__.py
diff --git a/service_manager/tests/integration/__init__.py b/tests/service_manager/e2e/__init__.py
similarity index 100%
rename from service_manager/tests/integration/__init__.py
rename to tests/service_manager/e2e/__init__.py
diff --git a/service_manager/tests/e2e/best_frames_extractor_test.py b/tests/service_manager/e2e/best_frames_extractor_test.py
similarity index 84%
rename from service_manager/tests/e2e/best_frames_extractor_test.py
rename to tests/service_manager/e2e/best_frames_extractor_test.py
index 9c6fa6e..fad7f05 100644
--- a/service_manager/tests/e2e/best_frames_extractor_test.py
+++ b/tests/service_manager/e2e/best_frames_extractor_test.py
@@ -1,14 +1,18 @@
import subprocess
import sys
+import pytest
+import os
+@pytest.mark.skipif("CI" in os.environ, reason="Test skipped in GitHub Actions.")
def test_best_frames_extractor(setup_best_frames_extractor_env, start_script_path):
input_directory, output_directory, expected_video_path = setup_best_frames_extractor_env
command = [
sys.executable, str(start_script_path), "best_frames_extractor",
"--input_dir", str(input_directory),
"--output_dir", str(output_directory),
- "--build"
+ "--build",
+ "--cpu"
]
subprocess.run(command)
diff --git a/service_manager/tests/e2e/conftest.py b/tests/service_manager/e2e/conftest.py
similarity index 72%
rename from service_manager/tests/e2e/conftest.py
rename to tests/service_manager/e2e/conftest.py
index 7a7b380..04b2ec8 100644
--- a/service_manager/tests/e2e/conftest.py
+++ b/tests/service_manager/e2e/conftest.py
@@ -1,11 +1,8 @@
-import sys
from pathlib import Path
import pytest
-common_path = Path(__file__).parent.parent.parent.parent / "common"
-sys.path.insert(0, str(common_path))
-from common import (
+from tests.common import (
files_dir, best_frames_dir, top_images_dir,
setup_top_images_extractor_env, setup_best_frames_extractor_env
)
@@ -14,5 +11,6 @@
@pytest.fixture(scope="module")
def start_script_path():
base_path = Path(__file__).parent.parent.parent.parent
+ print(base_path)
start_script_path = base_path / "start.py"
return start_script_path
diff --git a/service_manager/tests/e2e/top_images_extractor_test.py b/tests/service_manager/e2e/top_images_extractor_test.py
similarity index 82%
rename from service_manager/tests/e2e/top_images_extractor_test.py
rename to tests/service_manager/e2e/top_images_extractor_test.py
index 79a10b9..af515ec 100644
--- a/service_manager/tests/e2e/top_images_extractor_test.py
+++ b/tests/service_manager/e2e/top_images_extractor_test.py
@@ -1,14 +1,18 @@
import subprocess
import sys
+import pytest
+import os
+@pytest.mark.skipif("CI" in os.environ, reason="Test skipped in GitHub Actions.")
def test_top_images_extractor(setup_top_images_extractor_env, start_script_path):
input_directory, output_directory = setup_top_images_extractor_env
command = [
sys.executable, str(start_script_path), "top_images_extractor",
"--input_dir", input_directory,
"--output_dir", output_directory,
- "--build"
+ "--build",
+ "--cpu"
]
subprocess.run(command)
diff --git a/service_manager/tests/unit/__init__.py b/tests/service_manager/integration/__init__.py
similarity index 100%
rename from service_manager/tests/unit/__init__.py
rename to tests/service_manager/integration/__init__.py
diff --git a/service_manager/tests/integration/conftest.py b/tests/service_manager/integration/conftest.py
similarity index 75%
rename from service_manager/tests/integration/conftest.py
rename to tests/service_manager/integration/conftest.py
index 83d8b6a..eab8e5a 100644
--- a/service_manager/tests/integration/conftest.py
+++ b/tests/service_manager/integration/conftest.py
@@ -2,7 +2,7 @@
import pytest
from config import Config
-from ...docker_manager import DockerManager
+from service_manager.docker_manager import DockerManager
@pytest.fixture(scope="package")
@@ -15,7 +15,8 @@ def config():
def manager(config):
manager = DockerManager(
config.service_name, config.input_directory,
- config.output_directory, config.port, False
+ config.output_directory, config.port,
+ False, True
)
return manager
diff --git a/service_manager/tests/integration/docker_container_test.py b/tests/service_manager/integration/docker_container_test.py
similarity index 86%
rename from service_manager/tests/integration/docker_container_test.py
rename to tests/service_manager/integration/docker_container_test.py
index f25e143..32102dc 100644
--- a/service_manager/tests/integration/docker_container_test.py
+++ b/tests/service_manager/integration/docker_container_test.py
@@ -1,11 +1,12 @@
import docker
import pytest
+COMMAND = "sleep 300"
+
@pytest.fixture
def image(client, manager, config):
image_name = "image_name"
- client = docker.from_env()
image = client.images.pull("busybox")
image.tag(image_name)
manager._image_name = image_name
@@ -43,7 +44,7 @@ def test_run_container(manager, config, client, cleanup_container, image):
def test_start_container(manager, cleanup_container, client, image):
- container = client.containers.create(image, command="sleep 300", detach=True, name=manager._container_name)
+ container = client.containers.create(image, command=COMMAND, detach=True, name=manager._container_name)
assert container.status == "created"
manager._start_container()
container.reload()
@@ -51,7 +52,7 @@ def test_start_container(manager, cleanup_container, client, image):
def test_stop_container(manager, cleanup_container, client, image):
- container = client.containers.create(image, command="sleep 300", detach=True, name=manager._container_name)
+ container = client.containers.create(image, command=COMMAND, detach=True, name=manager._container_name)
assert container.status == "created"
container.start()
container.reload()
@@ -62,7 +63,7 @@ def test_stop_container(manager, cleanup_container, client, image):
def test_delete_container(manager, cleanup_container, client, image):
- container = client.containers.create(image, command="sleep 300", detach=True, name=manager._container_name)
+ container = client.containers.create(image, command=COMMAND, detach=True, name=manager._container_name)
assert container.status == "created"
manager._delete_container()
with pytest.raises(docker.errors.NotFound):
@@ -70,7 +71,7 @@ def test_delete_container(manager, cleanup_container, client, image):
def test_container_status(manager, cleanup_container, client, image):
- container = client.containers.create(image, command="sleep 300", detach=True, name=manager._container_name)
+ container = client.containers.create(image, command=COMMAND, detach=True, name=manager._container_name)
assert container.status == "created"
assert manager.container_status == "created"
container.start()
@@ -80,7 +81,7 @@ def test_container_status(manager, cleanup_container, client, image):
def test_run_log_process(manager, cleanup_container, client, image):
- container = client.containers.run(
+ client.containers.run(
image,
command="sh -c 'while true; do date; done'",
detach=True,
diff --git a/service_manager/tests/integration/docker_image_test.py b/tests/service_manager/integration/docker_image_test.py
similarity index 100%
rename from service_manager/tests/integration/docker_image_test.py
rename to tests/service_manager/integration/docker_image_test.py
diff --git a/service_manager/tests/integration/service_initializer_test.py b/tests/service_manager/integration/service_initializer_test.py
similarity index 100%
rename from service_manager/tests/integration/service_initializer_test.py
rename to tests/service_manager/integration/service_initializer_test.py
diff --git a/tests/service_manager/unit/__init__.py b/tests/service_manager/unit/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/service_manager/tests/unit/conftest.py b/tests/service_manager/unit/conftest.py
similarity index 100%
rename from service_manager/tests/unit/conftest.py
rename to tests/service_manager/unit/conftest.py
diff --git a/service_manager/tests/unit/docker_manager_test.py b/tests/service_manager/unit/docker_manager_test.py
similarity index 86%
rename from service_manager/tests/unit/docker_manager_test.py
rename to tests/service_manager/unit/docker_manager_test.py
index 7bbf575..e41ed38 100644
--- a/service_manager/tests/unit/docker_manager_test.py
+++ b/tests/service_manager/unit/docker_manager_test.py
@@ -6,6 +6,9 @@
from service_manager.docker_manager import DockerManager
+LOG_LINE_1 = "log line 1\n"
+LOG_LINE_2 = "log line 2\n"
+
def test_docker_manager_init(caplog, config):
image_name = f"{config.service_name}_image"
@@ -15,13 +18,15 @@ def test_docker_manager_init(caplog, config):
f"Input directory from user: {config.input_directory}",
f"Output directory from user: {config.output_directory}",
f"Port from user: {config.port}",
- f"Force build: False"
+ "Force build: False",
+ "CPU only: False"
)
with caplog.at_level(logging.DEBUG):
docker = DockerManager(
config.service_name, config.input_directory,
- config.output_directory, config.port, False
+ config.output_directory, config.port,
+ False, False
)
assert docker._container_name == config.service_name
@@ -30,6 +35,7 @@ def test_docker_manager_init(caplog, config):
assert docker._output_directory == config.output_directory
assert docker._port == config.port
assert docker._force_build is False
+ assert docker._cpu_only is False
for message in expected_logs:
assert message in caplog.text, \
f"Expected phrase not found in logs: {message}"
@@ -39,7 +45,8 @@ def test_docker_manager_init(caplog, config):
def docker(config):
docker = DockerManager(
config.service_name, config.input_directory,
- config.output_directory, config.port, False
+ config.output_directory, config.port,
+ False, False
)
return docker
@@ -56,7 +63,7 @@ def test_check_image_exists(mock_image, is_exists, docker, mock_run):
mock_run.return_value = MagicMock(stdout=mock_image)
assert docker.docker_image_existence is is_exists
- mock_run.assert_called_with(expected_command, capture_output=True, text=True)
+ mock_run.assert_called_with(expected_command, capture_output=True, text=True, check=True)
@patch.object(DockerManager, "_check_image_exists")
@@ -66,7 +73,7 @@ def test_build_image(mock_check_image_exists, docker, mock_run, caplog, config):
docker.build_image(config.dockerfile)
- mock_run.assert_called_once_with(expected_command)
+ mock_run.assert_called_once_with(expected_command, check=True)
@patch.object(DockerManager, "_check_image_exists")
@@ -95,22 +102,22 @@ def test_build_image_when_image_exists_and_force_build(
assert "Building Docker image..." in caplog.text
-@pytest.mark.parametrize("code, output, status", ((1, "", None), (0, "'running'", "'running'")))
+@pytest.mark.parametrize("code, output, status", ((1, "", None), (0, "'running'", "running")))
def test_container_status(code, output, status, docker, mock_run):
command_output = MagicMock()
command_output.returncode = code
command_output.stdout = output
- mock_subprocess_run.return_value = command_output
+ mock_run.return_value = command_output
expected_command = ["docker", "inspect", "--format='{{.State.Status}}'", docker._container_name]
- status = docker.container_status
+ result_status = docker.container_status
- mock_run.assert_called_once_with(expected_command, capture_output=True, text=True)
- assert status == status
+ mock_run.assert_called_once_with(expected_command, capture_output=True, text=True, check=False)
+ assert status == result_status
@pytest.mark.parametrize("build", (True, False))
-@pytest.mark.parametrize("status", ("exited", None, "running", "dead"))
+@pytest.mark.parametrize("status", ("exited", None, "running", "dead", "created"))
@patch.object(DockerManager, "_stop_container")
@patch.object(DockerManager, "_delete_container")
@patch.object(DockerManager, "_run_container")
@@ -147,7 +154,7 @@ def test_deploy_container(
mock_stop.assert_not_called()
mock_delete.assert_called_once()
mock_run.assert_called_once_with(*deploy_container_args)
- elif status == "exited":
+ elif status in ["exited", "created"]:
mock_start.assert_called_once()
mock_run.assert_not_called()
elif status == "running":
@@ -170,17 +177,26 @@ def test_start_container_success(docker, mock_run, caplog):
assert "Starting the existing container..." in caplog.text
-def test_run_container(docker, mock_run, config, caplog):
+@pytest.mark.parametrize("cpu", (True, False))
+def test_run_container(docker, mock_run, config, caplog, cpu):
+
expected_command = [
- "docker", "run", "--name", docker._container_name, "--gpus", "all",
+ "docker", "run", "--name", docker._container_name,
"--restart", "unless-stopped", "-d",
"-p", f"{docker._port}:{config.port}",
"-v", f"{docker._input_directory}:{config.input_directory}",
- "-v", f"{docker._output_directory}:{config.input_directory}",
- docker._image_name
+ "-v", f"{docker._output_directory}:{config.input_directory}"
]
- with caplog.at_level(logging.INFO):
- docker._run_container(config.port, config.input_directory, config.input_directory)
+ if not cpu:
+ expected_command.extend(["--gpus", "all"])
+ expected_command.append(docker._image_name)
+ try:
+ if cpu:
+ docker._cpu_only = True
+ with caplog.at_level(logging.INFO):
+ docker._run_container(config.port, config.input_directory, config.input_directory)
+ finally:
+ docker._cpu_only = False
mock_run.assert_called_once_with(expected_command, check=True)
assert "Running a new container..." in caplog.text
@@ -228,13 +244,13 @@ def test_delete_container_success(docker, mock_run, caplog):
@patch.object(DockerManager, "_stop_container")
def test_follow_container_logs_stopped_by_user(mock_stop, mock_run_log, mock_stdout, docker, caplog):
mock_process = MagicMock()
- mock_process.stdout.readline.side_effect = ["log line 1\n", "log line 2\n", KeyboardInterrupt()]
+ mock_process.stdout.readline.side_effect = [LOG_LINE_1, LOG_LINE_2, KeyboardInterrupt()]
mock_run_log.return_value = mock_process
mock_process.terminate = MagicMock()
mock_process.wait = MagicMock()
with caplog.at_level(logging.INFO), \
- patch.object(subprocess, "Popen", autospec=True) as pop:
+ patch.object(subprocess, "Popen", autospec=True):
docker.follow_container_logs()
mock_run_log.assert_called_once()
@@ -242,7 +258,7 @@ def test_follow_container_logs_stopped_by_user(mock_stop, mock_run_log, mock_std
mock_process.wait.assert_called_once()
mock_stop.assert_called_once()
- calls = [call("log line 1\n"), call("log line 2\n")]
+ calls = [call(LOG_LINE_1), call(LOG_LINE_2)]
mock_stdout.assert_has_calls(calls, any_order=True)
assert "Process stopped by user." in caplog.text
assert "Following container logs stopped." in caplog.text
@@ -255,7 +271,7 @@ def test_follow_container_logs_stopped_automatically(mock_stop, mock_run_log,
mock_stdout, docker, caplog):
mock_process = MagicMock()
mock_process.stdout.readline.side_effect = [
- "log line 1\n", "log line 2\n", DockerManager.ServiceShutdownSignal()
+ LOG_LINE_1, LOG_LINE_2, DockerManager.ServiceShutdownSignal()
]
mock_run_log.return_value = mock_process
mock_process.terminate = MagicMock()
@@ -270,7 +286,7 @@ def test_follow_container_logs_stopped_automatically(mock_stop, mock_run_log,
mock_process.wait.assert_called_once()
mock_stop.assert_called_once()
- calls = [call("log line 1\n"), call("log line 2\n")]
+ calls = [call(LOG_LINE_1), call(LOG_LINE_2)]
mock_stdout.assert_has_calls(calls, any_order=True)
assert "Service has signaled readiness for shutdown." in caplog.text
assert "Following container logs stopped." in caplog.text
diff --git a/service_manager/tests/unit/service_initializer_test.py b/tests/service_manager/unit/service_initializer_test.py
similarity index 100%
rename from service_manager/tests/unit/service_initializer_test.py
rename to tests/service_manager/unit/service_initializer_test.py
diff --git a/tests/test_files/best_frames/.gitkeep b/tests/test_files/best_frames/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/common/test_files/frames_extracted_test_video.mp4 b/tests/test_files/frames_extracted_test_video.mp4
similarity index 100%
rename from common/test_files/frames_extracted_test_video.mp4
rename to tests/test_files/frames_extracted_test_video.mp4
diff --git a/common/test_files/image_3e4aa2ce-7f83-45fd-b56f-e3bed645224e.jpg b/tests/test_files/image_3e4aa2ce-7f83-45fd-b56f-e3bed645224e.jpg
similarity index 100%
rename from common/test_files/image_3e4aa2ce-7f83-45fd-b56f-e3bed645224e.jpg
rename to tests/test_files/image_3e4aa2ce-7f83-45fd-b56f-e3bed645224e.jpg
diff --git a/tests/test_files/top_images/.gitkeep b/tests/test_files/top_images/.gitkeep
new file mode 100644
index 0000000..e69de29