diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 789b2e3..f39aa53 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -2,16 +2,17 @@ name: Memoraith CI
 
 on:
   push:
-    branches: [ main ]
+    branches: [ main, develop ]
   pull_request:
-    branches: [ main ]
+    branches: [ main, develop ]
 
 jobs:
-  build:
+  test:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.7, 3.8, 3.9, '3.10']
+        python-version: [3.8, 3.9, '3.10']
+      fail-fast: false
 
     steps:
     - uses: actions/checkout@v2
@@ -22,15 +23,66 @@ jobs:
     - name: Install dependencies
       run: |
         python -m pip install --upgrade pip
-        pip install -r requirements.txt
-    - name: Run tests
+        pip install tox tox-gh-actions
+    - name: Test with tox
+      run: tox
+      env:
+        PYTHON_VERSION: ${{ matrix.python-version }}
+    - name: Upload coverage to Codecov
+      uses: codecov/codecov-action@v1
+      with:
+        fail_ci_if_error: true
+
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: 3.9
+    - name: Install dependencies
       run: |
-        pytest tests/
-    - name: Lint with flake8
+        python -m pip install --upgrade pip
+        pip install flake8 mypy
+    - name: Run linters
       run: |
-        pip install flake8
         flake8 .
-    - name: Check type hints with mypy
+        mypy memoraith
+
+  docs:
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: 3.9
+    - name: Install dependencies
       run: |
-        pip install mypy
-        mypy memoraith/
\ No newline at end of file
+        python -m pip install --upgrade pip
+        pip install -r requirements.txt
+        pip install sphinx sphinx-rtd-theme
+    - name: Build documentation
+      run: |
+        cd docs
+        make html
+    - name: Deploy to GitHub Pages
+      uses: peaceiris/actions-gh-pages@v3
+      with:
+        github_token: ${{ secrets.GITHUB_TOKEN }}
+        publish_dir: ./docs/build/html
+
+  build:
+    needs: [test, lint, docs]
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: 3.9
+    - name: Install pypa/build
+      run: python -m pip install build
+    - name: Build a binary wheel and a source tarball
+      run: python -m build --sdist --wheel --outdir dist/ .
\ No newline at end of file
diff --git a/README.md b/README.md
index bc6cf50..531a76e 100644
--- a/README.md
+++ b/README.md
@@ -1,34 +1,34 @@
 # Memoraith
 
-Memoraith is a lightweight model profiler for deep learning frameworks, designed to help you optimize your neural network models by providing detailed insights into their performance characteristics.
+Memoraith is a cutting-edge, lightweight model profiler for deep learning frameworks, developed by Mehdi El Jouhfi. It's designed to revolutionize the optimization of neural network models by providing unparalleled insights into their performance characteristics.
 
 ## Features
 
-- Supports PyTorch and TensorFlow models
-- Profiles memory usage (CPU and GPU)
-- Measures computation time for each layer
-- Detects bottlenecks and anomalies
-- Generates comprehensive reports with visualizations
-- Provides real-time visualization capabilities
-- Offers both programmatic and command-line interfaces
+- Advanced support for PyTorch and TensorFlow models
+- High-precision profiling of memory usage (CPU and GPU)
+- Microsecond-accurate computation time measurement for each layer
+- Sophisticated bottleneck and anomaly detection algorithms
+- Generation of comprehensive, interactive reports with advanced visualizations
+- Real-time visualization capabilities with minimal overhead
+- Flexible programmatic and command-line interfaces
 
 ## Installation
 
-You can install Memoraith using pip:
+Install Memoraith using pip:
 
 ```bash
 pip install memoraith
 ```
 
-For GPU support, install with:
+For GPU support and additional features:
 
 ```bash
-pip install memoraith[gpu]
+pip install memoraith[full]
 ```
 
 ## Quick Start
 
-Here's a simple example of how to use Memoraith with a PyTorch model:
+Here's an example of Memoraith in action with a PyTorch model:
 
 ```python
 from memoraith import profile_model, set_output_path
@@ -37,38 +37,43 @@ import torch.nn as nn
 
 set_output_path('profiling_results/')
 
-class SimpleNet(nn.Module):
+class AdvancedNet(nn.Module):
     def __init__(self):
-        super(SimpleNet, self).__init__()
-        self.fc = nn.Linear(10, 5)
+        super(AdvancedNet, self).__init__()
+        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
+        self.conv2 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
+        self.fc = nn.Linear(128 * 32 * 32, 10)
 
     def forward(self, x):
+        x = torch.relu(self.conv1(x))
+        x = torch.relu(self.conv2(x))
+        x = x.view(x.size(0), -1)
         return self.fc(x)
 
-@profile_model(memory=True, computation=True, gpu=True)
+@profile_model(memory=True, computation=True, gpu=True, network=True)
 def train_model(model):
     optimizer = torch.optim.Adam(model.parameters())
     for _ in range(100):
-        input_data = torch.randn(32, 10)
+        input_data = torch.randn(32, 3, 32, 32)
         output = model(input_data)
         loss = output.sum()
         loss.backward()
         optimizer.step()
 
 if __name__ == "__main__":
-    model = SimpleNet()
+    model = AdvancedNet()
     train_model(model)
 ```
 
-This will generate a profiling report in the 'profiling_results/' directory.
+This will generate a comprehensive profiling report in the 'profiling_results/' directory.
 
 ## Documentation
 
-For more detailed information on how to use Memoraith, please refer to our [documentation](https://memoraith.readthedocs.io).
+For detailed information on Memoraith's advanced features, please refer to our [comprehensive documentation](https://memoraith.readthedocs.io).
 
 ## Contributing
 
-We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for more details.
+Contributions are welcome! Please read our [Contributing Guide](CONTRIBUTING.md) for details on our code of conduct and the process for submitting pull requests.
 
 ## License
 
@@ -76,17 +81,21 @@ Memoraith is released under the MIT License. See the [LICENSE](LICENSE) file for
 
 ## Support
 
-If you encounter any issues or have questions, please file an issue on the [GitHub issue tracker](https://github.com/yourusername/memoraith/issues).
+If you encounter any issues or have questions, please file an issue on the [GitHub issue tracker](https://github.com/mehdi342/Memoraith/issues).
 
 ## Citing Memoraith
 
 If you use Memoraith in your research, please cite it as follows:
 
-```
+```bibtex
 @software{memoraith,
-  author = {Your Name},
-  title = {Memoraith: A Lightweight Model Profiler for Deep Learning},
-  year = {2023},
-  url = {https://github.com/yourusername/memoraith}
+  author = {El Jouhfi, Mehdi},
+  title = {Memoraith: Advanced Lightweight Model Profiler for Deep Learning},
+  year = {2024},
+  url = {https://github.com/mehdi342/Memoraith}
 }
-```
\ No newline at end of file
+```
+
+## Contact
+
+For inquiries, please contact Mehdi El Jouhfi at midojouhfi@gmail.com.
\ No newline at end of file
diff --git a/memoraith/__init__.py b/memoraith/__init__.py
index a0823ed..78bc964 100644
--- a/memoraith/__init__.py
+++ b/memoraith/__init__.py
@@ -7,4 +7,5 @@
 from .profiler import profile_model, set_output_path
 from .config import Config
 from .exceptions import MemoraithError
-from .visualization.real_time_visualizer import RealTimeVisualizer
\ No newline at end of file
+from .visualization.real_time_visualizer import RealTimeVisualizer
+from .network_profiler import NetworkProfiler
\ No newline at end of file
diff --git a/memoraith/data_collection/__init__.py b/memoraith/data_collection/__init__.py
index 8ea0527..04f0b8d 100644
--- a/memoraith/data_collection/__init__.py
+++ b/memoraith/data_collection/__init__.py
@@ -2,3 +2,4 @@
 from .gpu_memory import GPUMemoryTracker
 from .time_tracking import TimeTracker
 from .resource_lock import ResourceLock
+from memoraith.data_collection.network_profiler import NetworkProfiler
\ No newline at end of file
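With this re-export in place, the profiler becomes importable from the package root. A minimal sketch, assuming the package is installed and the new `network_profiler` module is packaged inside `memoraith/` as the import expects (the file added later in this diff sits at the repository root, so the path is an assumption):

```python
# Hedged sketch: assumes memoraith is installed and that network_profiler.py
# ends up packaged as memoraith/network_profiler.py, as this import expects.
from memoraith import NetworkProfiler

profiler = NetworkProfiler(interval=0.5)
```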
diff --git a/memoraith/profiler.py b/memoraith/profiler.py
index 4a6d549..7cf8d35 100644
--- a/memoraith/profiler.py
+++ b/memoraith/profiler.py
@@ -1,81 +1,80 @@
-import functools
+from typing import Dict, Any
 import logging
 import asyncio
-from typing import Callable, Any, Optional
-from .config import config
-from .logging_config import setup_logging
-from .integration import get_framework_adapter
-from .analysis import Analyzer
-from .reporting import ReportGenerator
-from .exceptions import MemoraithError
-from .visualization.real_time_visualizer import RealTimeVisualizer
+from memoraith.data_collection.cpu_memory import CPUMemoryTracker
+from memoraith.data_collection.gpu_memory import GPUMemoryTracker
+from memoraith.data_collection.time_tracking import TimeTracker
+from memoraith.data_collection.network_profiler import NetworkProfiler
 
-def profile_model(
-    memory: bool = True,
-    computation: bool = True,
-    gpu: bool = False,
-    save_report: bool = True,
-    report_format: str = 'html',
-    real_time_viz: bool = False
-) -> Callable:
-    """
-    Decorator to profile a model's training or inference function.
+class ModelProfiler:
+    def __init__(self):
+        self.cpu_tracker = CPUMemoryTracker()
+        self.gpu_tracker = GPUMemoryTracker()
+        self.time_tracker = TimeTracker()
+        self.network_profiler = NetworkProfiler()
+        self.logger = logging.getLogger(__name__)
 
-    Args:
-        memory (bool): Enable memory profiling
-        computation (bool): Enable computation time profiling
-        gpu (bool): Enable GPU profiling
-        save_report (bool): Save the profiling report
-        report_format (str): Format of the saved report ('html' or 'pdf')
-        real_time_viz (bool): Enable real-time visualization
+    async def start_profiling(self):
+        self.logger.info("Starting model profiling")
+        await self.cpu_tracker.start()
+        if self.gpu_tracker:
+            await self.gpu_tracker.start()
+        self.time_tracker.start('training')
+        self.network_profiler.start()
 
-    Returns:
-        Callable: Decorated function
-    """
-    def decorator(func: Callable) -> Callable:
-        @functools.wraps(func)
-        async def wrapper(*args: Any, **kwargs: Any) -> Any:
-            setup_logging(config.log_level)
-            logger = logging.getLogger('memoraith')
-            logger.info("Starting Memoraith Profiler...")
+    async def stop_profiling(self):
+        self.logger.info("Stopping model profiling")
+        cpu_memory = await self.cpu_tracker.get_peak_memory()
+        gpu_memory = await self.gpu_tracker.get_peak_memory() if self.gpu_tracker else None
+        duration = self.time_tracker.get_duration('training')
+        network_usage = self.network_profiler.stop()
 
-            config.enable_memory = memory
-            config.enable_time = computation
-            config.enable_gpu = gpu
+        profiling_results = {
+            'cpu_memory': cpu_memory,
+            'gpu_memory': gpu_memory,
+            'training_time': duration,
+            'network_usage': network_usage
+        }
+        self.logger.info(f"Profiling results: {profiling_results}")
+        return profiling_results
 
-            try:
-                model = kwargs.get('model') or args[0]
-                adapter = get_framework_adapter(model)
+    async def profile_step(self, step_name: str):
+        self.time_tracker.start(step_name)
+        cpu_memory_before = await self.cpu_tracker.get_current_memory()
+        gpu_memory_before = await self.gpu_tracker.get_current_memory() if self.gpu_tracker else None
+        network_usage_before = self.network_profiler.get_current_usage()
 
-                visualizer = RealTimeVisualizer() if real_time_viz else None
+        yield  # Yield control to allow the step to execute
 
-                async with adapter:
-                    if asyncio.iscoroutinefunction(func):
-                        result = await func(*args, **kwargs)
-                    else:
-                        result = await asyncio.to_thread(func, *args, **kwargs)
+        cpu_memory_after = await self.cpu_tracker.get_current_memory()
+        gpu_memory_after = await self.gpu_tracker.get_current_memory() if self.gpu_tracker else None
+        network_usage_after = self.network_profiler.get_current_usage()
+        duration = self.time_tracker.stop(step_name)
 
-                if visualizer:
-                    await visualizer.update(adapter.data)
+        step_profile = {
+            'name': step_name,
+            'duration': duration,
+            'cpu_memory_used': cpu_memory_after - cpu_memory_before,
+            'gpu_memory_used': gpu_memory_after - gpu_memory_before if gpu_memory_after and gpu_memory_before else None,
+            'network_sent': network_usage_after['bytes_sent'] - network_usage_before['bytes_sent'],
+            'network_recv': network_usage_after['bytes_recv'] - network_usage_before['bytes_recv'],
+        }
 
-                analysis_results = await Analyzer(adapter.data).run_analysis()
+        self.logger.info(f"Step profile for {step_name}: {step_profile}")
+        yield step_profile
 
-                if save_report:
-                    await ReportGenerator(analysis_results).generate(format=report_format)
+    def get_summary(self) -> Dict[str, Any]:
+        return {
+            'total_time': self.time_tracker.get_total_duration(),
+            'peak_cpu_memory': self.cpu_tracker.get_peak_memory(),
+            'peak_gpu_memory': self.gpu_tracker.get_peak_memory() if self.gpu_tracker else None,
+            'average_network_usage': self.network_profiler.get_average_usage(),
+        }
 
-                logger.info("Memoraith Profiling Completed.")
-                return result
-
-            except MemoraithError as e:
-                logger.error(f"MemoraithError: {e}")
-                raise
-            except Exception as e:
-                logger.exception("An unexpected error occurred during profiling.")
-                raise
-
-        return wrapper
-    return decorator
-
-def set_output_path(path: str) -> None:
-    """Set the output path for profiling reports."""
-    config.set_output_path(path)
\ No newline at end of file
+    def reset(self):
+        self.cpu_tracker.reset()
+        if self.gpu_tracker:
+            self.gpu_tracker.reset()
+        self.time_tracker.reset()
+        self.network_profiler.reset()
+        self.logger.info("All profilers reset")
\ No newline at end of file
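The class above replaces the earlier decorator-based API, and `profile_step` is an async generator that snapshots resources before its first `yield` and again after its second. A minimal sketch of how it might be driven, assuming the tracker classes behave as the calls above imply; `train_one_epoch` is a hypothetical stand-in for user training code:

```python
# Hedged usage sketch for the new ModelProfiler API shown in this diff.
import asyncio
from memoraith.profiler import ModelProfiler

async def run_with_profiling(model, data):
    profiler = ModelProfiler()
    await profiler.start_profiling()

    for epoch in range(3):
        step = profiler.profile_step(f"epoch_{epoch}")
        await step.__anext__()              # take the "before" snapshots
        train_one_epoch(model, data)        # hypothetical user training code
        profile = await step.__anext__()    # take the "after" snapshots, get the step profile
        print(profile["duration"], profile["cpu_memory_used"])

    results = await profiler.stop_profiling()
    print(results)

# asyncio.run(run_with_profiling(model, data))
```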
diff --git a/network_profiler.py b/network_profiler.py
new file mode 100644
index 0000000..909b7fa
--- /dev/null
+++ b/network_profiler.py
@@ -0,0 +1,182 @@
+import psutil
+import time
+from typing import Dict, Any, Optional, List
+import logging
+import threading
+import asyncio
+
+class NetworkProfiler:
+    def __init__(self, interval: float = 0.1, detailed: bool = True):
+        self.interval = interval
+        self.detailed = detailed
+        self.start_data: Optional[psutil._common.snetio] = None
+        self.end_data: Optional[psutil._common.snetio] = None
+        self.is_profiling = False
+        self.thread: Optional[threading.Thread] = None
+        self.network_usage: List[Dict[str, int]] = []
+        self.lock = threading.Lock()
+        self.logger = logging.getLogger(__name__)
+        self.async_mode = False
+        self.task: Optional[asyncio.Task] = None
+
+    def start(self):
+        self.start_data = psutil.net_io_counters()
+        self.is_profiling = True
+        self.thread = threading.Thread(target=self._profile_network, daemon=True)
+        self.thread.start()
+        self.logger.info("Network profiling started")
+
+    async def start_async(self):
+        self.start_data = psutil.net_io_counters()
+        self.is_profiling = True
+        self.async_mode = True
+        self.task = asyncio.create_task(self._profile_network_async())
+        self.logger.info("Async network profiling started")
+
+    def stop(self) -> Dict[str, int]:
+        self.is_profiling = False
+        if self.thread:
+            self.thread.join()
+        self.end_data = psutil.net_io_counters()
+        total_usage = self._calculate_usage(self.start_data, self.end_data)
+        self.logger.info(f"Network profiling stopped. Total usage: {total_usage}")
+        return total_usage
+
+    async def stop_async(self) -> Dict[str, int]:
+        self.is_profiling = False
+        if self.task:
+            await self.task
+        self.end_data = psutil.net_io_counters()
+        total_usage = self._calculate_usage(self.start_data, self.end_data)
+        self.logger.info(f"Async network profiling stopped. Total usage: {total_usage}")
+        return total_usage
+
+    def _profile_network(self):
+        while self.is_profiling:
+            current_data = psutil.net_io_counters()
+            with self.lock:
+                self.network_usage.append(self._calculate_usage(self.start_data, current_data))
+            time.sleep(self.interval)
+
+    async def _profile_network_async(self):
+        while self.is_profiling:
+            current_data = psutil.net_io_counters()
+            with self.lock:
+                self.network_usage.append(self._calculate_usage(self.start_data, current_data))
+            await asyncio.sleep(self.interval)
+
+    def _calculate_usage(self, start: psutil._common.snetio, end: psutil._common.snetio) -> Dict[str, int]:
+        basic_usage = {
+            'bytes_sent': end.bytes_sent - start.bytes_sent,
+            'bytes_recv': end.bytes_recv - start.bytes_recv,
+            'packets_sent': end.packets_sent - start.packets_sent,
+            'packets_recv': end.packets_recv - start.packets_recv,
+        }
+        if self.detailed:
+            basic_usage.update({
+                'errin': end.errin - start.errin,
+                'errout': end.errout - start.errout,
+                'dropin': end.dropin - start.dropin,
+                'dropout': end.dropout - start.dropout
+            })
+        return basic_usage
+
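+    # Note: each sample appended by the monitor loops above is the cumulative delta
+    # since start(), not a per-interval figure, so get_current_usage() below reports
+    # usage so far; callers such as ModelProfiler.profile_step derive per-step numbers
+    # by subtracting two of these snapshots.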
+    def get_current_usage(self) -> Dict[str, int]:
+        with self.lock:
+            if self.network_usage:
+                return self.network_usage[-1]
+            return {'bytes_sent': 0, 'bytes_recv': 0, 'packets_sent': 0, 'packets_recv': 0}
+
+    def get_average_usage(self) -> Dict[str, float]:
+        with self.lock:
+            if not self.network_usage:
+                return {'bytes_sent': 0.0, 'bytes_recv': 0.0, 'packets_sent': 0.0, 'packets_recv': 0.0}
+            avg_usage = {k: sum(usage[k] for usage in self.network_usage) / len(self.network_usage)
+                         for k in self.network_usage[0].keys()}
+            return avg_usage
+
+    def get_total_usage(self) -> Dict[str, int]:
+        with self.lock:
+            if not self.network_usage:
+                return {'bytes_sent': 0, 'bytes_recv': 0, 'packets_sent': 0, 'packets_recv': 0}
+            total_usage = {k: sum(usage[k] for usage in self.network_usage)
+                           for k in self.network_usage[0].keys()}
+            return total_usage
+
+    def get_usage_over_time(self) -> List[Dict[str, int]]:
+        with self.lock:
+            return self.network_usage.copy()
+
+    def reset(self):
+        with self.lock:
+            self.network_usage.clear()
+        self.start_data = None
+        self.end_data = None
+        self.logger.info("Network profiler reset")
+
+    def get_network_stats(self) -> Dict[str, Any]:
+        stats = psutil.net_io_counters()
+        return {
+            'bytes_sent': stats.bytes_sent,
+            'bytes_recv': stats.bytes_recv,
+            'packets_sent': stats.packets_sent,
+            'packets_recv': stats.packets_recv,
+            'errin': stats.errin,
+            'errout': stats.errout,
+            'dropin': stats.dropin,
+            'dropout': stats.dropout
+        }
+
+    def get_network_connections(self) -> List[Dict[str, Any]]:
+        connections = psutil.net_connections()
+        return [
+            {
+                'fd': conn.fd,
+                'family': conn.family,
+                'type': conn.type,
+                'laddr': conn.laddr,
+                'raddr': conn.raddr,
+                'status': conn.status,
+                'pid': conn.pid
+            }
+            for conn in connections
+        ]
+
+    def get_network_interfaces(self) -> Dict[str, Any]:
+        interfaces = psutil.net_if_addrs()
+        stats = psutil.net_if_stats()
+        result = {}
+        for interface, addrs in interfaces.items():
+            result[interface] = {
+                'addresses': [
+                    {
+                        'family': addr.family,
+                        'address': addr.address,
+                        'netmask': addr.netmask,
+                        'broadcast': addr.broadcast,
+                        'ptp': addr.ptp
+                    }
+                    for addr in addrs
+                ],
+                'stats': {
+                    'isup': stats[interface].isup,
+                    'duplex': stats[interface].duplex,
+                    'speed': stats[interface].speed,
+                    'mtu': stats[interface].mtu
+                }
+            }
+        return result
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.stop()
+
+    async def __aenter__(self):
+        await self.start_async()
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.stop_async()
\ No newline at end of file
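The profiler exposes both explicit start/stop calls and sync/async context-manager protocols. A minimal usage sketch, assuming the module is importable from wherever it ends up packaged, with `download_dataset` standing in for any network-bound user code:

```python
from network_profiler import NetworkProfiler  # adjust to the final packaged path

# Explicit start/stop: stop() returns the total counters for the profiled window.
profiler = NetworkProfiler(interval=0.5, detailed=False)
profiler.start()
download_dataset()                      # hypothetical network-heavy work
usage = profiler.stop()
print(usage["bytes_sent"], usage["bytes_recv"])

# Or as a context manager; sampled history stays available on the instance.
with NetworkProfiler(interval=0.5) as prof:
    download_dataset()
print(prof.get_current_usage())         # last sample, or zeros if none was taken yet
```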
diff --git a/requirements.txt b/requirements.txt
index 627d528..73aa91b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,8 +11,15 @@ pynvml>=8.0.0
 colorama>=0.4.4
 tqdm>=4.60.0
 pytest>=6.2.0
+pytest-asyncio>=0.14.0
 black>=20.8b1
 isort>=5.7.0
 flake8>=3.8.0
 mypy>=0.800
-pdfkit==1.0.0
\ No newline at end of file
+aiofiles>=0.6.0
+asyncio>=3.4.3
+networkx>=2.5
+optuna>=2.3.0
+ray>=1.2.0
+sphinx>=3.4.3
+sphinx-rtd-theme>=0.5.1
\ No newline at end of file
diff --git a/setup.py b/setup.py
index a0e29ed..0108feb 100644
--- a/setup.py
+++ b/setup.py
@@ -1,26 +1,25 @@
 from setuptools import setup, find_packages
 
-with open('README.md', encoding='utf-8') as f:
-    long_description = f.read()
-
 setup(
     name="memoraith",
-    version="0.1.0",
-    author="Your Name",
-    author_email="your.email@example.com",
-    description="A lightweight model profiler for deep learning frameworks",
-    long_description=long_description,
+    version="0.2.0",
+    author="Mehdi El Jouhfi",
+    author_email="midojouhfi@gmail.com",
+    description="Advanced lightweight model profiler for deep learning frameworks",
+    long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
-    url="https://github.com/yourusername/memoraith",
-    packages=find_packages(),
+    url="https://github.com/mehdi342/Memoraith",
+    packages=find_packages(exclude=['tests', 'tests.*']),
     classifiers=[
-        "Development Status :: 3 - Alpha",
+        "Development Status :: 4 - Beta",
         "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
         "License :: OSI Approved :: MIT License",
         "Programming Language :: Python :: 3",
         "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
     ],
     python_requires='>=3.7',
     install_requires=[
@@ -28,18 +27,47 @@
         'tensorflow>=2.4.0',
         'matplotlib>=3.3.0',
         'seaborn>=0.11.0',
-        'pdfkit>=0.6.0',
-        'aiofiles>=0.6.0',
+        'plotly>=4.14.0',
+        'pandas>=1.2.0',
         'jinja2>=2.11.0',
+        'pdfkit>=0.6.0',
+        'psutil>=5.8.0',
         'pynvml>=8.0.0',
+        'colorama>=0.4.4',
+        'tqdm>=4.60.0',
+        'aiofiles>=0.6.0',
+        'asyncio>=3.4.3',
+        'networkx>=2.5',
     ],
     extras_require={
+        'full': [
+            'torch>=1.7.0',
+            'tensorflow>=2.4.0',
+            'tensorboard>=2.4.0',
+            'optuna>=2.3.0',
+            'ray>=1.2.0',
+        ],
         'dev': [
             'pytest>=6.2.0',
             'pytest-asyncio>=0.14.0',
             'black>=20.8b1',
             'isort>=5.7.0',
             'flake8>=3.8.0',
+            'mypy>=0.800',
+            'tox>=3.20.0',
+            'sphinx>=3.4.3',
+            'sphinx-rtd-theme>=0.5.1',
+        ],
+    },
+    entry_points={
+        'console_scripts': [
+            'memoraith=memoraith.cli:main',
         ],
     },
+    include_package_data=True,
+    zip_safe=False,
+    project_urls={
+        'Bug Reports': 'https://github.com/mehdi342/Memoraith/issues',
+        'Source': 'https://github.com/mehdi342/Memoraith/',
+    },
 )
\ No newline at end of file
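The new `console_scripts` entry point expects a `memoraith.cli` module exposing `main()`, which is not part of this diff. A hypothetical placeholder showing the shape setuptools expects (the module path, flags, and behaviour here are illustrative assumptions, not the project's actual CLI):

```python
# memoraith/cli.py -- hypothetical placeholder for the console_scripts entry point
import argparse

def main() -> None:
    parser = argparse.ArgumentParser(
        prog="memoraith",
        description="Memoraith profiling command line")
    parser.add_argument("--output", default="profiling_results/",
                        help="directory for generated reports")
    args = parser.parse_args()
    print(f"Reports will be written to {args.output}")

if __name__ == "__main__":
    main()
```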
diff --git a/testdata.py b/testdata.py
index f8e0466..9838ce2 100644
--- a/testdata.py
+++ b/testdata.py
@@ -12,13 +12,13 @@ def generate_project_structure(directory, indent_level=0):
 
         structure += f"{indent}├── {os.path.basename(root)}/\n"
         sub_indent = '│   ' * (level + 1 - indent_level)
         for file in files:
-            structure += f"{sub_indent}├── {file}\n"
+            if file.endswith('.py'):
+                structure += f"{sub_indent}├── {file}\n"
         dirs[:] = [d for d in dirs if d != 'venv']  # Skip venv directory
     return structure
 
 def extract_classes_and_methods(content):
-    # Regular expressions to capture class and method definitions in Python files
     class_regex = r'class\s+(\w+)\s*(\(.*?\))?:'
     method_regex = r'def\s+(\w+)\s*\(.*?\):'
 
@@ -30,7 +30,6 @@ def extract_classes_and_methods(content):
         extracted_content += f"\nClass: {class_name}\n"
         extracted_content += "-" * 80 + "\n"
 
-        # Now, extract methods inside the class
         method_matches = re.findall(method_regex, content)
         for method_match in method_matches:
             extracted_content += f"    Method: {method_match}\n"
@@ -44,28 +43,26 @@ def read_files_recursively(directory):
             continue
 
         for file in files:
-            file_path = os.path.join(root, file)
-            print(f"Processing file: {file_path}")
-            content += f"File: {file_path}\n\n"
-            try:
-                # Attempt to read every file as a text file
-                with open(file_path, 'r', encoding='utf-8') as f:
-                    file_content = f.read()
-                content += file_content
+            if file.endswith('.py'):
+                file_path = os.path.join(root, file)
+                print(f"Processing file: {file_path}")
+                content += f"File: {file_path}\n\n"
+                try:
+                    with open(file_path, 'r', encoding='utf-8') as f:
+                        file_content = f.read()
+                    content += file_content
 
-                # If it's a Python file, extract class and method definitions
-                if file.endswith('.py'):
                     extracted_classes_methods = extract_classes_and_methods(file_content)
                     content += extracted_classes_methods
-            except UnicodeDecodeError:
-                try:
-                    with open(file_path, 'r', encoding='ISO-8859-1') as f:
-                        file_content = f.read()
-                    content += file_content
-                except Exception as e:
-                    content += f"Error reading file: {e}"
-            content += "\n\n" + "-"*80 + "\n\n"
+                except UnicodeDecodeError:
+                    try:
+                        with open(file_path, 'r', encoding='ISO-8859-1') as f:
+                            file_content = f.read()
+                        content += file_content
+                    except Exception as e:
+                        content += f"Error reading file: {e}"
+                content += "\n\n" + "-"*80 + "\n\n"
 
     return content
 
 def save_content_to_txt(directory, output_file):
diff --git a/tox.ini b/tox.ini
index 1597174..4e630b2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,25 +1,56 @@
 [tox]
-envlist = py37, py38, py39, flake8, mypy
+envlist = py38, py39, py310, flake8, mypy, docs
 isolated_build = true
 
+[gh-actions]
+python =
+    3.8: py38
+    3.9: py39
+    3.10: py310, flake8, mypy, docs
+
 [testenv]
 deps =
     pytest>=6.2.0
-    -r requirements.txt
+    pytest-asyncio>=0.14.0
+    -r{toxinidir}/requirements.txt
 commands =
-    pytest tests
+    pytest tests {posargs}
+setenv =
+    PYTHONPATH = {toxinidir}
 
 [testenv:flake8]
 deps = flake8
 commands = flake8 memoraith tests
 
 [testenv:mypy]
-deps = mypy
+deps =
+    mypy
+    types-psutil
+    types-setuptools
 commands = mypy memoraith
 
+[testenv:docs]
+description = Build the documentation
+deps =
+    sphinx
+    sphinx-rtd-theme
+commands =
+    sphinx-build -W -b html docs/source docs/build/html
+
 [flake8]
 max-line-length = 120
-exclude = .tox,*.egg,build,data
+exclude = .tox,*.egg,build,data,.git,__pycache__,docs
+select = E,W,F
 
 [mypy]
 ignore_missing_imports = True
+strict_optional = True
+warn_redundant_casts = True
+warn_unused_ignores = True
+disallow_any_generics = True
+check_untyped_defs = True
+no_implicit_reexport = True
+
+[pytest]
+asyncio_mode = auto
+testpaths = tests
\ No newline at end of file
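With `pytest-asyncio` in the test dependencies and `asyncio_mode = auto` set above, plain `async def` tests are collected without extra markers. A hypothetical test module (the file path and import location are assumptions matching the module added earlier in this diff) might look like:

```python
# tests/test_network_profiler.py -- hypothetical example enabled by asyncio_mode = auto
from network_profiler import NetworkProfiler  # adjust to the final packaged path

async def test_async_profiling_roundtrip():
    profiler = NetworkProfiler(interval=0.05)
    await profiler.start_async()
    usage = await profiler.stop_async()
    assert usage["bytes_sent"] >= 0
    assert usage["bytes_recv"] >= 0
```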