remove distributed calls
IlyasMoutawwakil committed Sep 23, 2024
1 parent 2e9eb50, commit 0501317
Showing 3 changed files with 1 addition and 20 deletions.
optimum_benchmark/trackers/energy.py: 7 changes (1 addition, 6 deletions)
@@ -5,14 +5,11 @@
 from logging import getLogger
 from typing import List, Literal, Optional, Union
 
-from ..import_utils import is_codecarbon_available, is_torch_available, is_torch_distributed_available
+from ..import_utils import is_codecarbon_available, is_torch_available
 
 if is_torch_available():
     import torch
 
-if is_torch_distributed_available():
-    import torch.distributed
-
 if is_codecarbon_available():
     from codecarbon import EmissionsTracker, OfflineEmissionsTracker
     from codecarbon.output import EmissionsData
@@ -115,9 +112,7 @@ def __init__(self, backend: str, device: str, device_ids: Optional[Union[str, in
         self.device_ids = device_ids
 
         self.is_gpu = self.device == "cuda"
-        self.is_engine = self.backend in ["vllm", "tensorrt-llm"]
         self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")
-        self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized()
 
         LOGGER.info("\t+ Tracking CPU and RAM energy")
 
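For context on what was dropped: the is_distributed flag paired a capability check with a runtime state check. Below is a minimal sketch of that idiom using only public torch.distributed calls; the helper name is illustrative and not part of optimum-benchmark.

import torch.distributed as dist

def torch_distributed_is_active() -> bool:
    # The module can be built without backend support, so check availability
    # first; even then, a process only counts as distributed after
    # init_process_group() has been called.
    return dist.is_available() and dist.is_initialized()

print(torch_distributed_is_active())  # False in a plain single-process run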
optimum_benchmark/trackers/latency.py: 9 changes (0 additions, 9 deletions)
@@ -4,11 +4,6 @@
 from logging import getLogger
 from typing import List, Literal, Optional, Union
 
-from ..import_utils import is_torch_distributed_available
-
-if is_torch_distributed_available():
-    import torch.distributed
-
 import numpy as np
 import torch
 from transformers import LogitsProcessor, TrainerCallback
@@ -123,9 +118,7 @@ def __init__(self, device: str, backend: str):
         self.device = device
         self.backend = backend
 
-        self.is_engine = self.backend in ["vllm", "tensorrt-llm"]
         self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")
-        self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized()
 
         if self.is_pytorch_cuda:
             LOGGER.info("\t+ Tracking latency using Pytorch CUDA events")
@@ -253,9 +246,7 @@ def __init__(self, device: str, backend: str):
         self.device = device
         self.backend = backend
 
-        self.is_engine = self.backend in ["vllm", "tensorrt-llm"]
         self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")
-        self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized()
 
         if self.is_pytorch_cuda:
             LOGGER.info("\t+ Tracking latency using Pytorch CUDA events")
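The latency tracker now keys its behavior on is_pytorch_cuda alone. As a rough illustration of the two timing paths behind the log message above (CUDA events versus a CPU performance counter), with function names that are illustrative rather than the tracker's API:

import time

import torch

def time_cuda_ms(fn) -> float:
    # CUDA kernels launch asynchronously, so timestamps are recorded with
    # events on the GPU stream and read back only after synchronization.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    fn()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end)  # milliseconds

def time_cpu_s(fn) -> float:
    # Wall-clock fallback for every other backend/device combination.
    tic = time.perf_counter()
    fn()
    return time.perf_counter() - tic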
optimum_benchmark/trackers/memory.py: 5 changes (0 additions, 5 deletions)
@@ -11,15 +11,12 @@
     is_pynvml_available,
     is_pyrsmi_available,
     is_torch_available,
-    is_torch_distributed_available,
 )
 from ..system_utils import is_nvidia_system, is_rocm_system
 
 if is_rocm_system() and is_pyrsmi_available():
     from pyrsmi import rocml
 
-if is_torch_distributed_available():
-    import torch.distributed
-
 if is_nvidia_system() and is_pynvml_available():
     import pynvml
@@ -102,9 +99,7 @@ def __init__(self, device: str, backend: str, device_ids: Optional[Union[str, in
         self.monitored_pid = os.getpid()
 
         self.is_gpu = device == "cuda"
-        self.is_engine = backend in ["vllm", "tensorrt-llm"]
         self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")
-        self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized()
 
         LOGGER.info(f"\t+ Tracking RAM memory of process [{self.monitored_pid}]")
 
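memory.py keeps its pynvml import for NVIDIA systems (first hunk above). A standalone sketch, not the tracker's own code, of reading device memory through that library:

import pynvml

pynvml.nvmlInit()
try:
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)  # first visible GPU
    info = pynvml.nvmlDeviceGetMemoryInfo(handle)
    print(f"GPU 0: {info.used / 1024**2:.0f} MiB used of {info.total / 1024**2:.0f} MiB")
finally:
    pynvml.nvmlShutdown()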
