Feature/sg 216 remove dataset interface (#356)
* lint

* lint cont

* tests

* pretrained transfer learning tests fixed

* dataloaders concentrated to single file

* train from config and get added

* recipes updated and checked

* lint

* unit test fixes

* lr cooldown unit test fix

* pretrained models test fixes

* dataloader imports and minor fixes
shaydeci authored Sep 11, 2022
1 parent e7ade1c commit 769d996
Showing 114 changed files with 827 additions and 3,578 deletions.
8 changes: 3 additions & 5 deletions src/super_gradients/__init__.py
@@ -1,15 +1,13 @@
-from super_gradients.training import ARCHITECTURES, losses, utils, datasets_utils, DataAugmentation, \
-    TestDatasetInterface, SegmentationTestDatasetInterface, DetectionTestDatasetInterface, ClassificationTestDatasetInterface, SgModel, KDModel, \
+from super_gradients.training import ARCHITECTURES, losses, utils, datasets_utils, DataAugmentation, SgModel, KDModel, \
     Trainer, KDTrainer
 from super_gradients.common import init_trainer, is_distributed
 from super_gradients.examples.train_from_recipe_example import train_from_recipe
 from super_gradients.examples.train_from_kd_recipe_example import train_from_kd_recipe
 from super_gradients.sanity_check import env_sanity_check

 __all__ = ['ARCHITECTURES', 'losses', 'utils', 'datasets_utils', 'DataAugmentation',
-           'TestDatasetInterface', 'Trainer', 'KDTrainer', 'SegmentationTestDatasetInterface', 'DetectionTestDatasetInterface',
-           'ClassificationTestDatasetInterface', 'init_trainer', 'is_distributed', 'train_from_recipe', 'train_from_kd_recipe',
+           'Trainer', 'KDTrainer',
+           'init_trainer', 'is_distributed', 'train_from_recipe', 'train_from_kd_recipe',
            'env_sanity_check', 'KDModel', 'SgModel']


 env_sanity_check()
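For readers migrating code, the removed *TestDatasetInterface classes map onto the dataloader factories introduced later in this commit. A minimal sketch of the replacement imports, using only names that appear in the hunks below:

    # Before (removed by this commit):
    # from super_gradients import ClassificationTestDatasetInterface

    # After: small synthetic loaders come from a factory function instead
    from super_gradients import Trainer
    from super_gradients.training.dataloaders.dataloaders import classification_test_dataloader

    train_loader = classification_test_dataloader()
    valid_loader = classification_test_dataloader()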
26 changes: 0 additions & 26 deletions src/super_gradients/common/factories/datasets_factory.py

This file was deleted.

@@ -49,9 +49,7 @@
                     LRSchedulerCallback(scheduler=step_lr_scheduler, phase=Phase.TRAIN_EPOCH_END)]

 # Bring everything together with Trainer and start training
-trainer = Trainer("Cifar10_external_objects_example", multi_gpu=MultiGPUMode.OFF,
-                  train_loader=train_loader, valid_loader=valid_loader, classes=train_dataset.classes)
-trainer.build_model(net)
+trainer = Trainer("Cifar10_external_objects_example", multi_gpu=MultiGPUMode.OFF)

 train_params = {"max_epochs": 300,
                 "phase_callbacks": phase_callbacks,
@@ -65,4 +63,4 @@
                 "greater_metric_to_watch_is_better": True,
                 "lr_scheduler_step_type": "epoch"}

-trainer.train(training_params=train_params)
+trainer.train(model=net, training_params=train_params, train_loader=train_loader, valid_loader=valid_loader)
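The hunk above captures the core API change of this commit: the model and the loaders move out of the Trainer constructor and build_model into train(). A self-contained sketch with plain PyTorch objects; the CIFAR-10 dataset, the ResNet model choice, and the minimal training_params dict are illustrative assumptions, not part of the diff:

    import torchvision
    from torch.utils.data import DataLoader
    from torchvision.models import resnet18
    from torchvision.transforms import Compose, ToTensor
    from super_gradients import Trainer
    from super_gradients.common import MultiGPUMode
    from super_gradients.training.metrics import Accuracy

    # Any torch Dataset/DataLoader pair works; nothing is "connected" to the Trainer anymore.
    train_set = torchvision.datasets.CIFAR10("./data", train=True, download=True, transform=Compose([ToTensor()]))
    valid_set = torchvision.datasets.CIFAR10("./data", train=False, download=True, transform=Compose([ToTensor()]))
    train_loader = DataLoader(train_set, batch_size=256, shuffle=True)
    valid_loader = DataLoader(valid_set, batch_size=512)

    net = resnet18(num_classes=10)  # any nn.Module

    trainer = Trainer("Cifar10_external_objects_example", multi_gpu=MultiGPUMode.OFF)
    trainer.train(model=net,
                  training_params={"max_epochs": 1, "initial_lr": 0.1, "lr_mode": "step",
                                   "lr_updates": [1], "lr_decay_factor": 0.1,
                                   "loss": "cross_entropy", "optimizer": "SGD",
                                   "train_metrics_list": [Accuracy()], "valid_metrics_list": [Accuracy()],
                                   "metric_to_watch": "Accuracy", "greater_metric_to_watch_is_better": True},
                  train_loader=train_loader, valid_loader=valid_loader)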
@@ -14,15 +14,14 @@
 import torch

-from super_gradients.training.datasets.dataset_interfaces.dataset_interface import ImageNetDatasetInterface
-
+from super_gradients.common import MultiGPUMode
+from super_gradients.training.datasets.datasets_utils import RandomResizedCropAndInterpolation
+from torchvision.transforms import RandomHorizontalFlip, ColorJitter, ToTensor, Normalize
 import super_gradients
-from super_gradients.training import Trainer, MultiGPUMode
-from super_gradients.training.models import HpmStruct
+from super_gradients.training import Trainer, models, dataloaders
 import argparse

 from super_gradients.training.metrics import Accuracy, Top5

+from super_gradients.training.datasets.data_augmentation import RandomErase
 parser = argparse.ArgumentParser()
 super_gradients.init_trainer()

@@ -56,19 +55,27 @@
                 "random_erase_prob": 0.2,
                 "random_erase_value": 'random',
                 "train_interpolation": 'random',
                 "auto_augment_config_string": 'rand-m9-mstd0.5'
                 }


+train_transforms = [RandomResizedCropAndInterpolation(size=224, interpolation="random"),
+                    RandomHorizontalFlip(),
+                    ColorJitter(0.4, 0.4, 0.4),
+                    ToTensor(),
+                    Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+                    RandomErase(0.2, "random")
+                    ]
+
 trainer = Trainer(experiment_name=args.experiment_name,
                   multi_gpu=MultiGPUMode.DISTRIBUTED_DATA_PARALLEL if distributed else MultiGPUMode.DATA_PARALLEL,
                   device='cuda')

-dataset = ImageNetDatasetInterface(dataset_params=dataset_params)
-
-trainer.connect_dataset_interface(dataset, data_loader_num_workers=8 * devices)
+train_loader = dataloaders.imagenet_train(dataset_params={"transforms": train_transforms},
+                                          dataloader_params={"batch_size": args.batch})
+valid_loader = dataloaders.imagenet_val()

-arch_params = HpmStruct(**{"num_classes": 1000, "aux_head": False, "classification_mode": True, 'dropout_prob': 0.3})
+model = models.get("ddrnet_23_slim" if args.slim else "ddrnet_23",
+                   arch_params={"aux_head": False, "classification_mode": True, 'dropout_prob': 0.3},
+                   num_classes=1000)

-trainer.build_model(architecture="ddrnet_23_slim" if args.slim else "ddrnet_23",
-                    arch_params=arch_params)
-trainer.train(training_params=train_params_ddr)
+trainer.train(model=model, training_params=train_params_ddr, train_loader=train_loader, valid_loader=valid_loader)
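The factory call above shows the new two-dict convention: dataset_params override how the underlying dataset is constructed (for example its transforms), while dataloader_params are forwarded to the torch DataLoader. A short usage sketch; the abbreviated transform list and the extra keyword arguments are assumptions:

    from torchvision.transforms import RandomHorizontalFlip, ToTensor
    from super_gradients.training import dataloaders

    train_transforms = [RandomHorizontalFlip(), ToTensor()]  # abbreviated version of the list above

    # dataset_params shape the Dataset; dataloader_params shape the DataLoader wrapped around it
    train_loader = dataloaders.imagenet_train(dataset_params={"transforms": train_transforms},
                                              dataloader_params={"batch_size": 64, "num_workers": 8})
    valid_loader = dataloaders.imagenet_val(dataloader_params={"batch_size": 64})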
@@ -4,7 +4,8 @@
 The main purpose of this code is to demonstrate how to upload the model to the platform, optimize and download it
 after training is complete, using DeciPlatformCallback.
 """
-from super_gradients import Trainer, ClassificationTestDatasetInterface
+from super_gradients import Trainer
+from super_gradients.training.dataloaders.dataloaders import classification_test_dataloader
 from super_gradients.training.metrics import Accuracy, Top5
 from super_gradients.training.utils.callbacks import DeciLabUploadCallback, ModelConversionCheckCallback
 from deci_lab_client.models import (
@@ -28,8 +29,6 @@ def main(architecture_name: str):
         model_checkpoints_location="local",
         ckpt_root_dir=checkpoint_dir,
     )
-    dataset = ClassificationTestDatasetInterface(dataset_params={"batch_size": 10})
-    trainer.connect_dataset_interface(dataset, data_loader_num_workers=0)

     trainer.build_model(architecture=architecture_name, arch_params={"use_aux_heads": True, "aux_head": True})
@@ -91,7 +90,8 @@ def main(architecture_name: str):

     # RUN TRAINING. ONCE ALL EPOCHS ARE DONE THE OPTIMIZED MODEL FILE WILL BE LOCATED IN THE EXPERIMENT'S
     # CHECKPOINT DIRECTORY
-    trainer.train(train_params)
+    trainer.train(train_params, train_loader=classification_test_dataloader(),
+                  valid_loader=classification_test_dataloader())


 if __name__ == "__main__":
@@ -1,15 +1,12 @@
 import os
-from super_gradients.training import Trainer
-from super_gradients.training.datasets.dataset_interfaces import Cifar10DatasetInterface
+from super_gradients.training import Trainer, models
 from super_gradients.training.metrics.classification_metrics import Accuracy, Top5

+from super_gradients.training.dataloaders.dataloaders import cifar10_train, cifar10_val
 os.environ["DECI_PLATFORM_TOKEN"] = "XXX"  # Replace XXX with your token


 trainer = Trainer(experiment_name='demo-deci-platform-logger')
-dataset = Cifar10DatasetInterface(dataset_params={"batch_size": 256, "val_batch_size": 512})
-trainer.connect_dataset_interface(dataset, data_loader_num_workers=8)
-trainer.build_model("resnet18")
+model = models.get("resnet18", num_classes=10)

 trainer.train(training_params={"max_epochs": 20,
                                "lr_updates": [5, 10, 15],
@@ -23,4 +20,6 @@
                                "valid_metrics_list": [Accuracy(), Top5()],
                                "metric_to_watch": "Accuracy",
                                "greater_metric_to_watch_is_better": True,
-                               "sg_logger": "deci_platform_sg_logger"})
+                               "sg_logger": "deci_platform_sg_logger"},
+              train_loader=cifar10_train(),
+              valid_loader=cifar10_val())
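Note how the batch sizes migrate: the deleted Cifar10DatasetInterface took them as a combined dataset_params dict ({"batch_size": 256, "val_batch_size": 512}), while the factory functions take one override per loader, mirroring the imagenet_train call earlier in this commit. A sketch under that assumption (the dataloader_params kwarg for the CIFAR factories is inferred by analogy, not shown in this hunk):

    from super_gradients.training.dataloaders.dataloaders import cifar10_train, cifar10_val

    # one override dict per loader replaces the combined batch_size/val_batch_size pair
    train_loader = cifar10_train(dataloader_params={"batch_size": 256})
    valid_loader = cifar10_val(dataloader_params={"batch_size": 512})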
13 changes: 6 additions & 7 deletions src/super_gradients/examples/early_stop/early_stop_example.py
@@ -2,10 +2,11 @@
 # Reaches ~94.9 Accuracy after 250 Epochs
 import super_gradients
 from super_gradients import Trainer
-from super_gradients.training.datasets.dataset_interfaces.dataset_interface import Cifar10DatasetInterface
+from super_gradients.training import models, dataloaders
 from super_gradients.training.metrics.classification_metrics import Accuracy, Top5
 from super_gradients.training.utils.early_stopping import EarlyStop
 from super_gradients.training.utils.callbacks import Phase
+
 # Define Parameters
 super_gradients.init_trainer()
@@ -22,10 +23,8 @@
 # Define Model
 trainer = Trainer("Callback_Example")

-# Connect Dataset
-dataset = Cifar10DatasetInterface()
-trainer.connect_dataset_interface(dataset, data_loader_num_workers=8)
-
-# Build Model
-trainer.build_model("resnet18_cifar")
-trainer.train(training_params=train_params)
+model = models.get("resnet18_cifar", num_classes=10)
+
+trainer.train(model=model, training_params=train_params,
+              train_loader=dataloaders.cifar10_train(), valid_loader=dataloaders.cifar10_val())
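The imports kept by this hunk (EarlyStop, Phase) are what the example wires into phase_callbacks; the elided middle of the file defines train_params. A minimal sketch of that wiring; the EarlyStop constructor arguments shown are assumptions based on the imported names, not taken from the diff:

    from super_gradients.training.utils.early_stopping import EarlyStop
    from super_gradients.training.utils.callbacks import Phase

    # stop training when validation Accuracy stops improving (assumed signature)
    early_stop = EarlyStop(Phase.VALIDATION_EPOCH_END, monitor="Accuracy",
                           mode="max", patience=3, verbose=True)

    train_params = {"max_epochs": 250,
                    "phase_callbacks": [early_stop],
                    # ...remaining hyper-parameters as defined in the example...
                    }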
Empty file.
27 changes: 0 additions & 27 deletions src/super_gradients/examples/legacy/cifar_resnet/cifar_example.py

This file was deleted.

31 changes: 0 additions & 31 deletions src/super_gradients/examples/legacy/darknet53_example.py

This file was deleted.

Empty file.

This file was deleted.

Empty file.

This file was deleted.

Empty file.

This file was deleted.

Empty file.

This file was deleted.

Empty file.
