main.py
import logging
import os

import torch
from torch.utils.tensorboard import SummaryWriter

from utils import (set_model_config, set_dataset, set_models, set_parser,
                   set_seed)
from eval import eval_model
from trainer import train

logger = logging.getLogger(__name__)

def main():
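    # Parse command-line arguments and declare the globals that track the
    # best accuracies.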
    args = set_parser()
    global best_acc
    global best_acc_val
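
    # Single-process run: pick one GPU directly; otherwise initialise
    # torch.distributed with the NCCL backend for multi-GPU training.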
    if args.local_rank == -1:
        device = torch.device('cuda', args.gpu_id)
        args.world_size = 1
        args.n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.world_size = torch.distributed.get_world_size()
        args.n_gpu = 1
    args.device = device
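
    # Log at INFO level on the main process only; other ranks log warnings
    # and above.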
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.warning(
        f"Process rank: {args.local_rank}, "
        f"device: {args.device}, "
        f"n_gpu: {args.n_gpu}, "
        f"distributed training: {bool(args.local_rank != -1)}, "
        f"16-bits training: {args.amp}")

    logger.info(dict(args._get_kwargs()))
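
    # Seed the RNGs for reproducibility and create the output/TensorBoard
    # directory on the main process.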
    if args.seed is not None:
        set_seed(args)

    if args.local_rank in [-1, 0]:
        os.makedirs(args.out, exist_ok=True)
        args.writer = SummaryWriter(args.out)

    set_model_config(args)

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
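
    # Build data loaders, model, optimizer and LR scheduler, then report the
    # total parameter count.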
    labeled_trainloader, unlabeled_dataset, test_loader, val_loader, ood_loaders \
        = set_dataset(args)
    model, optimizer, scheduler = set_models(args)
    logger.info("Total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1e6))
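
    # Optionally maintain an exponential-moving-average copy of the weights.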
    # Default to None so the train/eval calls below always receive a defined
    # ema_model even when EMA is disabled.
    ema_model = None
    if args.use_ema:
        from models.ema import ModelEMA
        ema_model = ModelEMA(args, model, args.ema_decay)

    args.start_epoch = 0
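
    # Restore model, EMA, optimizer and scheduler state from a checkpoint
    # file when args.resume points to one.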
    if args.resume:
        logger.info("==> Resuming from checkpoint..")
        assert os.path.isfile(
            args.resume), "Error: no checkpoint file found!"
        args.out = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_acc = checkpoint['best_acc']
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        if args.use_ema:
            ema_model.ema.load_state_dict(checkpoint['ema_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
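
    # Mixed-precision training via NVIDIA Apex, when enabled.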
    if args.amp:
        from apex import amp
        model, optimizer = amp.initialize(
            model, optimizer, opt_level=args.opt_level)
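
    # Wrap the model for distributed data-parallel training.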
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank],
            output_device=args.local_rank, find_unused_parameters=True)

    model.zero_grad()
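
    # Either run the full training loop or only evaluate the model.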
    if not args.eval_only:
        logger.info("***** Running training *****")
        logger.info(f"  Task = {args.dataset}@{args.num_labeled}")
        logger.info(f"  Num Epochs = {args.epochs}")
        logger.info(f"  Batch size per GPU = {args.batch_size}")
        logger.info(f"  Total train batch size = {args.batch_size * args.world_size}")
        logger.info(f"  Total optimization steps = {args.total_steps}")
        train(args, labeled_trainloader, unlabeled_dataset, test_loader, val_loader,
              ood_loaders, model, optimizer, ema_model, scheduler)
    else:
        logger.info("***** Running Evaluation *****")
        logger.info(f"  Task = {args.dataset}@{args.num_labeled}")
        eval_model(args, labeled_trainloader, unlabeled_dataset, test_loader, val_loader,
                   ood_loaders, model, ema_model)

if __name__ == '__main__':
main()
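
# A typical way to launch this script (an assumption based on the
# local_rank / NCCL handling above, not a command documented here):
#   single GPU:   python main.py ...
#   distributed:  python -m torch.distributed.launch --nproc_per_node=4 main.py ...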