-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtrain_convnet_loop_inner_B2.py
699 lines (589 loc) · 39.4 KB
/
train_convnet_loop_inner_B2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
#%% IMPORT
import os
import argparse
from ast import literal_eval
# Matplotlib would otherwise create a temporary config/cache directory (e.g. /tmp/matplotlib-XXXX)
# when the default path (/.config/matplotlib) is not writable; setting MPLCONFIGDIR to a writable
# directory speeds up the matplotlib import and better supports multiprocessing.
os.environ['MPLCONFIGDIR'] = os.getcwd() + "/configs/"
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from tqdm.auto import tqdm
import torch
from torch import nn
from torchvision.transforms import Compose
import random
import pandas as pd
from sklearn.metrics import roc_auc_score
import torch.optim.lr_scheduler as lr_scheduler
import glob
## When running python jobs using Docker in a cluster system (docker run ...), we typically need to specify the --user tag in the command.
## For instance, --user $(id -u):$(id -g) will associate the right user group, so that our container is always identified as ours.
## Unfortunately, by doing so, we'd lose the 'root' privilege that is typically associated with a default docker container.
## As a result, we'd lose the ability to import pretrained weights of the popular computer vision models, since we can no longer access the so-called ./cache directory.
# To overcome this issue, we can create a custom folder - say 'pretrained_models' in our working dir, and set that as the Torch Hub and Torch Home (see below).
# The first step would be to run everything without the --user tag on the container, thus using default root privilege, and make torch download the
# weights into our created 'pretrained_models' dir. Then, for every other experiment we'd need to run, we can set the --user tag in the container
# as requested by some cluster system admins, and the script will load the pretrained weights that we saved at the previous step.
mydir=os.path.join(os.getcwd(), 'pretrained_models') ## these are the regular ImageNet-trained CV models, like resnets weights.
torch.hub.set_dir(mydir)
os.environ['TORCH_HOME']=mydir
## If you need to debug NaN and None values throughout your script, forward(), and back-prop, setting this flag to True can be useful:
# torch.autograd.set_detect_anomaly(True)
#
# Command-line interface: hyperparameters given as stringified Python lists (e.g. "--LR [0.01,0.001]")
# are parsed below with ast.literal_eval and grid-searched in the __main__ block.
parser = argparse.ArgumentParser(description="Training script for a convnet.")
parser.add_argument("--number_of_gpus",type=int,default=1,help="The number of GPUs you intend to use")
parser.add_argument("--gpus_ids",type=str,default="0",help="The comma separated list of integers representing the id of requested GPUs - such as '0,1'")
parser.add_argument("--SEED",type=int,default=42,help="fix seed to set reproducibility, eg, seed=42")
parser.add_argument("--EXPERIMENT",type=str,default="breakhis",help="geometric_dataset, prostate, imagenette, procancer, breakhis")
parser.add_argument("--CONDITIONING_FEATURE",type=str,default="aggressiveness",help="for imagenette is imagenette, for prostate can be (aggressiveness, no_tumour, scanner_vendor, disease_yes_no)")
parser.add_argument("--IMAGE_SIZE",type=int,default=128,help="imagenette 64, prostate 128")
parser.add_argument("--BATCH_SIZE_TRAIN",type=int,default=16,help="eg, 200 images")
parser.add_argument("--BATCH_SIZE_VALID",type=int,default=1,help=" 1 image")
parser.add_argument("--BATCH_SIZE_TEST",type=int,default=1,help=" 1 image")
parser.add_argument("--NUMBER_OF_EPOCHS",type=int,default=200,help="eg, 100, eventually early stopped")
parser.add_argument("--MODEL_TYPE",type=str,default="resnet18",help="eg, EqualCNN, alexnet, SimpleCNN, EqualCNN, BaseCNN, LightCNN, resnet18, resnet34, resnet50, resnet101, ")
parser.add_argument("--LR",type=str,default="[0.01,0.001]",help="list of str of floats, eg, [0.0003, 0.001]")
parser.add_argument("--WD",type=str,default="[0.01,0.01,0.001]",help="list of str of floats, eg, [0.0003, 0.001]")
parser.add_argument("--CAUSALITY_AWARENESS_METHOD", type=str, default="[None, 'max', 'lehmer']", help="[None, 'max', 'lehmer']")
parser.add_argument("--LEHMER_PARAM", type=str, default="[-2,-1,0,1,2]",help="if using Lehmer mean, which power utilize among: [-100,-1,0,1,2,100]")
parser.add_argument("--CAUSALITY_SETTING", type=str, default="['mulcat','mulcatbool']",help="if CA, which setting to use, eg: ['cat','mulcat']")
parser.add_argument("--MULCAT_CAUSES_OR_EFFECTS", type=str, default="['causes','effects']", help="if CA, which one to use for causality factors computation: ['causes','effects']")
parser.add_argument("--which_resnext_to_use",type=str, choices=["tiny","base"])
parser.add_argument("--is_pretrained",type=str, choices=["True","False"], default="False")
args = parser.parse_args()
### reproducibility: seed every RNG the training touches ##############
SEED = args.SEED
torch.manual_seed(SEED)
random.seed(SEED)
np.random.seed(SEED)
torch.cuda.manual_seed_all(SEED)
#model
model_type = args.MODEL_TYPE
which_resnext_to_use=""
if model_type=="resnext":
    which_resnext_to_use=args.which_resnext_to_use
#causality awareness and related settings (each is a list of values to grid-search over)
causality_awareness_method = literal_eval(args.CAUSALITY_AWARENESS_METHOD) #[None, 'max', 'lehmer']
LEHMER_PARAM = literal_eval(args.LEHMER_PARAM) #"[-100,-2,-1,0,1,100]"
CAUSALITY_SETTING = literal_eval(args.CAUSALITY_SETTING) #['cat','mulcat','mulcatbool']
MULCAT_CAUSES_OR_EFFECTS = literal_eval(args.MULCAT_CAUSES_OR_EFFECTS) #['causes','effects']
#define some settings about data, paths, training params, etc.
image_size = args.IMAGE_SIZE
batch_size_train = args.BATCH_SIZE_TRAIN
batch_size_valid = args.BATCH_SIZE_VALID
batch_size_test = args.BATCH_SIZE_TEST
epochs = args.NUMBER_OF_EPOCHS
LR = literal_eval(args.LR) #list of floats, "[0.001,0.0003]"
wd = literal_eval(args.WD)
#some other settings
loss_type="CrossEntropyLoss"
# argparse delivers is_pretrained as the strings "True"/"False"; convert to a real bool here.
is_pretrained = args.is_pretrained
if is_pretrained=="False":
    is_pretrained=False
elif is_pretrained=="True":
    is_pretrained=True
print(f"is_pretrained: {is_pretrained}")
is_feature_extractor = False
csv_path=""
train_root_path = val_root_path = test_root_path = ""
# Per-experiment dataset configuration: number of channels, classes and metadata CSV location.
if args.EXPERIMENT == "prostate":
    dataset_name=""
    CONDITIONING_FEATURE = args.CONDITIONING_FEATURE
    channels = 1
    num_classes = 2
    if CONDITIONING_FEATURE == "aggressiveness": # lesion aggressiveness labels: LG and HG
        dataset_name = "UNFOLDED_DATASET_5_LOW_RESOLUTION_NORMALIZED_GUIDED_CROP_GUIDED_SLICE_SELECTION"
        csv_path = os.path.join(os.getcwd(),"dataset_PICAI","csv_files","cs_les_unfolded.csv")
    elif CONDITIONING_FEATURE == "disease_yes_no":
        dataset_name = "UNFOLDED_DATASET_DISEASE_YES_NO"
        csv_path = os.path.join(os.getcwd(),"dataset_PICAI","csv_files","unfolded_disease_YesNo_balanced.csv")
elif args.EXPERIMENT == "breakhis": #TODO 31 oct 2023
    dataset_name=""
    CONDITIONING_FEATURE = args.CONDITIONING_FEATURE
    channels = 3
    num_classes = 2
    if CONDITIONING_FEATURE == "aggressiveness":
        csv_path = os.path.join(os.getcwd(),"dataset_breakhis","csv_files","breakhis_metadata_400X.csv") ##
    else:
        raise ValueError
print(f"Dataset_name: {dataset_name}\n csv_path: {csv_path}")
###############################
NUM_GPUS = args.number_of_gpus #1 #2 TODO
# "0,1" -> ['0', '1']: split the comma-separated GPU id string into single-character ids.
# NOTE(review): this assumes single-digit GPU ids — "10" would become ['1', '0']; confirm if >9 GPUs are ever used.
list_of_GPU_ids = list(args.gpus_ids)
list_of_GPU_ids = list(filter((",").__ne__, list_of_GPU_ids))
class EarlyStopper:
    """Track the best validation loss seen so far and signal when training should stop.

    Stopping fires once the loss has exceeded ``best + min_delta`` for ``patience``
    consecutive checks; any new best loss resets the counter.
    """

    def __init__(self, patience=1, min_delta=0):
        # How many consecutive "worse" validations are tolerated before stopping.
        self.patience = patience
        # Worsening margin over the best loss that still counts as "no change".
        self.min_delta = min_delta
        self.counter = 0
        self.min_validation_loss = np.inf

    def early_stop(self, validation_loss, epoch):
        """Return True when training should stop.

        ``epoch`` is accepted for caller compatibility but is not used.
        """
        improved = validation_loss < self.min_validation_loss
        if improved:
            # New best: remember it and reset the patience counter.
            self.min_validation_loss = validation_loss
            self.counter = 0
            return False
        if validation_loss > self.min_validation_loss + self.min_delta:
            # Worse than best by more than the tolerated margin.
            self.counter += 1
            return self.counter >= self.patience
        # Within the tolerance band: neither improvement nor a strike.
        return False

    def get_patience_and_minDelta(self):
        """Return the configured ``(patience, min_delta)`` tuple."""
        return (self.patience, self.min_delta)
def main(rank, world_size, causality_awareness, learning_rate, weight_decay, causality_method=None, lehmer_param=None, causality_setting="cat",mulcat_causes_or_effects="causes"):
    """Per-process (DDP) training entry point, launched once per GPU via mp.spawn.

    Builds the dataset/dataloaders for the selected experiment, instantiates the
    (possibly causality-aware) model wrapped in DistributedDataParallel, then trains
    for up to `epochs` epochs with validation every 3 epochs, checkpointing of the
    best models, early stopping, TensorBoard logging, and periodic loss/accuracy plots.

    Args:
        rank: ordinal of this process/GPU; rank 0 is the master that does all file I/O and logging.
        world_size: total number of spawned processes (== number of GPUs).
        causality_awareness: whether to build the causality-driven model variant.
        learning_rate, weight_decay: Adam hyperparameters for this run.
        causality_method: None, 'max' or 'lehmer' (causality-map aggregation).
        lehmer_param: power of the Lehmer mean (used when causality_method=='lehmer').
        causality_setting: e.g. 'cat', 'mulcat', 'mulcatbool'.
        mulcat_causes_or_effects: 'causes' or 'effects', used for causality factor computation.

    Relies on module-level globals set by the argparse section (args, image_size,
    batch sizes, epochs, model_type, csv_path, channels, num_classes, ...).
    """
    print(torch.cuda.is_available())
    # Pin this process to its own GPU (one visible device per spawned process).
    os.environ['CUDA_VISIBLE_DEVICES'] = list_of_GPU_ids[rank]
    from torch.nn.parallel import DistributedDataParallel as DDP
    import torch.distributed as dist
    from torch.utils.data import DataLoader
    from torch.optim import Adam
    from torchvision.datasets import ImageFolder
    from torchvision import transforms
    from split_train_val_test import get_or_create_datasetsCSVpaths
    from pathlib import Path
    results_folder = Path("./results_YOUR_EXPERIMENT") #TODO <------ set your desired output folder based on your experiments
    results_folder.mkdir(exist_ok = True)
    ## Set some stuff for the torch DDP setting, that is valid also for the single GPU setting anyway
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    print(f"CUDA_VISIBLE_DEVICES: {os.environ['CUDA_VISIBLE_DEVICES']}")
    ### Regarding torch DDP stuff, we need to specify which backend to use:
    ## if linux OS, then use "nccl";
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    ## if Windows, use "gloo" instead: #TODO
    # os.environ["PL_TORCH_DISTRIBUTED_BACKEND"] = "gloo"
    # dist.init_process_group("gloo", rank=rank, world_size=world_size)
    ###
    # Dataset construction: train transform adds horizontal-flip augmentation,
    # valid/test transform only resizes and converts to tensor.
    if args.EXPERIMENT == "prostate": # prostate PI-CAI
        from dataset_creator import Dataset2DSL
        my_transform = Compose([
            transforms.Resize((image_size,image_size)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # transforms.Lambda(lambda t: (t * 2) - 1) #TODO
        ])
        my_transform_valid_and_test = Compose([
            transforms.Resize((image_size,image_size)),
            transforms.ToTensor(),
            # transforms.Lambda(lambda t: (t * 2) - 1) #TODO
        ])
        path_to_train_csv, path_to_val_csv, _ = get_or_create_datasetsCSVpaths(EXPERIMENT=args.EXPERIMENT, CONDITIONING_FEATURE=CONDITIONING_FEATURE, csv_path=csv_path, testset_size=0.2, validset_size=0.15)
        dataset_train = Dataset2DSL(csv_path=path_to_train_csv, dataset_name=dataset_name, CONDITIONING_FEATURE=CONDITIONING_FEATURE, transform=my_transform, use_label=True)
        dataset_val = Dataset2DSL(csv_path=path_to_val_csv, dataset_name=dataset_name, CONDITIONING_FEATURE=CONDITIONING_FEATURE, transform=my_transform_valid_and_test, use_label=True)
    elif args.EXPERIMENT == "procancer": # prostate ProCAncer-I consortium
        from dataset_creator import Dataset2DSL
        my_transform = Compose([
            transforms.Resize((image_size,image_size)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # transforms.Lambda(lambda t: (t * 2) - 1)
        ])
        my_transform_valid_and_test = Compose([
            transforms.Resize((image_size,image_size)),
            transforms.ToTensor(),
            # transforms.Lambda(lambda t: (t * 2) - 1)
        ])
        path_to_train_csv, path_to_val_csv, _ = get_or_create_datasetsCSVpaths(EXPERIMENT=args.EXPERIMENT, CONDITIONING_FEATURE=CONDITIONING_FEATURE, csv_path=csv_path, testset_size=0.2, validset_size=0.15)
        dataset_train = Dataset2DSL(csv_path=path_to_train_csv, dataset_name=dataset_name, CONDITIONING_FEATURE=CONDITIONING_FEATURE, transform=my_transform, use_label=True)
        dataset_val = Dataset2DSL(csv_path=path_to_val_csv, dataset_name=dataset_name, CONDITIONING_FEATURE=CONDITIONING_FEATURE, transform=my_transform_valid_and_test, use_label=True)
    elif args.EXPERIMENT == "breakhis": # breast histopathology slides
        from dataset_creator import BREAKHISDataset2D
        my_transform = Compose([
            transforms.Resize((image_size,image_size)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # transforms.Lambda(lambda t: (t * 2) - 1)
        ])
        my_transform_valid_and_test = Compose([
            transforms.Resize((image_size,image_size)),
            transforms.ToTensor(),
            # transforms.Lambda(lambda t: (t * 2) - 1)
        ])
        path_to_train_csv, path_to_val_csv, _ = get_or_create_datasetsCSVpaths(EXPERIMENT=args.EXPERIMENT, CONDITIONING_FEATURE=CONDITIONING_FEATURE, csv_path=csv_path)
        dataset_train = BREAKHISDataset2D(csv_path=path_to_train_csv, cls_type="binary", transform=my_transform)
        dataset_val = BREAKHISDataset2D(csv_path=path_to_val_csv, cls_type="binary", transform=my_transform_valid_and_test)
    # prepare the dataloaders: DistributedSampler shards the data across ranks,
    # so the DataLoader itself must have shuffle=False (the sampler shuffles).
    from torch.utils.data.distributed import DistributedSampler
    sampler_train = DistributedSampler(dataset_train, num_replicas=world_size, rank=rank, shuffle=True, drop_last=False)
    dataloader_train = DataLoader(dataset_train, batch_size=batch_size_train, pin_memory=False, num_workers=0, drop_last=False, shuffle=False, sampler=sampler_train)
    print(f"dataloader_train of size {len(dataloader_train)} batches, each of {batch_size_train}")
    sampler_valid = DistributedSampler(dataset_val, num_replicas=world_size, rank=rank, shuffle=False, drop_last=False)
    dataloader_valid = DataLoader(dataset_val, batch_size=batch_size_valid, pin_memory=False, num_workers=0, drop_last=False, shuffle=False, sampler=sampler_valid)
    print(f"dataloader_valid of size {len(dataloader_valid)} batches, each of {batch_size_valid}")
    ## when using torch DDP settings (multiple GPUs), each process has its own rank, e.g., 0,1,2: we set that value to the device variable governing the ordinal of the GPU device to use
    device=rank
    ##
    number_of_feature_maps = None  # only meaningful for the EqualCNN model type (no such branch visible in this script)
    ## Below, we define the model, and move it to the GPU. We also define a standard optimizer (Adam).
    # NOTE(review): only the "resnet18" branch is present; any other model_type would leave `model`
    # undefined and raise NameError at `model.to(rank)` below.
    if model_type=="resnet18":
        from networks_attn_learnLM_clean import Resnet18CA_clean
        model = Resnet18CA_clean(
            dim=image_size,
            channels=channels,
            num_classes=num_classes,
            is_pretrained=is_pretrained,
            is_feature_extractor=False,
            causality_aware=causality_awareness,
            causality_method=causality_method,
            LEHMER_PARAM=lehmer_param,
            causality_setting=causality_setting,
            visual_attention=False, #not yet implemented
            MULCAT_CAUSES_OR_EFFECTS=mulcat_causes_or_effects
        )
        print("-#-#-#: intialized a Resnet18CA model from networks_attn")
    model = model.to(rank)
    model = DDP(model, device_ids=[rank], output_device=rank, find_unused_parameters=True)
    #define loss function criterion and optimizer
    if loss_type == "CrossEntropyLoss":
        loss_function = nn.CrossEntropyLoss()
    else:
        print("Please, specify a valid loss function type, such as CrossEntropyLoss")
        raise NotImplementedError
    optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    # for name, param in model.named_parameters():
    #     if "causality_map_extractor.lehmer_seed" in name:
    #         print(f"{name}\t {param.data}")
    # print()
    #choose your desired scheduling regime for the learning rate...
    # scheduler = lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.1, total_iters=epochs)
    scheduler = lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=1.0, total_iters=epochs) #end_factor=start_factor means no effect.
    # scheduler = lr_scheduler.LinearLR(optimizer, start_factor=1.0, end_factor=0.01, total_iters=round(epochs/2))
    # scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[round(0.20*epochs), round(0.50*epochs)], gamma=0.5)
    # scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[round(0.25*epochs), round(0.50*epochs), round(0.75*epochs),], gamma=0.5)
    # scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    min_valid_loss = float("inf")
    path_to_model_dir = ""
    dateTimeObj = datetime.now() # current date and time
    date_time = dateTimeObj.strftime("%Y%m%d%H%M%S")
    list_of_epochLosses = []
    list_of_validLosses = []
    list_of_validAccs = []
    def print_number_of_model_parameters_and_MB(model):
        """Return (total params, trainable params, model size in MB) for logging."""
        total_params = sum(
            param.numel() for param in model.parameters()
        )
        trainable_params = sum(
            p.numel() for p in model.parameters() if p.requires_grad
        )
        param_size = 0
        for param in model.parameters():
            param_size += param.nelement() * param.element_size()
        buffer_size = 0
        for buffer in model.buffers():
            buffer_size += buffer.nelement() * buffer.element_size()
        size_all_mb = (param_size + buffer_size) / 1024**2
        return total_params, trainable_params, size_all_mb
    ## Let's define a custom save_stamp for this experiment, that will be used to define the corresponding output directory
    save_stamp = date_time + f"_{epochs}e_{image_size}i_{batch_size_train}b_{learning_rate}L_{weight_decay}w"
    causality_setting_TMP = causality_setting + "_" + mulcat_causes_or_effects
    model_type_TMP = model_type+which_resnext_to_use
    if is_pretrained:
        causality_awareness_TMP = str(causality_awareness)+"_pretrained"
    else:
        causality_awareness_TMP = causality_awareness
    # Build the (deeply nested) output directory encoding every hyperparameter of this run.
    if causality_awareness:
        if causality_method=="lehmer":
            if model_type=="EqualCNN":
                path_to_model_dir = os.path.join(results_folder,"saved_models",CONDITIONING_FEATURE, model_type_TMP, str(SEED),causality_setting_TMP, str(number_of_feature_maps), f"CA_{causality_awareness_TMP}", causality_method, str(lehmer_param), f"{save_stamp}")
            else:
                path_to_model_dir = os.path.join(results_folder,"saved_models",CONDITIONING_FEATURE, model_type_TMP, str(SEED), causality_setting_TMP, f"CA_{causality_awareness_TMP}", causality_method, str(lehmer_param), f"{save_stamp}")
        else:
            if model_type=="EqualCNN":
                path_to_model_dir = os.path.join(results_folder,"saved_models",CONDITIONING_FEATURE, model_type_TMP, str(SEED), causality_setting_TMP, str(number_of_feature_maps), f"CA_{causality_awareness_TMP}", causality_method, f"{save_stamp}")
            else:
                path_to_model_dir = os.path.join(results_folder,"saved_models",CONDITIONING_FEATURE, model_type_TMP, str(SEED), causality_setting_TMP, f"CA_{causality_awareness_TMP}", causality_method, f"{save_stamp}")
    else:
        if model_type=="EqualCNN":
            path_to_model_dir = os.path.join(results_folder,"saved_models",CONDITIONING_FEATURE, model_type_TMP, str(SEED), str(number_of_feature_maps), f"CA_{causality_awareness_TMP}",f"{save_stamp}")
        else:
            path_to_model_dir = os.path.join(results_folder,"saved_models",CONDITIONING_FEATURE, model_type_TMP, str(SEED), f"CA_{causality_awareness_TMP}",f"{save_stamp}")
    if not os.path.exists(path_to_model_dir):
        os.makedirs(path_to_model_dir,exist_ok=True)
    # Dump the full experiment configuration (and the model repr) for later reference.
    # NOTE(review): the backslash-continued lines below are part of ONE f-string literal, so their
    # leading whitespace becomes part of the written file; they are kept at column 0 here.
    with open(os.path.join(path_to_model_dir,"settings_of_this_experiment.txt"),"w") as fout:
        fout.write(f" csv_path: {csv_path}\n\
dataset_name: {dataset_name}\n\
SEED: {SEED}\n \
GPU used: {world_size}\n \
---list_of_GPU_ids: {list_of_GPU_ids}\n \
dataset_name: {dataset_name}\n \
number of image classes: {num_classes}\n \
channels: {channels}\n \
image_size: {image_size}\n \
batch_size_train: {batch_size_train}\n \
batch_size_valid: {batch_size_valid}\n \
batch_size_test: {batch_size_test}\n \
Dataloader_train of size: {len(dataloader_train)} batches\n \
epochs: {epochs}\n \
initial_LR: {learning_rate}\n \
LR Scheduler: {scheduler}\n \
wd: {wd}\n \
loss_type: {loss_type}\n \
model_type: {model_type}\n \
--- if EqualCNN, No. f maps: {number_of_feature_maps}\n \
--- if ResNext, which type: {which_resnext_to_use}\n \
is_pretrained: {is_pretrained} \n \
is_feature_extractor: {is_feature_extractor} \n \
causality_aware: {causality_awareness} \n \
---causality_method: {causality_method} \n \
---LEHMER PARAM (alpha, or p): {lehmer_param} \n \
---causality_setting: {causality_setting_TMP} \n \
{model}")
    ## If you wish to get, save, and visualize the feature maps from inner layers of your model, using hooks might help:
    feature_maps_hooked = {}
    def get_activation(name):
        """Return a forward hook that stashes the layer's output under `name`."""
        def hook(model, input, output):
            feature_maps_hooked[name] = output.detach()
        return hook
    ## DEFINE the validation loop: this code seems huge, but it's simple and made of many repeated parts, do not get crazy beforehand ;)
    def validation_loop(model, dataloader_valid, IS_CAUSALITY_AWARE, loss_function):
        """Run one full validation pass; return (valid_loss, accuracy %, AUROC).

        On rank 0 it also (occasionally) saves hooked feature maps, causality maps and
        one original input image to disk for visual inspection.
        NOTE: reads `epoch` from the enclosing training loop via closure, so it must
        only be called from inside the `for epoch in range(epochs)` loop below.
        """
        if dist.get_rank()==0:
            if model_type=="resnet18":
                ## E.g., we want to get the feature maps after the very first convolutional block of our resnet (refer to its definition in the networks_ script)
                handle_b = model.module.starting_block.register_forward_hook(get_activation("starting_block")) # we create an handle for that specific layer
        model.eval()
        accuracy_validation = 0.0
        total_validation = 0.0
        valid_loss = 0.0
        ytrue_validation = []  # ground-truth labels (assumes batch_size_valid == 1: labels_v.item())
        yscore_validation = []  # score of the positive class (column 1) for AUROC
        with torch.no_grad():
            need_to_savefig = False
            for count, (images_v, labels_v) in enumerate(dataloader_valid):
                ### (Optional): If you wish to get, save and later visualize feature maps from the (hooked) inner layers,
                # or the produced causality map for an input validation image, then use this code.
                # Otherwise, by commenting out this IF statement, you will not save any figures,
                # obtaining a lighter validation epoch and thus a faster code that will speed up your experiment.
                if count == 0: # TODO Let's take the first (position 0) batch only, just one for simplicity.
                    need_to_savefig = True # we activate the flag, which is False by default, by setting it to True, just for this time
                ###
                images_v = images_v.to(device)
                ytrue_validation.append(labels_v.item())
                if IS_CAUSALITY_AWARE:
                    outputs_v, batch_causality_maps = model(images_v) # Our causality-driven model yields both the validation outputs and the causality maps! (Refer to its definition)
                    yscore_validation.append(outputs_v.detach().cpu().numpy()[:,1])
                    if need_to_savefig and dist.get_rank()==0: #save the figure only if it is the MASTER process (GPU with rank 0)
                        path_to_feature_maps = os.path.join(path_to_model_dir, "ftrmps")
                        if not os.path.exists(path_to_feature_maps):
                            os.makedirs(path_to_feature_maps,exist_ok=True)
                        path_to_causality_maps = os.path.join(path_to_model_dir, "caumps")
                        if not os.path.exists(path_to_causality_maps):
                            os.makedirs(path_to_causality_maps,exist_ok=True)
                        path_to_original_images = os.path.join(path_to_model_dir, "orgimg")
                        if not os.path.exists(path_to_original_images):
                            os.makedirs(path_to_original_images,exist_ok=True)
                        need_to_savefig=False #Once we have entered this IF block, we can easily put the boolean flag back to False, so that no other validation images get to this point
                        if model_type=="resnet18":
                            if epoch>15: # for epochs after 15, the saving of the maps is conditioned on a probability of 25%, to reduce the memory burden...
                                if np.random.random()<0.25:
                                    np.save(os.path.join(path_to_feature_maps,f"ep{epoch}_strtngBlck.npy"), feature_maps_hooked['starting_block'].cpu().numpy())
                            else: # for initial epochs, instead, we are much interested in the change of the maps' appearance, thus save them every time (probability=100%)
                                np.save(os.path.join(path_to_feature_maps,f"ep{epoch}_strtngBlck.npy"), feature_maps_hooked['starting_block'].cpu().numpy())
                            handle_b.remove() # once we have saved our hooked maps, we can get rid of the corresponding handle object.
                        #####
                        for b_i in range(batch_causality_maps.size()[0]):
                            if epoch>15: # for epochs after 15, the saving of the map is conditioned on a probability of 25%, to reduce the memory burden...
                                if np.random.random()<0.25:
                                    c_map = batch_causality_maps[b_i,:,:]
                                    c_map *= 100 #since they are probability values (0---1), multiply them for 100 to get % (percentage)
                                    c_map = c_map.cpu().numpy()
                                    np.save(os.path.join(path_to_causality_maps,f"e{epoch}_c{b_i}.npy"), c_map)
                            else:
                                c_map = batch_causality_maps[b_i,:,:]
                                c_map *= 100 #since they are probability values (0---1), multiply them for 100 to get % (percentage)
                                c_map = c_map.cpu().numpy()
                                np.save(os.path.join(path_to_causality_maps,f"e{epoch}_c{b_i}.npy"), c_map)
                else:
                    outputs_v, _ = model(images_v) #here, we simply disregard the causality maps (second output) since it is None for non-causality driven models...
                    yscore_validation.append(outputs_v.detach().cpu().numpy()[:,1])
                    ##
                    if need_to_savefig and dist.get_rank()==0:
                        path_to_feature_maps = os.path.join(path_to_model_dir, "ftrmps")
                        if not os.path.exists(path_to_feature_maps):
                            os.makedirs(path_to_feature_maps,exist_ok=True)
                        path_to_original_images = os.path.join(path_to_model_dir, "orgimg")
                        if not os.path.exists(path_to_original_images):
                            os.makedirs(path_to_original_images,exist_ok=True)
                        need_to_savefig=False # Set the flag to the False state again, as above
                        if model_type=="resnet18":
                            if epoch>15: #same as above
                                if np.random.random()<0.25:
                                    np.save(os.path.join(path_to_feature_maps,f"ep{epoch}_strtngBlck.npy"), feature_maps_hooked['starting_block'].cpu().numpy())
                            else:
                                np.save(os.path.join(path_to_feature_maps,f"ep{epoch}_strtngBlck.npy"), feature_maps_hooked['starting_block'].cpu().numpy())
                            handle_b.remove() #remove it, as above
                        if epoch == 3 : # Save the original validation input image just for the first validation epoch (epoch=3)
                            plt.figure()
                            tmpimage=images_v[0,:,:,:].cpu().numpy()
                            tmpimage=np.transpose(tmpimage, (1, 2, 0))
                            # Min-max normalize to [0, 255] for display.
                            tmpimage=255*(tmpimage-tmpimage.min())/(tmpimage.max()-tmpimage.min())
                            tmpimage = tmpimage.astype(np.uint8)
                            plt.imshow(tmpimage)
                            plt.savefig(os.path.join(path_to_original_images,f"ep{epoch}_i0.png"))
                            plt.close()
                labels_v=labels_v.to(device)
                loss_val = loss_function(outputs_v,labels_v)
                # Dataset-size-weighted running mean of the validation loss.
                valid_loss += loss_val.item() * images_v.size(0) / len(dataloader_valid.dataset) #TODO#####
                # the class with the highest energy is what we choose as prediction
                predicted = torch.argmax(outputs_v, 1)
                total_validation += labels_v.size(0)
                count_correct_guess = (torch.eq(predicted,labels_v)).sum().item()
                accuracy_validation += count_correct_guess
        accuracy_validation = 100 * (accuracy_validation / total_validation)
        auroc_softmax = roc_auc_score(ytrue_validation, yscore_validation)
        return valid_loss, accuracy_validation, auroc_softmax
    ## END of the validation_loop #########################################################################
    # If you need it, define the early stopping by declaring patience and minimun delta to be used in the validation loss tracking
    early_stopper = EarlyStopper(patience=5, min_delta=0.005)
    # Some stuff prior to beginning the model training over epochs...
    # (tmp_* hold the latest validation metrics so non-validation epochs can repeat them in the curves)
    tmp_val_acc_value = 0
    tmp_val_loss_value = 0
    with open(os.path.join(path_to_model_dir,"results.txt"),"w") as fout:
        fout.write("Results\n")
    # TensorBoard writer exists only on the master process (rank 0).
    if dist.get_rank()==0:
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(log_dir=path_to_model_dir)
    count_model_params, count_model_params_trainable, count_model_MB =print_number_of_model_parameters_and_MB(model=model)
    if dist.get_rank()==0:
        writer.add_scalar("Model/count_model_params", count_model_params)
        writer.add_scalar("Model/count_model_params_trainable", count_model_params_trainable)
        writer.add_scalar("Model/count_model_MB", count_model_MB)
    ## OK, let's train! ######
    for epoch in range(epochs):
        if dist.get_rank()==0:
            print(f"EPOCH {epoch}---------")
        dataloader_train.sampler.set_epoch(epoch) ## if we are using DistributedSampler, we have to tell it which epoch this is
        epoch_loss = 0.0 # the running loss
        model.train()
        for batch_images,batch_labels in tqdm(dataloader_train):
            optimizer.zero_grad(set_to_none=True)
            step_batch_size = batch_images.size()[0]
            images = batch_images.to(device)
            labels = batch_labels.to(device)
            ##TODO when running in Windows with 'gloo' backend.
            # labels = labels.type(torch.LongTensor).to(device) #TODO when running in Windows with 'gloo' backend.
            outputs, _ = model(images) # our causality-driven model yields the outputs and the causality maps, but at this stage we disregard the latter
            loss = loss_function(outputs,labels)
            if not torch.isnan(loss): ##TODO 30 Oct
                loss.backward()
                #Step with the optimzer
                optimizer.step()
                #Keep track of the loss during epochs
                epoch_loss += loss.item() * step_batch_size / len(dataloader_train.dataset)
            else:
                # Skip the update entirely on a NaN loss so one bad batch does not corrupt the weights.
                print("LOSS WAS Nan IN THIS EPOCH, JUST SKIPPING THE .BACKWARD() AND .STEP()...")
        # END of the training FOR loop.
        ## Write and track down some intermediate results:
        if dist.get_rank()==0:
            with open(os.path.join(path_to_model_dir,"results.txt"),"a") as fout:
                fout.write(f"epoch: {epoch}, training loss: {epoch_loss}\n")
        list_of_epochLosses.append(epoch_loss) # training loss collection
        if dist.get_rank()==0:
            writer.add_scalar("Loss/train", epoch_loss, epoch)
        ## Trigger the validation loop (evaluation, inference) every three epochs and not at every epoch, just to lower the memory burden: TODO feel free to customize it.
        if ((epoch>0) and (epoch%3==0)):
            validation_loss, validation_accuracy, auroc_softmax = validation_loop(model, dataloader_valid, causality_awareness, loss_function)
            if dist.get_rank()==0:
                writer.add_scalar("Loss/valid", validation_loss, epoch)
                writer.add_scalar("Acc/valid", validation_accuracy, epoch)
                writer.add_scalar("AUROC/valid", auroc_softmax, epoch)
                with open(os.path.join(path_to_model_dir,"results.txt"),"a") as fout:
                    fout.write(f" val loss: {validation_loss}, val acc: {validation_accuracy}, val auroc: {auroc_softmax}\n")
            # New best validation loss: checkpoint (rank 0 only) and keep only the 3 most recent best checkpoints.
            if min_valid_loss > validation_loss:
                min_valid_loss = validation_loss
                if dist.get_rank()==0:
                    writer.add_scalar("Best/valid_loss", validation_loss, epoch)
                    writer.add_scalar("Best/acc_at_best_valid_loss", validation_accuracy, epoch)
                    path_to_model_epoch = os.path.join(path_to_model_dir,f"ep{epoch}_betterValid")
                    torch.save(model.state_dict(), path_to_model_epoch)
                    file_list = glob.glob(os.path.join(path_to_model_dir,"*_betterVal*")) # get a list of all .pth files in the models directory
                    file_list.sort(key=lambda x: int(x.split("ep")[1].split("_")[0])) # sort the list by the epoch number in ascending order
                    for file in file_list[:-3]: # delete all files except the last 3 ones
                        os.remove(file)
            list_of_validLosses.append(validation_loss)
            list_of_validAccs.append(validation_accuracy)
            tmp_val_loss_value = validation_loss
            tmp_val_acc_value = validation_accuracy
            ## check for early stopping during training (in DDP fashion, when possibly multiple GPUs are used):
            # rank 0 decides, then the decision is broadcast to every rank via all_reduce so all
            # processes break out of the epoch loop together.
            flag_tensor = torch.zeros(1).to(device)
            if dist.get_rank()==0:
                if early_stopper.early_stop(validation_loss, epoch):
                    flag_tensor += 1
            dist.all_reduce(flag_tensor)
            if flag_tensor == 1:
                with open(os.path.join(path_to_model_dir,"results.txt"),"a") as fout:
                    fout.write(f"Exit condition from early stop on validation loss (earlyStopper patience and minDelta: {early_stopper.get_patience_and_minDelta()})")
                break
        else:
            # Non-validation epoch: repeat the last-known metrics so the curves stay aligned with epochs.
            list_of_validLosses.append(tmp_val_loss_value)
            list_of_validAccs.append(tmp_val_acc_value)
        ## Create, or update, the figure during training every so often (e.g., once in 9 epochs) since the graphics might be a bottleneck:
        if (((epoch > 0) and epoch % 9 == 0) or (epoch == epochs-1)):
            plt.figure() #####
            plt.plot(list_of_epochLosses,'k-') # training
            plt.xlabel("Epochs")
            plt.ylabel("Loss")
            plt.title(f"Training loop updated at epoch {epoch}")
            plt.plot(list_of_validLosses,'b-') # validation
            plt.show()
            plt.savefig(os.path.join(path_to_model_dir,"training_and_validation_loss_curve.pdf"))
            plt.close()
            plt.figure() #####
            plt.plot(list_of_validAccs,'b-')
            plt.xlabel("Epochs")
            plt.ylabel("Validation Accuracy")
            plt.title(f"Training loop updated at epoch {epoch}")
            plt.show()
            plt.savefig(os.path.join(path_to_model_dir,"validation_acc_curve.pdf"))
            plt.close()
        ## if some kind of learning rate scheduler is used, then you can see how its value changes during training
        before_lr = optimizer.param_groups[0]["lr"]
        scheduler.step()
        after_lr = optimizer.param_groups[0]["lr"]
        if dist.get_rank()==0:
            print("Epoch %d: SGD lr %.4f -> %.4f" % (epoch, before_lr, after_lr))
    if dist.get_rank()==0:
        writer.flush() #to make sure that all pending events have been written to disk.
        writer.close()
        with open(os.path.join(path_to_model_dir,"results.txt"),"a") as fout:
            fout.write("---End of this training---")
        print("---End of this training---")
    # Clean up the process groups:
    dist.destroy_process_group()
    pass
#%% MAIN
import torch.multiprocessing as mp
if __name__ == '__main__': #main(rank, world_size, causality_awareness, learning_rate, causality_method=None, lehmer_param=None):
    world_size=args.number_of_gpus
    # Grid search over every requested hyperparameter combination; each combination launches
    # one full DDP training run (world_size processes) via mp.spawn, which passes the process
    # rank as the first argument to main() automatically.
    for CA_method in causality_awareness_method: #none,max,lehmer
        if CA_method is None:
            # Baseline (non causality-aware) runs: sweep learning rate and weight decay only.
            for lr in LR:
                for we_de in wd:
                    print(f"Sto lanciando CA None e LR={lr}, wd={we_de}")
                    mp.spawn(
                        main,
                        args=(world_size, False, lr, we_de, None, None),
                        nprocs=world_size
                    )
        elif CA_method=="max":
            # Causality-aware runs with 'max' aggregation (lehmer_param fixed to 0, unused).
            for causality_setting in CAUSALITY_SETTING: #cat,mulcat,mulcatbool
                for mulcat_causes_or_effects in MULCAT_CAUSES_OR_EFFECTS: #TODO July 21
                    for lr in LR:
                        for we_de in wd:
                            print(f"Sto lanciando CA max e LR={lr}, wd={we_de}, causality_setting {causality_setting}, con mulcat_causes_or_effects {mulcat_causes_or_effects}")
                            mp.spawn(
                                main,
                                args=(world_size, True, lr, we_de, "max", 0, causality_setting, mulcat_causes_or_effects),
                                nprocs=world_size
                            )
        elif CA_method=="lehmer":
            # Causality-aware runs with Lehmer-mean aggregation: additionally sweep the Lehmer power alpha.
            for causality_setting in CAUSALITY_SETTING: #cat,mulcat,mulcatbool
                for mulcat_causes_or_effects in MULCAT_CAUSES_OR_EFFECTS: #TODO July 21
                    for alpha in LEHMER_PARAM:
                        for lr in LR:
                            for we_de in wd:
                                print(f"Sto lanciando CA lehmer con alpha {alpha} e LR={lr}, wd={we_de}, causality_setting {causality_setting}, con mulcat_causes_or_effects {mulcat_causes_or_effects}")
                                mp.spawn(
                                    main,
                                    args=(world_size, True, lr, we_de, "lehmer", alpha, causality_setting, mulcat_causes_or_effects),
                                    nprocs=world_size
                                )
        else:
            # Unrecognized causality-awareness method in the parsed list.
            print("errore nel ciclo for per CA_method")