autoformat

Former-commit-id: d47360eb60f95f0b1690e4443cf68c4203601854

Javi Ribera committed Apr 2, 2018
1 parent 0810141 · commit 1ed5af7
Showing 4 changed files with 27 additions and 24 deletions.
object-locator/argparser.py (2 changes: 1 addition & 1 deletion)
@@ -142,7 +142,7 @@ def parse_command_args(training_or_testing):
     args.save = os.path.abspath(args.save)
 
     # Check we are not overwriting a checkpoint without resuming from it
-    if args.save != '' and os.path.isfile(args.save) and \
+    if args.save != '' and os.path.isfile(args.save) and \
             not (args.resume and args.resume == args.save):
         print("E: Don't overwrite a checkpoint without resuming from it. "
               "Are you sure you want to do that? "
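For context, the guard rewrapped above refuses to overwrite an existing checkpoint unless the run is resuming from that same file. A minimal standalone sketch of the same check; the helper name and the SystemExit behavior are illustrative, not the repository's API:

import os

def refuse_unsafe_overwrite(save, resume):
    # Illustrative helper: abort if `save` exists and we are not resuming from it.
    if save != '' and os.path.isfile(save) and not (resume and resume == save):
        raise SystemExit("E: Don't overwrite a checkpoint without resuming from it.")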
object-locator/locate.py (1 change: 0 additions & 1 deletion)
@@ -273,7 +273,6 @@
 # the precision as a function of r
 df_prec_n_rec = pd.DataFrame(columns=['precision', 'recall'])
 
-
 print(f'\__ Average AHD for all the testing set: {avg_ahd:.3f}')
 print('\__ Accuracy for all the testing set, r=0, ..., 15')
 for judge in judges:
object-locator/losses.py (12 changes: 7 additions & 5 deletions)
@@ -110,7 +110,8 @@ def __init__(self,
 
         # Prepare all possible (row, col) locations in the image
         self.height, self.width = resized_height, resized_width
-        self.resized_size = Variable(tensortype([resized_height, resized_width]))
+        self.resized_size = Variable(tensortype([resized_height,
+                                                 resized_width]))
         self.max_dist = math.sqrt(resized_height**2 + resized_width**2)
         self.n_pixels = resized_height * resized_width
         self.all_img_locations = torch.from_numpy(cartesian([np.arange(resized_height),
@@ -152,19 +153,20 @@ def forward(self, prob_map, gt, orig_sizes):
         batch_size = prob_map.shape[0]
         assert batch_size == len(gt)
 
-        terms_1 = []#Variable(self.tensortype(batch_size))
-        terms_2 = []#Variable(self.tensortype(batch_size))
+        terms_1 = []  # Variable(self.tensortype(batch_size))
+        terms_2 = []  # Variable(self.tensortype(batch_size))
         for b in range(batch_size):
 
             # One by one
             prob_map_b = prob_map[b, :, :]
-            gt_b = gt[b]
+            gt_b = gt[b]
             orig_size_b = orig_sizes[b, :]
             norm_factor = (orig_size_b/self.resized_size).unsqueeze(0)
 
             # Pairwise distances between all possible locations and the GTed locations
             n_gt_pts = gt_b.size()[0]
-            normalized_x = norm_factor.repeat(self.n_pixels, 1)*self.all_img_locations
+            normalized_x = norm_factor.repeat(self.n_pixels, 1)*\
+                           self.all_img_locations
             normalized_y = norm_factor.repeat(len(gt_b), 1)*gt_b
             d_matrix = cdist(normalized_x, normalized_y)
 
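The rewrapped lines above compute a pairwise distance matrix between every pixel location of the resized image and the ground-truth points, with both sides scaled by the original-to-resized size ratio. A minimal NumPy sketch of that step, with toy sizes and GT points assumed to be in resized-image coordinates; this is an illustration, not the module's Variable-based code:

import numpy as np
from scipy.spatial.distance import cdist

resized_h, resized_w = 256, 256     # network input size (toy values)
orig_h, orig_w = 1024, 768          # original image size (toy values)

# All (row, col) locations in the resized image, shape (H*W, 2)
rows, cols = np.meshgrid(np.arange(resized_h), np.arange(resized_w), indexing='ij')
all_img_locations = np.stack([rows.ravel(), cols.ravel()], axis=1)

norm_factor = np.array([orig_h / resized_h, orig_w / resized_w])
gt_b = np.array([[10.0, 20.0], [100.0, 200.0]])   # toy GT points (row, col)

normalized_x = all_img_locations * norm_factor    # candidate locations
normalized_y = gt_b * norm_factor                 # GT locations
d_matrix = cdist(normalized_x, normalized_y)      # shape (H*W, n_gt_pts)
print(d_matrix.shape)                             # (65536, 2)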
object-locator/train.py (36 changes: 19 additions & 17 deletions)
@@ -116,7 +116,6 @@
     optimizer = optim.SGD(model.parameters(),
                           lr=args.lr,
                           momentum=0.9)
-                          # nesterov=True)
 elif args.optimizer == 'adam':
     optimizer = optim.Adam(model.parameters(),
                            lr=args.lr)
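The deleted `# nesterov=True)` line was a stale commented-out option; in PyTorch it corresponds to a real keyword of optim.SGD. A hedged sketch of the same optimizer switch as a standalone function (the helper itself is hypothetical; the 'sgd'/'adam' strings follow train.py's convention):

import torch.optim as optim

def build_optimizer(model, name, lr):
    # Mirrors the branch above.
    if name == 'sgd':
        # Passing nesterov=True here would enable Nesterov momentum,
        # the option the commit removes as a dead comment.
        return optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    elif name == 'adam':
        return optim.Adam(model.parameters(), lr=lr)
    raise ValueError('unknown optimizer: %s' % name)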
@@ -170,14 +169,15 @@
         target_count = Variable(target_count.type(tensortype))
         target_orig_heights = Variable(tensortype(target_orig_heights))
         target_orig_widths = Variable(tensortype(target_orig_widths))
-        target_orig_sizes = torch.stack((target_orig_heights, target_orig_widths)).transpose(0, 1)
+        target_orig_sizes = torch.stack((target_orig_heights,
+                                         target_orig_widths)).transpose(0, 1)
 
         # One training step
         optimizer.zero_grad()
         est_map, est_count = model.forward(imgs)
-        term1, term2 = loss_loc.forward(est_map, target_locations, target_orig_sizes)
-        term3 = loss_regress.forward(est_count, target_count) #\
-        # / torch.sum(target_count)
+        term1, term2 = loss_loc.forward(
+            est_map, target_locations, target_orig_sizes)
+        term3 = loss_regress.forward(est_count, target_count)
        term3 *= args.lambdaa
         loss = term1 + term2 + term3
         loss.backward()
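The training step combines two localization terms from loss_loc with a count-regression term scaled by args.lambdaa. A toy illustration of that combination with plain tensors; the L1 regression loss is an assumption, since loss_regress is defined elsewhere in train.py:

import torch
import torch.nn.functional as F

term1 = torch.tensor(0.8)                    # e.g. estimated map -> GT points
term2 = torch.tensor(1.1)                    # e.g. GT points -> estimated map
est_count = torch.tensor([4.7])
target_count = torch.tensor([5.0])
term3 = F.l1_loss(est_count, target_count)   # assumed regression loss
lambdaa = 1.0                                # stands in for args.lambdaa
loss = term1 + term2 + lambdaa * term3
print(float(loss))                           # about 2.2 with these toy values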
@@ -186,15 +186,17 @@
         # Update progress bar
         loss_avg_this_epoch = (1/(batch_idx + 1))*(batch_idx * loss_avg_this_epoch +
                                                    loss.data[0])
-        iter_train.set_postfix(avg_train_loss_this_epoch=f'{loss_avg_this_epoch:.1f}')
+        iter_train.set_postfix(
+            avg_train_loss_this_epoch=f'{loss_avg_this_epoch:.1f}')
 
         # Log training error
         if time.time() > tic_train + args.log_interval:
             tic_train = time.time()
 
             # Log training losses
             log.train_losses(terms=[term1, term2, term3, loss / 3],
-                             iteration_number=epoch + batch_idx/len(trainset_loader),
+                             iteration_number=epoch +
+                             batch_idx/len(trainset_loader),
                              terms_legends=['Term1',
                                             'Term2',
                                             'Term3*%s' % args.lambdaa,
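The loss_avg_this_epoch update rewrapped at the top of this hunk is the standard incremental mean: after batch_idx + 1 batches, avg = (batch_idx * old_avg + new_loss) / (batch_idx + 1). A quick standalone check:

losses = [2.0, 1.0, 3.0]
avg = 0.0
for batch_idx, loss_val in enumerate(losses):
    avg = (1 / (batch_idx + 1)) * (batch_idx * avg + loss_val)
print(avg)   # 2.0, equal to sum(losses) / len(losses)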
@@ -262,7 +264,7 @@
     sum_se = 0
     sum_ape = 0
     iter_val = tqdm(valset_loader,
-                    desc=f'Validating Epoch {epoch} ({len(valset)} images)')
+                    desc=f'Validating Epoch {epoch} ({len(valset)} images)')
     for batch_idx, (imgs, dictionaries) in enumerate(iter_val):
 
         # Pull info from this batch
@@ -272,7 +274,7 @@
         target_orig_heights = [dictt['orig_height'] for dictt in dictionaries]
         target_orig_widths = [dictt['orig_width'] for dictt in dictionaries]
 
-        if bool((target_count==0).cpu().numpy()[0]):
+        if bool((target_count == 0).cpu().numpy()[0]):
             continue
 
         imgs = Variable(imgs.type(tensortype), volatile=True)
@@ -281,16 +283,16 @@
         target_count = Variable(target_count.type(tensortype), volatile=True)
         target_orig_heights = Variable(tensortype(target_orig_heights))
         target_orig_widths = Variable(tensortype(target_orig_widths))
-        target_orig_sizes = torch.stack((target_orig_heights, target_orig_widths)).transpose(0, 1)
+        target_orig_sizes = torch.stack((target_orig_heights,
+                                         target_orig_widths)).transpose(0, 1)
 
         # Feed-forward
         est_map, est_count = model.forward(imgs)
 
         # The 3 terms
-        term1, term2 = loss_loc.forward(est_map, target_locations, target_orig_sizes)
-        # if bool((torch.sum(target_count)==0).data.cpu().numpy()[0]):
+        term1, term2 = loss_loc.forward(
+            est_map, target_locations, target_orig_sizes)
         term3 = loss_regress.forward(est_count, target_count)
-        # / torch.sum(target_count)
         term3 *= args.lambdaa
         sum_term1 += term1
         sum_term2 += term2
@@ -299,8 +301,8 @@
 
         # Update progress bar
        loss_avg_this_epoch = sum_loss.data[0] / (batch_idx + 1)
-        iter_val.set_postfix(avg_val_loss_this_epoch=f'{loss_avg_this_epoch:.1f}-----')
-
+        iter_val.set_postfix(
+            avg_val_loss_this_epoch=f'{loss_avg_this_epoch:.1f}-----')
 
         # Validation using the Averaged Hausdorff Distance
         # __on the first image of the batch__
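The validation block that follows scores the estimated points with the Averaged Hausdorff Distance: the mean distance from each estimated point to its nearest GT point, plus the mean distance from each GT point to its nearest estimate. A compact sketch under that standard definition; the repository's own implementation lives in losses.py:

import numpy as np
from scipy.spatial.distance import cdist

def averaged_hausdorff(est, gt):
    # est, gt: (n, 2) arrays of (row, col) points; both must be non-empty.
    d = cdist(est, gt)
    return d.min(axis=1).mean() + d.min(axis=0).mean()

est = np.array([[10.0, 10.0], [50.0, 60.0]])
gt = np.array([[12.0, 11.0], [48.0, 58.0]])
print(averaged_hausdorff(est, gt))   # small value for well-matched point sets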
@@ -338,7 +340,7 @@
         sum_ae += ae
         sum_se += se
         sum_ape += ape
-
+
         # Validation using Precision and Recall
         judge.evaluate_sample(centroids, target_locations)
 
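judge.evaluate_sample matches estimated centroids against GT locations to accumulate precision and recall at a radius r. A simplified nearest-neighbour version of that idea; the real Judge class may use a stricter one-to-one matching, so this sketch is only an approximation:

import numpy as np
from scipy.spatial.distance import cdist

def precision_recall(est, gt, r):
    d = cdist(est, gt)
    tp_est = int((d.min(axis=1) <= r).sum())   # estimates with a GT within r
    tp_gt = int((d.min(axis=0) <= r).sum())    # GT points with an estimate within r
    return tp_est / len(est), tp_gt / len(gt)

est = np.array([[10.0, 10.0], [90.0, 90.0]])
gt = np.array([[12.0, 11.0], [50.0, 50.0]])
print(precision_recall(est, gt, r=5))          # (0.5, 0.5)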
@@ -360,6 +362,7 @@
                 titles=['(Validation) Input',
                         '(Validation) U-Net output'],
                 windows=[5, 6])
+
         # # Read image with GT dots from disk
         # gt_img_numpy = skimage.io.imread(
         #     os.path.join('/home/jprat/projects/phenosorg/data/plant_counts_dots/20160613_F54_validation_256x256_white_bigdots',
@@ -439,4 +442,3 @@
     print("Saved best checkpoint so far in %s " % args.save)
 
     epoch += 1
-