From 832af2a4c178766b9d50a8cf8c89f2f17f238009 Mon Sep 17 00:00:00 2001
From: Pablo Pizarro R
Date: Wed, 27 Mar 2024 10:25:02 -0300
Subject: [PATCH] Update comments

---
 MLStructFP_benchmarks/ml/model/core/_data_floor_photo.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/MLStructFP_benchmarks/ml/model/core/_data_floor_photo.py b/MLStructFP_benchmarks/ml/model/core/_data_floor_photo.py
index 067f879..c04f31a 100644
--- a/MLStructFP_benchmarks/ml/model/core/_data_floor_photo.py
+++ b/MLStructFP_benchmarks/ml/model/core/_data_floor_photo.py
@@ -187,7 +187,7 @@ def load_part(self, part: int, shuffle: bool = False, ignore_split: bool = False
         else:
             assert part in (1, 2), '1 returns train, 2 test. No other part value allowed'
         # First, get all images size and create a numpy zero object
-        imgs = 0
+        imgs = 0  # Total images loaded so far
         sizes: Dict[int, int] = {}  # Size for each part
         for i in self._split[part - 1]:  # Iterate loaded parts
             i_info = list(_npz_headers(self._get_file(i)[0]))[0]  # ('data', (N, SIZE, SIZE), dtype('DTYPE'))
@@ -208,7 +208,7 @@ def load_part(self, part: int, shuffle: bool = False, ignore_split: bool = False
 
         j = 0  # Index to add
         k = 0  # Number of processed parts
-        for i in self._split[part - 1]:  # Iterate train parts
+        for i in self._split[part - 1]:  # Iterate loaded parts
             f = self._get_file(i)
             img_b[j:j + sizes[i]] = np.load(f[0])['data']  # Binary
             img_p[j:j + sizes[i]] = np.load(f[1])['data']  # Photo