diff --git a/hsds/attr_sn.py b/hsds/attr_sn.py
index 95cbcf4f..e16058c0 100755
--- a/hsds/attr_sn.py
+++ b/hsds/attr_sn.py
@@ -1085,7 +1085,7 @@ async def PUT_AttributeValue(request):
     if binary_data:
         npoints = getNumElements(np_shape)
         if npoints * item_size != len(binary_data):
-            msg = f"Expected: {npoints*item_size} bytes, "
+            msg = f"Expected: {npoints * item_size} bytes, "
             msg += f"but got {len(binary_data)}"
             log.warn(msg)
             raise HTTPBadRequest(reason=msg)
diff --git a/hsds/chunk_dn.py b/hsds/chunk_dn.py
index d8f17997..bae53279 100644
--- a/hsds/chunk_dn.py
+++ b/hsds/chunk_dn.py
@@ -253,7 +253,7 @@ async def PUT_Chunk(request):
         # regular chunk update
         # check that the content_length is what we expect
         if itemsize != "H5T_VARIABLE":
-            log.debug(f"expected content_length: {num_elements*itemsize}")
+            log.debug(f"expected content_length: {num_elements * itemsize}")
             log.debug(f"actual content_length: {request.content_length}")

         actual = request.content_length
diff --git a/hsds/chunk_sn.py b/hsds/chunk_sn.py
index d01359ff..9924530e 100755
--- a/hsds/chunk_sn.py
+++ b/hsds/chunk_sn.py
@@ -916,7 +916,7 @@ async def PUT_Value(request):

         for page_number in range(len(pages)):
             page = pages[page_number]
-            msg = f"streaming request data for page: {page_number+1} of {len(pages)}, "
+            msg = f"streaming request data for page: {page_number + 1} of {len(pages)}, "
             msg += f"selection: {page}"
             log.info(msg)
             kwargs = {"page_number": page_number, "page": page}
@@ -1088,7 +1088,7 @@ async def GET_Value(request):
     try:
         for page_number in range(len(pages)):
             page = pages[page_number]
-            msg = f"streaming response data for page: {page_number+1} "
+            msg = f"streaming response data for page: {page_number + 1} "
             msg += f"of {len(pages)}, selection: {page}"
             log.info(msg)
diff --git a/hsds/chunklocator.py b/hsds/chunklocator.py
index 4821283e..6727de9e 100644
--- a/hsds/chunklocator.py
+++ b/hsds/chunklocator.py
@@ -231,4 +231,4 @@ def main():
     log.info('done')
     stop_time = time.time()
     log.info(f"chunklocator stop: {stop_time:.2f}")
-    log.info(f"chunklocator elapsed: {(stop_time-start_time):.2f}")
+    log.info(f"chunklocator elapsed: {(stop_time - start_time):.2f}")
diff --git a/hsds/hsds_app.py b/hsds/hsds_app.py
index 6f08e905..d21f9079 100644
--- a/hsds/hsds_app.py
+++ b/hsds/hsds_app.py
@@ -157,7 +157,7 @@ def __init__(

             sn_url = f"http+unix://{socket_url}sn_1.sock"
             for i in range(dn_count):
-                socket_name = f"dn_{(i+1)}.sock"
+                socket_name = f"dn_{(i + 1)}.sock"
                 dn_url = f"http+unix://{socket_url}{socket_name}"
                 self._dn_urls.append(dn_url)
                 self._socket_paths.append(f"{socket_dir}{socket_name}")
@@ -167,7 +167,7 @@ def __init__(
             sn_url = f"http://{host}:{sn_port}"
             dn_port = 6101  # TBD: pull this from config
             for i in range(dn_count):
-                dn_url = f"http://{host}:{dn_port+i}"
+                dn_url = f"http://{host}:{dn_port + i}"
                 self._dn_urls.append(dn_url)

         # sort the ports so that node_number can be determined based on dn_url
@@ -296,7 +296,7 @@ def run(self):
                     py_exe,
                     cmd_path,
                     "--node_type=dn",
-                    f"--log_prefix=dn{node_number+1} ",
+                    f"--log_prefix=dn{node_number + 1} ",
                 ]
                 pargs.append(f"--dn_urls={dn_urls_arg}")
                 pargs.append(f"--node_number={node_number}")
@@ -342,7 +342,7 @@ def run(self):
                 self.log.error(msg)
                 raise IOError(msg)

-        self.log.info(f"Ready after: {(time.time()-start_ts):4.2f} s")
+        self.log.info(f"Ready after: {(time.time() - start_ts):4.2f} s")
         self._ready = True

     def stop(self):
diff --git a/hsds/servicenode.py b/hsds/servicenode.py
index 2ea85319..8a5ddaee 100755
--- a/hsds/servicenode.py
+++ b/hsds/servicenode.py
@@ -270,7 +270,7 @@ def create_app():
     if hs_username:
         log.info(f"getCmdLine hs_username: {hs_username}")
     if hs_password:
-        log.info(f"getCmdLine hs_password: {'*'*len(hs_password)}")
+        log.info(f"getCmdLine hs_password: {'*' * len(hs_password)}")
     if hs_username:
         setPassword(app, hs_username, hs_password)
diff --git a/hsds/util/awsLambdaClient.py b/hsds/util/awsLambdaClient.py
index 7c090d4a..fe801380 100644
--- a/hsds/util/awsLambdaClient.py
+++ b/hsds/util/awsLambdaClient.py
@@ -164,7 +164,7 @@ async def __aenter__(self):
             msg = f"lambda.invoke({self.lambdaFunction} "
             msg += f"start={start_time:.4f} "
             msg += f"finish={finish_time:.4f} "
-            msg += f"elapsed={finish_time-start_time:.4f}"
+            msg += f"elapsed={finish_time - start_time:.4f}"
             log.info(msg)
             self.funcStats["inflight"] -= 1
             msg = f"lambda.invoke - {self.funcStats['inflight']} "
diff --git a/hsds/util/azureBlobClient.py b/hsds/util/azureBlobClient.py
index e8c6313d..6b22c869 100644
--- a/hsds/util/azureBlobClient.py
+++ b/hsds/util/azureBlobClient.py
@@ -48,7 +48,7 @@ def __init__(self, app):
            msg = "No connection string specified"
            log.error(msg)
            raise ValueError(msg)
-        log.info(f"Using azure_connection_string: {'*'*len(azure_connection_string)}")
+        log.info(f"Using azure_connection_string: {'*' * len(azure_connection_string)}")

        self._client = BlobServiceClient.from_connection_string(azure_connection_string)

@@ -121,7 +121,7 @@ async def get_object(self, key, bucket=None, offset=0, length=-1):
             finish_time = time.time()
             msg = f"azureBlobClient.get_object({key} bucket={bucket}) "
             msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-            msg += f"elapsed={finish_time-start_time:.4f} "
+            msg += f"elapsed={finish_time - start_time:.4f} "
             msg += f"bytes={len(data)}"
             log.info(msg)
         except CancelledError as cle:
@@ -179,7 +179,7 @@ async def put_object(self, key, data, bucket=None):
             log.debug(f"put_object {key} returning: {rsp}")
             msg = f"azureBlobClient.put_object({key} bucket={bucket}) "
             msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-            msg += "elapsed={finish_time-start_time:.4f} "
+            msg += f"elapsed={finish_time - start_time:.4f} "
             msg += f"bytes={len(data)}"
             log.info(msg)

@@ -231,7 +231,7 @@ async def delete_object(self, key, bucket=None):
             finish_time = time.time()
             msg = f"azureBlobClient.delete_object({key} bucket={bucket}) "
             msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-            msg += f"elapsed={finish_time-start_time:.4f}"
+            msg += f"elapsed={finish_time - start_time:.4f}"
             log.info(msg)

         except CancelledError as cle:
@@ -309,7 +309,7 @@ async def is_object(self, key, bucket=None):

         msg = f"azureBlobClient.is_object({key} bucket={bucket}) "
         msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-        msg += f"elapsed={finish_time-start_time:.4f}"
+        msg += f"elapsed={finish_time - start_time:.4f}"
         log.info(msg)

         return found
@@ -365,7 +365,7 @@ async def get_key_stats(self, key, bucket=None):
             key_stats["LastModified"] = datetime.datetime.timestamp(lm_dt)
         msg = f"azureBlobClient.get_key_stats({key} bucket={bucket}) "
         msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-        msg += f"elapsed={finish_time-start_time:.4f}"
+        msg += f"elapsed={finish_time - start_time:.4f}"
         log.info(msg)

         return key_stats
diff --git a/hsds/util/dsetUtil.py b/hsds/util/dsetUtil.py
index 34fd9bfb..9f625ad4 100644
--- a/hsds/util/dsetUtil.py
+++ b/hsds/util/dsetUtil.py
@@ -585,7 +585,7 @@ def getSelectionList(select, dims):
                     raise ValueError(f"Invalid coordinate for dim {dim}")
                 if coord < 0 or coord >= extent:
                     msg = f"out of range coordinate for dim {dim}, {coord} "
-                    msg += f"not in range: 0-{extent-1}"
+                    msg += f"not in range: 0-{extent - 1}"
                     raise ValueError(msg)
                 if last_coord is not None and coord <= last_coord:
                     raise ValueError("coordinates must be increasing")
diff --git a/hsds/util/fileClient.py b/hsds/util/fileClient.py
index b803162d..a481ac5e 100644
--- a/hsds/util/fileClient.py
+++ b/hsds/util/fileClient.py
@@ -131,7 +131,7 @@ async def get_object(self, key, bucket=None, offset=0, length=-1):
         self._validateKey(key)

         if length > 0:
-            range = f"bytes={offset}-{offset+length-1}"
+            range = f"bytes={offset}-{offset + length - 1}"
             log.info(f"storage range request: {range}")

         filepath = self._getFilePath(bucket, key)
@@ -152,7 +152,7 @@ async def get_object(self, key, bucket=None, offset=0, length=-1):
             finish_time = time.time()
             msg = f"fileClient.get_object({key} bucket={bucket}) "
             msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-            msg += f"elapsed={finish_time-start_time:.4f} bytes={len(data)}"
+            msg += f"elapsed={finish_time - start_time:.4f} bytes={len(data)}"
             log.info(msg)
         except FileNotFoundError:
             msg = f"fileClient: {key} not found "
@@ -217,7 +217,7 @@ async def put_object(self, key, data, bucket=None):
             finish_time = time.time()
             msg = f"fileClient.put_object({key} bucket={bucket}) "
             msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-            msg += f"elapsed={finish_time-start_time:.4f} bytes={len(data)}"
+            msg += f"elapsed={finish_time - start_time:.4f} bytes={len(data)}"
             log.info(msg)
             write_rsp = self._getFileStats(filepath, data=data)
         except IOError as ioe:
@@ -263,7 +263,7 @@ async def delete_object(self, key, bucket=None):
             finish_time = time.time()
             msg = f"fileClient.delete_object({key} bucket={bucket}) "
             msg += f"start={start_time:.4f} finish={finish_time:.4f} "
-            msg += f"elapsed={finish_time-start_time:.4f}"
+            msg += f"elapsed={finish_time - start_time:.4f}"
             log.info(msg)

         except IOError as ioe:
diff --git a/hsds/util/idUtil.py b/hsds/util/idUtil.py
index 3561e079..f25d1cb5 100644
--- a/hsds/util/idUtil.py
+++ b/hsds/util/idUtil.py
@@ -305,7 +305,7 @@ def createNodeId(prefix, node_number=None):
     node_id = ""  # nothing too bad happens if this doesn't get set
     if node_number is not None:
         # just make an id based on the node_number
-        hash_key = f"{node_number+1:03d}"
+        hash_key = f"{node_number + 1:03d}"
     else:
         # use the container id if we are running inside docker
         hash_key = getIdHash(str(uuid.uuid1()))
diff --git a/hsds/util/lruCache.py b/hsds/util/lruCache.py
index 21fdea45..fd04b7a4 100644
--- a/hsds/util/lruCache.py
+++ b/hsds/util/lruCache.py
@@ -309,7 +309,7 @@ def consistencyCheck(self):
             if pos == 0:
                 raise ValueError(f"unexpected node: {node._id}")
             if node._id != id_list[pos - 1]:
-                msg = f"expected node: {id_list[pos-1]} but found: {node._id}"
+                msg = f"expected node: {id_list[pos - 1]} but found: {node._id}"
                 raise ValueError(msg)
             pos -= 1
             node = node._prev
diff --git a/hsds/util/s3Client.py b/hsds/util/s3Client.py
index 0d3d692e..698af580 100644
--- a/hsds/util/s3Client.py
+++ b/hsds/util/s3Client.py
@@ -269,7 +269,7 @@ async def get_object(self, key, bucket=None, offset=0, length=-1):

         range = ""
         if length > 0:
-            range = f"bytes={offset}-{offset+length-1}"
+            range = f"bytes={offset}-{offset + length - 1}"
             log.info(f"storage range request: {range}")

         if not bucket:
@@ -290,12 +290,12 @@ async def get_object(self, key, bucket=None, offset=0, length=-1):
             data = await resp["Body"].read()
             finish_time = time.time()
             if offset > 0:
-                range_key = f"{key}[{offset}:{offset+length}]"
f"{key}[{offset}:{offset + length}]" else: range_key = key msg = f"s3Client.get_object({range_key} bucket={bucket}) " msg += f"start={start_time:.4f} finish={finish_time:.4f} " - msg += f"elapsed={finish_time-start_time:.4f} " + msg += f"elapsed={finish_time - start_time:.4f} " msg += f"bytes={len(data)}" log.info(msg) @@ -354,7 +354,7 @@ async def put_object(self, key, data, bucket=None): finish_time = time.time() msg = f"s3Client.put_object({key} bucket={bucket}) " msg += f"start={start_time:.4f} finish={finish_time:.4f} " - msg += f"elapsed={finish_time-start_time:.4f} " + msg += f"elapsed={finish_time - start_time:.4f} " msg += f"bytes={len(data)}" log.info(msg) s3_rsp = { @@ -406,7 +406,7 @@ async def delete_object(self, key, bucket=None): finish_time = time.time() msg = f"s3Client.delete_object({key} bucket={bucket}) " msg += f"start={start_time:.4f} finish={finish_time:.4f} " - msg += f"elapsed={finish_time-start_time:.4f}" + msg += f"elapsed={finish_time - start_time:.4f}" log.info(msg) except ClientError as ce: @@ -466,7 +466,7 @@ async def is_object(self, key, bucket=None): raise HTTPInternalServerError() msg = f"s3Client.is_object({key} bucket={bucket}) " msg += f"start={start_time:.4f} finish={finish_time:.4f} " - msg += f"elapsed={finish_time-start_time:.4f}" + msg += f"elapsed={finish_time - start_time:.4f}" log.info(msg) return found @@ -521,7 +521,7 @@ async def get_key_stats(self, key, bucket=None): key_stats["LastModified"] = LastModified msg = f"s3Client.get_key_stats({key} bucket={bucket}) " msg += f"start={start_time:.4f} finish={finish_time:.4f} " - msg += f"elapsed={finish_time-start_time:.4f}" + msg += f"elapsed={finish_time - start_time:.4f}" log.info(msg) return key_stats diff --git a/tests/integ/attr_test.py b/tests/integ/attr_test.py index ab544f47..d718cc29 100644 --- a/tests/integ/attr_test.py +++ b/tests/integ/attr_test.py @@ -1988,7 +1988,7 @@ def testPutAttributeMultiple(self): # create some groups grp_count = 3 - grp_names = [f"group{i+1}" for i in range(grp_count)] + grp_names = [f"group{i + 1}" for i in range(grp_count)] grp_ids = [] for grp_name in grp_names: @@ -2022,7 +2022,7 @@ def testPutAttributeMultiple(self): for i in range(attr_count): value = [i * 10 + j for j in range(extent)] data = {"type": "H5T_STD_I32LE", "shape": extent, "value": value} - attr_name = f"attr{i+1:04d}" + attr_name = f"attr{i + 1:04d}" attributes[attr_name] = data # write attributes to the dataset @@ -2042,7 +2042,7 @@ def testPutAttributeMultiple(self): for i in range(attr_count): attr = ret_attrs[i] self.assertTrue("name" in attr) - self.assertEqual(attr["name"], f"attr{i+1:04d}") + self.assertEqual(attr["name"], f"attr{i + 1:04d}") self.assertTrue("value" in attr) attr_value = attr["value"] self.assertEqual(len(attr_value), extent) @@ -2090,7 +2090,7 @@ def testPutAttributeMultiple(self): self.assertTrue("attributes" in rspJson) ret_attrs = rspJson["attributes"] # expect the 4 attributes we wrote in the first post - # plus (i+1) in the second post + # plus (i + 1) in the second post self.assertEqual(len(ret_attrs), attr_count + i + 1) for j in range(len(ret_attrs)): attr = ret_attrs[j] diff --git a/tests/integ/link_test.py b/tests/integ/link_test.py index 03bcf002..94d79704 100755 --- a/tests/integ/link_test.py +++ b/tests/integ/link_test.py @@ -1464,7 +1464,7 @@ def testPutLinkMultiple(self): # create some groups under grp1 grp_count = 3 - grp_names = [f"grp{(i+1):04d}" for i in range(grp_count)] + grp_names = [f"grp{(i + 1):04d}" for i in range(grp_count)] grp_ids = [] 
         for grp_name in grp_names:
diff --git a/tests/integ/value_test.py b/tests/integ/value_test.py
index 459564be..d1925fd2 100755
--- a/tests/integ/value_test.py
+++ b/tests/integ/value_test.py
@@ -1756,7 +1756,7 @@ def testGet(self):

         # read 1x4 block from dataset
         row_index = 2
-        params = {"select": f"[{row_index}:{row_index+1}, 0:4]"}
+        params = {"select": f"[{row_index}:{row_index + 1}, 0:4]"}
         params["nonstrict"] = 1  # SN can read directly from S3 or DN node
         rsp = self.session.get(req, params=params, headers=headers)
         self.assertEqual(rsp.status_code, 200)
@@ -3366,7 +3366,7 @@ def testIntelligentRangeGet3D(self):

         z = 35
         count = 10  # read 10 element, starting at index (x,y,z)
-        params = {"select": f"[{x}, {y}, {z}:{z+count}]"}
+        params = {"select": f"[{x}, {y}, {z}:{z + count}]"}
         params["nonstrict"] = 1  # enable SN to invoke lambda func

         # read the selection
diff --git a/tests/perf/append/append_1d.py b/tests/perf/append/append_1d.py
index 97ca40ca..b81e948b 100644
--- a/tests/perf/append/append_1d.py
+++ b/tests/perf/append/append_1d.py
@@ -156,5 +156,5 @@ def addRow(dset):
 # print out stats
 if mode == "a":
     end_ts = time.time()
-    print(f"added {count} rows in {(end_ts-start_ts):8.4f} seconds")
-    print(f"{count/(end_ts-start_ts):5.4f} rows/sec")
+    print(f"added {count} rows in {(end_ts - start_ts):8.4f} seconds")
+    print(f"{count / (end_ts - start_ts):5.4f} rows/sec")
diff --git a/tests/perf/append/append_1d_async.py b/tests/perf/append/append_1d_async.py
index 4716550d..0ba73f20 100644
--- a/tests/perf/append/append_1d_async.py
+++ b/tests/perf/append/append_1d_async.py
@@ -312,5 +312,5 @@ async def addrow(self, session, dsetid, sensor_id, seq_num):
 print("num failures:", cfg["error_count"])
 count = cfg["rows_added"]
 print("rows added:", count)
-print(f"added {count} rows in {(end_ts-start_ts):8.4f} seconds")
-print(f"{count/(end_ts-start_ts):5.4f} rows/sec")
+print(f"added {count} rows in {(end_ts - start_ts):8.4f} seconds")
+print(f"{count / (end_ts - start_ts):5.4f} rows/sec")
diff --git a/tests/perf/arrayperf/bytes_to_array.py b/tests/perf/arrayperf/bytes_to_array.py
index 85c1ec71..bb0de0a9 100644
--- a/tests/perf/arrayperf/bytes_to_array.py
+++ b/tests/perf/arrayperf/bytes_to_array.py
@@ -36,13 +36,13 @@
 then = time.time()
 buffer_size = getByteArraySize(arr)
 now = time.time()
-msg = f"getByteArraySize - elapsed: {(now-then):6.4f} for {count} elements, "
+msg = f"getByteArraySize - elapsed: {(now - then):6.4f} for {count} elements, "
 msg += f"returned {buffer_size}"
 print(msg)
 then = time.time()
 buffer = arrayToBytes(arr)
 now = time.time()
-print(f"arrayToBytes - elpased: {(now-then):6.4f} for {count} elements")
+print(f"arrayToBytes - elapsed: {(now - then):6.4f} for {count} elements")
 if len(buffer) != buffer_size:
     raise ValueError(f"unexpected buffer length: {len(buffer)}")
 then = time.time()
@@ -50,4 +50,4 @@
 now = time.time()
 if copy.shape[0] != count:
     raise ValueError(f"unexpected array shape: {copy.shape}")
-print(f"bytesToArray - elapsed: {(now-then):6.4f}")
+print(f"bytesToArray - elapsed: {(now - then):6.4f}")
diff --git a/tests/perf/arrayperf/bytes_to_vlen.py b/tests/perf/arrayperf/bytes_to_vlen.py
index 9cc547bd..6b7e94fc 100644
--- a/tests/perf/arrayperf/bytes_to_vlen.py
+++ b/tests/perf/arrayperf/bytes_to_vlen.py
@@ -52,19 +52,19 @@
 then = time.time()
 buffer_size = getByteArraySize(arr)
 now = time.time()
-msg = f"getByteArraySize - elapsed: {(now-then):6.4f} for {count} elements, "
+msg = f"getByteArraySize - elapsed: {(now - then):6.4f} for {count} elements, "
f"returned {buffer_size}" print(msg) then = time.time() buffer = arrayToBytes(arr) now = time.time() -print(f"arrayToBytes - elpased: {(now-then):6.4f} for {count} elements") +print(f"arrayToBytes - elpased: {(now - then):6.4f} for {count} elements") # convert back to a numpy array then = time.time() arr_ret = bytesToArray(buffer, dt, [count, ]) now = time.time() -print(f"bytesToArray - elpased: {(now-then):6.4f} for {count} elements") +print(f"bytesToArray - elpased: {(now - then):6.4f} for {count} elements") # verify that same original strings got returned for i in range(count): diff --git a/tests/perf/nrel/nsrdb/nsrdb_test.py b/tests/perf/nrel/nsrdb/nsrdb_test.py index 3f2dbf1b..24a835ec 100644 --- a/tests/perf/nrel/nsrdb/nsrdb_test.py +++ b/tests/perf/nrel/nsrdb/nsrdb_test.py @@ -130,7 +130,7 @@ te = time.time() result[start:end] = arr msg = f" read[{start}:{end}]: {arr.min():4.2f}, {arr.max():4.2f}, " - msg += f"{arr.mean():4.2f}, {te-ts:4.2f} s" + msg += f"{arr.mean():4.2f}, {te - ts:4.2f} s" print(msg) print(f"{H5_PATH}[{index}:]: {result}") diff --git a/tests/perf/nrel/wtk/wtk_conus.py b/tests/perf/nrel/wtk/wtk_conus.py index 81005dda..0f608a1d 100644 --- a/tests/perf/nrel/wtk/wtk_conus.py +++ b/tests/perf/nrel/wtk/wtk_conus.py @@ -125,7 +125,7 @@ def print_stats(filepath, index, arr): filepaths = [] for i in range(num_years): - filename = f"wtk_conus_{2007+i}.h5" + filename = f"wtk_conus_{2007 + i}.h5" filepath = os.path.join(folderpath, filename) filepaths.append(filepath) @@ -151,7 +151,7 @@ def print_stats(filepath, index, arr): arr = year_arrs[i] print_stats(filepath, cfg["index"], arr) if iter_count > 1: - print(f"iter {i}: {(t_end-t_start):6.2f} s") + print(f"iter {i}: {(t_end - t_start):6.2f} s") else: for i in range(num_years): filepath = filepaths[i] @@ -160,7 +160,7 @@ def print_stats(filepath, index, arr): t_end = time.time() if iter_count > 1: - print(f"iter {iter}: {(t_end-t_start):6.2f} s") + print(f"iter {iter}: {(t_end - t_start):6.2f} s") print("------------") diff --git a/tests/perf/read/getLinks.py b/tests/perf/read/getLinks.py index 25271bf0..0c6a822a 100644 --- a/tests/perf/read/getLinks.py +++ b/tests/perf/read/getLinks.py @@ -51,7 +51,7 @@ def benchmark_link_multi(headers, endpoint, file): raise ValueError(msg) end = time.time() - msg = f"Time to create {link_count} links with multi API: {(end-start):6.4f} seconds" + msg = f"Time to create {link_count} links with multi API: {(end - start):6.4f} seconds" print(msg) # Retrieve many links using multi API @@ -65,7 +65,7 @@ def benchmark_link_multi(headers, endpoint, file): raise ValueError(msg) end = time.time() - msg = f"Time to retrieve {link_count} links with multi API: {(end-start):6.4f} seconds" + msg = f"Time to retrieve {link_count} links with multi API: {(end - start):6.4f} seconds" print(msg) # Check return correctness @@ -115,7 +115,7 @@ def benchmark_link_serial(headers, endpoint, file): msg = f"Could not create link #{i}: {rsp.status_code}" raise ValueError(msg) end = time.time() - msg = f"Time to create {link_count} links individually: {(end-start):6.4f} seconds" + msg = f"Time to create {link_count} links individually: {(end - start):6.4f} seconds" print(msg) # Retrieve many links from root group individually @@ -133,7 +133,7 @@ def benchmark_link_serial(headers, endpoint, file): link_rsp[i] = rsp end = time.time() - msg = f"Time to retrieve {link_count} links individually: {(end-start):6.4f} seconds" + msg = f"Time to retrieve {link_count} links individually: {(end - start):6.4f} seconds" 
     print(msg)

     # Check return correctness
diff --git a/tests/perf/read/read2d.py b/tests/perf/read/read2d.py
index 2310248d..fd0967bd 100644
--- a/tests/perf/read/read2d.py
+++ b/tests/perf/read/read2d.py
@@ -98,7 +98,7 @@ def get_option(options, arg):

     msg = f"contiguous selection: [{i}:{i + xlen}, {j}:{j + ylen}]: "
     msg += f"{arr.min():4.2f}, {arr.max():4.2f}, "
-    msg += f"{arr.mean():4.2f}, {te-ts:4.2f} s"
+    msg += f"{arr.mean():4.2f}, {te - ts:4.2f} s"
     print(msg)

     # do strided selection
@@ -119,7 +119,7 @@ def get_option(options, arg):

     msg = f"strided selection: [{start}:{end}:{stride}, {j}:{j + ylen}]: "
     msg += f"{arr.min():4.2f}, {arr.max():4.2f}, "
-    msg += f"{arr.mean():4.2f}, {te-ts:4.2f} s"
+    msg += f"{arr.mean():4.2f}, {te - ts:4.2f} s"
     print(msg)

     # do fancy selection
@@ -142,5 +142,5 @@ def get_option(options, arg):
     msg += f"{indices[-2]}, {indices[-1]}], "
     msg += f"{j}:{j + ylen}]: "
     msg += f"{arr.min():4.2f}, {arr.max():4.2f}, "
-    msg += f"{arr.mean():4.2f}, {te-ts:4.2f} s"
+    msg += f"{arr.mean():4.2f}, {te - ts:4.2f} s"
     print(msg)
diff --git a/tests/perf/socket/client.py b/tests/perf/socket/client.py
index 02e4ee41..c16ef2fa 100644
--- a/tests/perf/socket/client.py
+++ b/tests/perf/socket/client.py
@@ -72,7 +72,7 @@
     shm_block.unlink()

 if total_bytes > 1024 * 1024:
-    print(f"mb: {total_bytes//(1024*1024)}")
+    print(f"mb: {total_bytes // (1024 * 1024)}")
 else:
     print(f"bytes: {total_bytes}")
 elapse = tEnd - tStart
diff --git a/tests/perf/stream/stream_test.py b/tests/perf/stream/stream_test.py
index 87d461c7..932dbf3a 100644
--- a/tests/perf/stream/stream_test.py
+++ b/tests/perf/stream/stream_test.py
@@ -137,7 +137,7 @@ def testStream2D(self):
         # initialize bytearray to test values
         num_bytes = item_size * num_row * num_col
         print(
-            f"initializing test data ({num_bytes} bytes, {num_bytes/(1024*1024):.2f} MiB)"
+            f"initializing test data ({num_bytes} bytes, {num_bytes / (1024 * 1024):.2f} MiB)"
         )
         bin_data = bytearray(num_bytes)
         exp = int(math.log10(num_col)) + 1
diff --git a/tests/perf/table/table_read.py b/tests/perf/table/table_read.py
index 5b5d6351..287e0d50 100644
--- a/tests/perf/table/table_read.py
+++ b/tests/perf/table/table_read.py
@@ -80,7 +80,7 @@ def get_option(options, arg):
 arr_field = arr[field_name]
 msg = f"consecutive read with random start [{start}:{end}]: "
 msg += f"{arr_field.min():4.2f}, {arr_field.max():4.2f}, "
-msg += f"{arr_field.mean():4.2f}, {te-ts:4.2f} s"
+msg += f"{arr_field.mean():4.2f}, {te - ts:4.2f} s"
 print(msg)

 # read with stride
@@ -98,7 +98,7 @@ def get_option(options, arg):
     arr_field = arr[field_name]
     msg = f"strided read with random start index [{start}:{end}:{stride}]: "
     msg += f"{arr_field.min():4.2f}, {arr_field.max():4.2f}, "
-    msg += f"{arr_field.mean():4.2f}, {te-ts:4.2f} s"
+    msg += f"{arr_field.mean():4.2f}, {te - ts:4.2f} s"
     print(msg)

@@ -117,5 +117,5 @@ def get_option(options, arg):
 arr_field = arr[field_name]
 msg = "read with random indices [[n0,n1,...,nx]]: "
 msg += f"{arr_field.min():4.2f}, {arr_field.max():4.2f}, "
-msg += f"{arr_field.mean():4.2f}, {te-ts:4.2f} s"
+msg += f"{arr_field.mean():4.2f}, {te - ts:4.2f} s"
 print(msg)
diff --git a/tools/get_s3perf.py b/tools/get_s3perf.py
index 0cfd798c..a7ab371f 100644
--- a/tools/get_s3perf.py
+++ b/tools/get_s3perf.py
@@ -52,7 +52,7 @@
 print(f"elapsed_time: {(finish_time - start_time):6.2f}")
 print(f"total_bytes: {total_bytes}")
 bytes_per_sec = total_bytes / (finish_time - start_time)
-print(f"MiB/s: {(bytes_per_sec/(1024.0*1024.0)):6.2f}")
1024.0)):6.2f}") # get the maximun number of inflight requests and idle time