diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 73e634cdd..0adf7ff5a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
 ---
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.6
+    rev: v0.9.1
     hooks:
       - id: ruff
         args: [--fix, --show-fixes, --output-format, grouped]
diff --git a/cubedash/_model.py b/cubedash/_model.py
index c7d643988..2ba8c04b8 100644
--- a/cubedash/_model.py
+++ b/cubedash/_model.py
@@ -211,7 +211,7 @@ def get_products() -> List[ProductWithSummary]:
     ]
     if products and not STORE.list_complete_products():
         raise RuntimeError(
-            "No products are summarised. " "Run `cubedash-gen --all` to generate some."
+            "No products are summarised. Run `cubedash-gen --all` to generate some."
         )
     return products
 
diff --git a/cubedash/_monitoring.py b/cubedash/_monitoring.py
index 345d93a86..216788c26 100644
--- a/cubedash/_monitoring.py
+++ b/cubedash/_monitoring.py
@@ -28,8 +28,8 @@ def time_end(response: flask.Response):
         render_time = time.time() - flask.g.start_render
         response.headers.add_header(
             "Server-Timing",
-            f"app;dur={render_time*1000},"
-            f'odcquery;dur={flask.g.datacube_query_time*1000};desc="ODC query time",'
+            f"app;dur={render_time * 1000},"
+            f'odcquery;dur={flask.g.datacube_query_time * 1000};desc="ODC query time",'
             f"odcquerycount_{flask.g.datacube_query_count};"
             f'desc="{flask.g.datacube_query_count} ODC queries"',
         )
@@ -79,7 +79,7 @@ def decorator(*args, **kwargs):
         duration_secs = time.time() - start_time
         print(
             f"== Index Call == {style(function.__name__, bold=True)}: "
-            f"{duration_secs*1000}",
+            f"{duration_secs * 1000}",
             file=sys.stderr,
             flush=True,
         )
diff --git a/cubedash/_stac.py b/cubedash/_stac.py
index ecd1255fc..abb1d4e07 100644
--- a/cubedash/_stac.py
+++ b/cubedash/_stac.py
@@ -1273,7 +1273,7 @@ def collection_month(collection: str, year: int, month: int):
     date = datetime(year, month, 1).date()
     c = Catalog(
         f"{collection}-{year}-{month}",
-        description=f'{collection} for {date.strftime("%B %Y")}',
+        description=f"{collection} for {date.strftime('%B %Y')}",
     )
 
     c.links.extend(
diff --git a/cubedash/summary/_stores.py b/cubedash/summary/_stores.py
index 0b672632e..6b197812e 100644
--- a/cubedash/summary/_stores.py
+++ b/cubedash/summary/_stores.py
@@ -1718,9 +1718,9 @@ def _get_shape(geometry: WKBElement, crs) -> Optional[Geometry]:
 
     if not shape.is_valid:
         newshape = shape.buffer(0)
-        assert math.isclose(
-            shape.area, newshape.area, abs_tol=0.0001
-        ), f"{shape.area} != {newshape.area}"
+        assert math.isclose(shape.area, newshape.area, abs_tol=0.0001), (
+            f"{shape.area} != {newshape.area}"
+        )
         shape = newshape
 
     return shape
diff --git a/cubedash/warmup.py b/cubedash/warmup.py
index e2f1631c2..ad397fa70 100644
--- a/cubedash/warmup.py
+++ b/cubedash/warmup.py
@@ -219,7 +219,7 @@ def _format_time(t: float):
     if t > 1:
         return f"{t:.1f}s"
     else:
-        return f"{int(t*1000)}ms"
+        return f"{int(t * 1000)}ms"
 
 
 if __name__ == "__main__":
diff --git a/integration_tests/asserts.py b/integration_tests/asserts.py
index 863061b22..a6fab93f4 100644
--- a/integration_tests/asserts.py
+++ b/integration_tests/asserts.py
@@ -36,9 +36,9 @@ def assert_shapes_mostly_equal(
     # __tracebackhide__ = operator.methodcaller("errisinstance", AssertionError)
 
     # Check area first, as it's a nicer error message when they're wildly different.
-    assert shape1.area == pytest.approx(
-        shape2.area, abs=threshold
-    ), "Shapes have different areas"
+    assert shape1.area == pytest.approx(shape2.area, abs=threshold), (
+        "Shapes have different areas"
+    )
 
     s1 = shape1.simplify(tolerance=threshold)
     s2 = shape2.simplify(tolerance=threshold)
@@ -135,17 +135,17 @@ def check_dataset_count(html, count: int):
     __tracebackhide__ = True
     actual = html.find(".dataset-count", first=True).text
     expected = f"{count:,d}"
-    assert (
-        f"{expected} dataset" in actual
-    ), f"Incorrect dataset count: found {actual} instead of {expected}"
+    assert f"{expected} dataset" in actual, (
+        f"Incorrect dataset count: found {actual} instead of {expected}"
+    )
 
 
 def check_datesets_page_datestring(html, datestring: str):
     __tracebackhide__ = True
     actual = html.find(".overview-day-link", first=True).text
-    assert (
-        datestring == actual
-    ), f"Incorrect datestring: found {actual} instead of {datestring}"
+    assert datestring == actual, (
+        f"Incorrect datestring: found {actual} instead of {datestring}"
+    )
 
 
 def expect_values(
@@ -168,22 +168,22 @@ def expect_values(
         assert s.dataset_count == dataset_count, "wrong dataset count"
         assert s.footprint_count == footprint_count, "wrong footprint count"
         if s.footprint_count is not None and s.footprint_count > 0:
-            assert (
-                s.footprint_geometry is not None
-            ), "No footprint, despite footprint count"
+            assert s.footprint_geometry is not None, (
+                "No footprint, despite footprint count"
+            )
             assert s.footprint_geometry.area > 0, "Empty footprint"
 
         assert s.time_range == time_range, "wrong dataset time range"
-        assert s.newest_dataset_creation_time == default_utc(
-            newest_creation_time
-        ), "wrong newest dataset creation"
+        assert s.newest_dataset_creation_time == default_utc(newest_creation_time), (
+            "wrong newest dataset creation"
+        )
 
         assert s.timeline_period == timeline_period, (
-            f"Should be a {timeline_period}, " f"not {s.timeline_period} timeline"
+            f"Should be a {timeline_period}, not {s.timeline_period} timeline"
        )
 
-        assert (
-            s.summary_gen_time is not None
-        ), "Missing summary_gen_time (there's a default)"
+        assert s.summary_gen_time is not None, (
+            "Missing summary_gen_time (there's a default)"
+        )
 
         assert s.crses == crses, "Wrong dataset CRSes"
@@ -202,16 +202,16 @@ def expect_values(
                 f"Expected entry with {timeline_count} records."
             )
         else:
-            assert (
-                len(s.timeline_dataset_counts) == timeline_count
-            ), "wrong timeline entry count"
-
-        assert (
-            sum(s.region_dataset_counts.values()) == s.dataset_count
-        ), "region dataset count doesn't match total dataset count"
-        assert (
-            sum(s.timeline_dataset_counts.values()) == s.dataset_count
-        ), "timeline count doesn't match dataset count"
+            assert len(s.timeline_dataset_counts) == timeline_count, (
+                "wrong timeline entry count"
+            )
+
+        assert sum(s.region_dataset_counts.values()) == s.dataset_count, (
+            "region dataset count doesn't match total dataset count"
+        )
+        assert sum(s.timeline_dataset_counts.values()) == s.dataset_count, (
+            "timeline count doesn't match dataset count"
+        )
 
         was_timeline_error = False
         if region_dataset_counts is not None:
diff --git a/integration_tests/conftest.py b/integration_tests/conftest.py
index 41662d59f..5d558bbf6 100644
--- a/integration_tests/conftest.py
+++ b/integration_tests/conftest.py
@@ -59,9 +59,9 @@ def _run_cli(cli_method, opts, catch_exceptions=False, expect_success=True):
         opts += ("--env", env_name)
         result = runner.invoke(cli_method, opts, catch_exceptions=catch_exceptions)
         if expect_success:
-            assert (
-                0 == result.exit_code
-            ), f"Error for {opts}. Out:\n{indent(result.output, ' ' * 4)}"
+            assert 0 == result.exit_code, (
+                f"Error for {opts}. Out:\n{indent(result.output, ' ' * 4)}"
+            )
         return result
 
     return _run_cli
diff --git a/integration_tests/test_center_datetime_logic.py b/integration_tests/test_center_datetime_logic.py
index afc67c7fb..61a5ce987 100644
--- a/integration_tests/test_center_datetime_logic.py
+++ b/integration_tests/test_center_datetime_logic.py
@@ -43,21 +43,21 @@ def test_datestring_on_dataset_page(client: FlaskClient):
 
 def test_datestring_on_datasets_search_page(client: FlaskClient):
     html = get_html(client, "/products/rainfall_chirps_daily/datasets")
 
-    assert (
-        "Time UTC: 2019-05-15 00:00:00"
-        in [
-            a.find("td", first=True).attrs["title"] for a in html.find(".search-result")
-        ]
-    ), "datestring does not match expected center_time recorded in dataset_spatial table"
+    assert "Time UTC: 2019-05-15 00:00:00" in [
+        a.find("td", first=True).attrs["title"] for a in html.find(".search-result")
+    ], (
+        "datestring does not match expected center_time recorded in dataset_spatial table"
+    )
 
 
 def test_datestring_on_regions_page(client: FlaskClient):
     html = get_html(client, "/product/rainfall_chirps_daily/regions/x210y106")
 
-    assert (
-        "2019-05-15 00:00:00"
-        in [a.find("td", first=True).text.strip() for a in html.find(".search-result")]
-    ), "datestring does not match expected center_time recorded in dataset_spatial table"
+    assert "2019-05-15 00:00:00" in [
+        a.find("td", first=True).text.strip() for a in html.find(".search-result")
+    ], (
+        "datestring does not match expected center_time recorded in dataset_spatial table"
+    )
 
 
 def test_summary_center_datetime(client: FlaskClient):
diff --git a/integration_tests/test_eo3_support.py b/integration_tests/test_eo3_support.py
index 742b3ac92..270f51252 100644
--- a/integration_tests/test_eo3_support.py
+++ b/integration_tests/test_eo3_support.py
@@ -125,9 +125,9 @@ def test_eo3_extents(eo3_index: Index):
         ),
     }
     assert footprint.is_valid, "Created footprint is not a valid geometry"
-    assert (
-        dataset_extent_row["footprint"].srid == 32650
-    ), "Expected epsg:32650 within the footprint geometry"
+    assert dataset_extent_row["footprint"].srid == 32650, (
+        "Expected epsg:32650 within the footprint geometry"
+    )
 
     assert dataset_extent_row["region_code"] == "113081"
assert dataset_extent_row["size_bytes"] is None @@ -210,9 +210,9 @@ def test_undo_eo3_doc_compatibility(eo3_index: Index): with TEST_EO3_DATASET_ARD.open("r") as f: raw_doc = YAML(typ="safe", pure=True).load(f) - assert ( - indexed_doc == raw_doc - ), "Document does not match original after undoing compatibility fields." + assert indexed_doc == raw_doc, ( + "Document does not match original after undoing compatibility fields." + ) def test_undo_eo3_compatibility_del_handling(): diff --git a/integration_tests/test_page_loads.py b/integration_tests/test_page_loads.py index dd2f89e7d..64fe3d816 100644 --- a/integration_tests/test_page_loads.py +++ b/integration_tests/test_page_loads.py @@ -154,9 +154,9 @@ def test_all_products_are_shown(client: FlaskClient): a.text.strip() for a in html.find(".product-selection-header .option-menu-link") ) indexed_product_names = sorted(p.name for p in _model.STORE.all_products()) - assert ( - found_product_names == indexed_product_names - ), "Product shown in menu don't match the indexed products" + assert found_product_names == indexed_product_names, ( + "Product shown in menu don't match the indexed products" + ) def test_get_overview_product_links(client: FlaskClient): @@ -407,36 +407,36 @@ def test_api_returns_high_tide_comp_datasets(client: FlaskClient): These are slightly fun to handle as they are a small number with a huge time range. """ geojson = get_geojson(client, "/api/datasets/high_tide_comp_20p") - assert ( - len(geojson["features"]) == 306 - ), "Not all high tide datasets returned as geojson" + assert len(geojson["features"]) == 306, ( + "Not all high tide datasets returned as geojson" + ) # Search and time summary is only based on center time. # These searches are within the dataset time range, but not the center_time. # Dataset range: '2000-01-01T00:00:00' to '2016-10-31T00:00:00' # year geojson = get_geojson(client, "/api/datasets/high_tide_comp_20p/2008") - assert ( - len(geojson["features"]) == 306 - ), "Expected high tide datasets within whole dataset range" + assert len(geojson["features"]) == 306, ( + "Expected high tide datasets within whole dataset range" + ) # month geojson = get_geojson(client, "/api/datasets/high_tide_comp_20p/2008/6") - assert ( - len(geojson["features"]) == 306 - ), "Expected high tide datasets within whole dataset range" + assert len(geojson["features"]) == 306, ( + "Expected high tide datasets within whole dataset range" + ) # day geojson = get_geojson(client, "/api/datasets/high_tide_comp_20p/2008/6/1") - assert ( - len(geojson["features"]) == 306 - ), "Expected high tide datasets within whole dataset range" + assert len(geojson["features"]) == 306, ( + "Expected high tide datasets within whole dataset range" + ) # Out of the test dataset time range. No results. # Completely outside of range geojson = get_geojson(client, "/api/datasets/high_tide_comp_20p/2018") - assert ( - len(geojson["features"]) == 0 - ), "Expected no high tide datasets in in this year" + assert len(geojson["features"]) == 0, ( + "Expected no high tide datasets in in this year" + ) # One day before/after (is time zone handling correct?) 
geojson = get_geojson(client, "/api/datasets/high_tide_comp_20p/2008/6/2") assert len(geojson["features"]) == 0, "Expected no result one-day-after center time" @@ -469,9 +469,9 @@ def test_api_returns_high_tide_comp_regions(client: FlaskClient): """ rv: Response = client.get("/api/regions/high_tide_comp_20p") - assert ( - rv.status_code == 404 - ), "High tide comp does not support regions: it should return not-exist code." + assert rv.status_code == 404, ( + "High tide comp does not support regions: it should return not-exist code." + ) def test_api_returns_scene_regions(client: FlaskClient): @@ -908,9 +908,9 @@ def test_with_timings(client: FlaskClient): for f in rv.headers["Server-Timing"].split(",") if f.startswith("odcquerycount_") ] - assert ( - count_header - ), f"No query count server timing header found in {rv.headers['Server-Timing']}" + assert count_header, ( + f"No query count server timing header found in {rv.headers['Server-Timing']}" + ) # Example header: # app;dur=1034.12,odcquery;dur=103.03;desc="ODC query time",odcquerycount_6;desc="6 ODC queries" @@ -969,9 +969,9 @@ def test_get_robots(client: FlaskClient): num_lines = len(text.split("\n")) assert num_lines > 1, "robots.txt should have multiple lines" - assert ( - rv.headers["Content-Type"] == "text/plain" - ), "robots.txt content-type should be text/plain" + assert rv.headers["Content-Type"] == "text/plain", ( + "robots.txt content-type should be text/plain" + ) def test_all_give_404s(client: FlaskClient): diff --git a/integration_tests/test_pages_render.py b/integration_tests/test_pages_render.py index 804f4a12b..9c4854a63 100644 --- a/integration_tests/test_pages_render.py +++ b/integration_tests/test_pages_render.py @@ -89,9 +89,9 @@ def test_allows_null_product_fixed_fields( """ # WHEN we have some products summarised - assert ( - summary_store.list_complete_products() - ), "There's no summarised products to test" + assert summary_store.list_complete_products(), ( + "There's no summarised products to test" + ) # AND there's some with null fixed_metadata (ie. pre-Explorer0-EO3-update) with odc_test_db.index._active_connection() as conn: diff --git a/integration_tests/test_stac.py b/integration_tests/test_stac.py index 3a096433b..e71c13665 100644 --- a/integration_tests/test_stac.py +++ b/integration_tests/test_stac.py @@ -219,7 +219,7 @@ def retrieve(uri: str): def get_extension(url: str) -> jsonschema.Draft7Validator: if not is_url(url): raise ValueError( - f"stac extensions are now expected to be URLs in 1.0.0. " f"Got {url!r}" + f"stac extensions are now expected to be URLs in 1.0.0. Got {url!r}" ) return load_schema_doc(_web_reference(url), location=url) @@ -314,9 +314,9 @@ def validate_item(item: Dict): if item["geometry"] is not None: with DebugContext(f"Failing shape:\n{pformat(item['geometry'])}"): shape = shapely_shape(item["geometry"]) - assert ( - shape.is_valid - ), f"Item has invalid geometry: {explain_validity(shape)}" + assert shape.is_valid, ( + f"Item has invalid geometry: {explain_validity(shape)}" + ) assert shape.geom_type in ( "Polygon", "MultiPolygon", @@ -347,9 +347,9 @@ def validate_items( product_counts[item["properties"].get("odc:product", item["collection"])] += 1 # Assert there's no duplicates - assert ( - id_ not in seen_ids - ), f"Duplicate dataset item (record {i}) of search results: {id_}" + assert id_ not in seen_ids, ( + f"Duplicate dataset item (record {i}) of search results: {id_}" + ) seen_ids.add(id_) # Assert they are all ordered (including across pages!) 
@@ -357,9 +357,9 @@ def validate_items(
             # TODO: this is actually a (date, id) sort, but our test data has no duplicate dates.
             prev_dt = last_item["properties"]["datetime"]
             this_dt = item["properties"]["datetime"]
-            assert (
-                prev_dt < this_dt
-            ), f"Items {i} and {i - 1} out of order: {prev_dt} > {this_dt}"
+            assert prev_dt < this_dt, (
+                f"Items {i} and {i - 1} out of order: {prev_dt} > {this_dt}"
+            )
         i += 1
 
     # Note that the above block stops most paging infinite loops quickly
@@ -371,9 +371,7 @@ def validate_items(
     )
     if isinstance(expect_count, int):
         assert i == expect_count, (
-            f"Expected {expect_count} items.\n"
-            "Got:\n"
-            f"\t{printable_product_counts}"
+            f"Expected {expect_count} items.\nGot:\n\t{printable_product_counts}"
         )
     else:
         assert product_counts == expect_count
@@ -559,9 +557,9 @@ def test_stac_links(stac_client: FlaskClient):
     _CATALOG_SCHEMA.validate(response)
 
     assert response["id"] == "odc-explorer", "Expected default unconfigured endpoint id"
-    assert (
-        response["title"] == "Default ODC Explorer instance"
-    ), "Expected default unconfigured endpoint title"
+    assert response["title"] == "Default ODC Explorer instance", (
+        "Expected default unconfigured endpoint title"
+    )
 
     # A child link to each "collection" (product)
     child_links = [r for r in response["links"] if r["rel"] == "child"]
@@ -970,7 +968,7 @@ def test_next_link(stac_client: FlaskClient):
     # next link should return next page of results
     geojson = get_items(
         stac_client,
-        ("/stac/search?" "collections=ga_ls8c_ard_3,ls7_nbart_albers"),
+        ("/stac/search?collections=ga_ls8c_ard_3,ls7_nbart_albers"),
     )
 
     assert geojson.get("numberMatched") > len(geojson.get("features"))
@@ -1048,7 +1046,7 @@ def geojson_feature_ids(d: Dict) -> List[str]:
     # least better than the old Postgres error
     error_message_json = get_json(
         stac_client,
-        ("/stac/search?&collection=ls7_nbart_albers" "&ids=7a[-fd04ad[-"),
+        ("/stac/search?&collection=ls7_nbart_albers&ids=7a[-fd04ad[-"),
         expect_status_code=400,
     )
     assert error_message_json["name"] == "Bad Request"
@@ -1158,14 +1156,14 @@ def test_stac_search_collections(stac_client: FlaskClient):
     # Get all in one collection
     geojson = get_items(
         stac_client,
-        ("/stac/search?" "&collections=ls7_nbart_scene" "&limit=20"),
+        ("/stac/search?&collections=ls7_nbart_scene&limit=20"),
     )
     assert len(geojson.get("features")) == 4
 
     # Get all the datasets for two collections
     geojson = get_items(
         stac_client,
-        ("/stac/search?" "&collections=ls7_nbart_scene,ls7_nbar_scene" "&limit=20"),
+        ("/stac/search?&collections=ls7_nbart_scene,ls7_nbar_scene&limit=20"),
     )
     # Four datasets each.
     assert len(geojson.get("features")) == 8
@@ -1185,7 +1183,7 @@ def test_stac_search_collections(stac_client: FlaskClient):
     # (its doesn't mean match-the-empty-list!)
     geojson = get_items(
         stac_client,
-        ("/stac/search?" "&collections=" "&limit=20"),
+        ("/stac/search?&collections=&limit=20"),
     )
     assert len(geojson.get("features")) > 0
@@ -1275,9 +1273,9 @@ def test_stac_search_by_post(stac_client: FlaskClient):
         bands = ["blue", "green", "nir", "red", "swir1", "swir2"]
         with DebugContext(f"feature {feature['id']}"):
             # TODO: These are the same file in a NetCDF. They should probably be one asset?
- assert len(feature["assets"]) == len( - bands - ), f"Expected an asset per band, got {feature['assets']!r}" + assert len(feature["assets"]) == len(bands), ( + f"Expected an asset per band, got {feature['assets']!r}" + ) assert set(feature["assets"].keys()) == set(bands) while bands: band = bands.pop() @@ -1289,7 +1287,7 @@ def test_stac_search_by_post(stac_client: FlaskClient): # These have no path, so they should be the dataset location itself with a layer. # (this is a .nc file in reality, but our test data loading creates weird locations) assert ( - band_d["href"] == f'file://example.com/test_dataset/{feature["id"]}' + band_d["href"] == f"file://example.com/test_dataset/{feature['id']}" ) # Validate stac item with jsonschema diff --git a/integration_tests/test_stores.py b/integration_tests/test_stores.py index 1f04d96fd..ee78456f4 100644 --- a/integration_tests/test_stores.py +++ b/integration_tests/test_stores.py @@ -198,19 +198,19 @@ def test_put_get_summaries(summary_store: SummaryStore): loaded = summary_store.get(product_name, 2017, None, None) assert o is not loaded, ( - "Store should not return the original objects " "(they may change)" + "Store should not return the original objects (they may change)" + ) + assert o.summary_gen_time is not None, ( + "Summary-gen-time should have been added by the server" ) - assert ( - o.summary_gen_time is not None - ), "Summary-gen-time should have been added by the server" original_gen_time = o.summary_gen_time assert o.footprint_geometry.area == pytest.approx(4.857_924_619_872) assert loaded.dataset_count == 4 - assert ( - sum(loaded.region_dataset_counts.values()) == 4 - ), "Region dataset counts don't match total count" + assert sum(loaded.region_dataset_counts.values()) == 4, ( + "Region dataset counts don't match total count" + ) assert sorted(loaded.region_dataset_counts.keys()) == [ "1_2", "3_4", @@ -232,9 +232,9 @@ def test_put_get_summaries(summary_store: SummaryStore): assert loaded.newest_dataset_creation_time == datetime( 2018, 2, 2, 2, 2, 2, tzinfo=tz.tzutc() ) - assert ( - loaded.summary_gen_time != original_gen_time - ), "An update should update the generation time" + assert loaded.summary_gen_time != original_gen_time, ( + "An update should update the generation time" + ) def test_generate_empty(run_generate): diff --git a/integration_tests/test_summarise_data.py b/integration_tests/test_summarise_data.py index 429a4ff98..f2e1b7760 100644 --- a/integration_tests/test_summarise_data.py +++ b/integration_tests/test_summarise_data.py @@ -170,9 +170,9 @@ def test_generate_incremental_archivals(run_generate, summary_store: SummaryStor # It should be in the count again. 
     # (this change should work because the new 'updated' column will be bumped on restore)
     run_generate("ga_ls9c_ard_3")
-    assert (
-        summary_store.get("ga_ls9c_ard_3").dataset_count == original_dataset_count
-    ), "A dataset that was restored from archival was not refreshed by Explorer"
+    assert summary_store.get("ga_ls9c_ard_3").dataset_count == original_dataset_count, (
+        "A dataset that was restored from archival was not refreshed by Explorer"
+    )
 
 
 def _one_dataset(index: Index, product_name: str):
@@ -226,9 +226,9 @@ def test_dataset_changing_product(run_generate, summary_store: SummaryStore):
     _change_dataset_product(index, dataset_id, our_product)
 
     run_generate("ga_ls8c_ard_3", "ga_ls9c_ard_3", "--force-refresh")
-    assert (
-        summary_store.get("ga_ls9c_ard_3").dataset_count == original_dataset_count
-    ), "Expected dataset to be added again after the product changed back"
+    assert summary_store.get("ga_ls9c_ard_3").dataset_count == original_dataset_count, (
+        "Expected dataset to be added again after the product changed back"
+    )
 
 
 def _change_dataset_product(index: Index, dataset_id: UUID, other_product: Product):
@@ -346,9 +346,9 @@ def test_sampled_product_fixed_fields(summary_store: SummaryStore):
 
 def test_generate_empty_time(run_generate, summary_store: SummaryStore):
     run_generate("ls8_nbar_albers")
     # No datasets in 2018
-    assert (
-        summary_store.get("ls8_nbar_albers", year=2018) is None
-    ), "There should be no datasets in 2018"
+    assert summary_store.get("ls8_nbar_albers", year=2018) is None, (
+        "There should be no datasets in 2018"
+    )
 
     # Year that does not exist for LS8
     summary = summary_store.get("ls8_nbar_albers", year=2006, month=None, day=None)
@@ -523,9 +523,9 @@ def test_calc_albers_summary_with_storage(summary_store: SummaryStore):
     assert original is not cached_s
     assert cached_s.dataset_count == original.dataset_count
     assert cached_s.summary_gen_time is not None
-    assert (
-        cached_s.summary_gen_time == original.summary_gen_time
-    ), "A new, rather than cached, summary was returned"
+    assert cached_s.summary_gen_time == original.summary_gen_time, (
+        "A new, rather than cached, summary was returned"
+    )
 
 
 def test_cubedash_gen_refresh(run_generate, odc_test_db: Datacube, empty_client):
@@ -549,9 +549,9 @@ def _get_product_seq_value():
 
     # Value wasn't incremented!
     value_after_rerun = _get_product_seq_value()
-    assert (
-        value_after_rerun == original_value
-    ), "Product sequence was incremented without any new products being added."
+    assert value_after_rerun == original_value, (
+        "Product sequence was incremented without any new products being added."
+    )
 
 
 def test_computed_regions_match_those_summarised(summary_store: SummaryStore):
diff --git a/integration_tests/test_utc_tst.py b/integration_tests/test_utc_tst.py
index a284d1500..237c4e8b8 100644
--- a/integration_tests/test_utc_tst.py
+++ b/integration_tests/test_utc_tst.py
@@ -66,24 +66,25 @@ def test_yearly_dataset_count(client: FlaskClient):
 
 def test_dataset_search_page_localised_time(client: FlaskClient):
     html = get_html(client, "/products/ga_ls9c_ard_3/datasets/2022")
-    assert (
-        "2022-01-01 08:11:00"
-        in [a.find("td", first=True).text.strip() for a in html.find(".search-result")]
-    ), "datestring does not match expected center_time recorded in dataset_spatial table"
-
-    assert (
-        "Time UTC: 2021-12-31 22:41:00"
-        in [
-            a.find("td", first=True).attrs["title"] for a in html.find(".search-result")
-        ]
-    ), "datestring does not match expected center_time recorded in dataset_spatial table"
+    assert "2022-01-01 08:11:00" in [
+        a.find("td", first=True).text.strip() for a in html.find(".search-result")
+    ], (
+        "datestring does not match expected center_time recorded in dataset_spatial table"
+    )
+
+    assert "Time UTC: 2021-12-31 22:41:00" in [
+        a.find("td", first=True).attrs["title"] for a in html.find(".search-result")
+    ], (
+        "datestring does not match expected center_time recorded in dataset_spatial table"
+    )
 
     html = get_html(client, "/products/ga_ls9c_ard_3/datasets/2021")
-    assert (
-        "2021-12-04 11:05:22"
-        in [a.find("td", first=True).text.strip() for a in html.find(".search-result")]
-    ), "datestring does not match expected center_time recorded in dataset_spatial table"
+    assert "2021-12-04 11:05:22" in [
+        a.find("td", first=True).text.strip() for a in html.find(".search-result")
+    ], (
+        "datestring does not match expected center_time recorded in dataset_spatial table"
+    )
 
 
 def test_clirunner_generate_grouping_timezone(odc_test_db, run_generate, empty_client):
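
Every Python hunk above follows the same pattern and appears to be the mechanical result of the ruff-pre-commit bump from v0.8.6 to v0.9.1 in the first hunk: ruff 0.9's formatter moves the parentheses of a long `assert` from the condition to the message, joins implicitly concatenated string literals, normalises quoting inside f-strings, and adds spaces around binary operators in f-string interpolations. A minimal sketch of the before/after shapes (a standalone illustration with invented values, not code from this repository):

    import math

    x, y = 1.00005, 1.00009

    # Old (ruff <= 0.8) layout: the condition is wrapped in parentheses and
    # the message trails the closing paren.
    assert math.isclose(
        x, y, abs_tol=0.0001
    ), f"{x} != {y}"

    # New (ruff >= 0.9) layout: the condition stays on one line and the
    # message is parenthesised instead.
    assert math.isclose(x, y, abs_tol=0.0001), (
        f"{x} != {y}"
    )

Both layouts are equivalent at runtime, which is why every hunk in this diff changes formatting only, never behaviour.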