diff --git a/.github/workflows/core_tests.yml b/.github/workflows/core_tests.yml index 0c11dd5df..fcfe1e545 100644 --- a/.github/workflows/core_tests.yml +++ b/.github/workflows/core_tests.yml @@ -51,13 +51,12 @@ jobs: "psutil=5.9.5" \ "pydantic=2.6.1" \ "pypyr=5.8.0" \ - "pytables=3.6.1" \ + "pytables=3.9.2" \ "pytest-cov" \ "pytest-regressions=2.5.0" \ "scikit-learn=1.2.2" \ - "sharrow>=2.6.0" \ + "sharrow>=2.7.0" \ "simwrapper=1.8.5" \ - "xarray=2023.2.0" \ "zarr=2.14.2" \ "zstandard=0.21.0" if: steps.cache.outputs.cache-hit != 'true' @@ -151,13 +150,12 @@ jobs: "psutil=5.9.5" \ "pydantic=2.6.1" \ "pypyr=5.8.0" \ - "pytables=3.6.1" \ + "pytables=3.9.2" \ "pytest-cov" \ "pytest-regressions=2.5.0" \ "scikit-learn=1.2.2" \ - "sharrow>=2.6.0" \ + "sharrow>=2.7.0" \ "simwrapper=1.8.5" \ - "xarray=2023.2.0" \ "zarr=2.14.2" \ "zstandard=0.21.0" if: steps.cache.outputs.cache-hit != 'true' @@ -249,13 +247,12 @@ jobs: "psutil=5.9.5" \ "pydantic=2.6.1" \ "pypyr=5.8.0" \ - "pytables=3.6.1" \ + "pytables=3.9.2" \ "pytest-cov" \ "pytest-regressions=2.5.0" \ "scikit-learn=1.2.2" \ - "sharrow>=2.6.0" \ + "sharrow>=2.7.0" \ "simwrapper=1.8.5" \ - "xarray=2023.2.0" \ "zarr=2.14.2" \ "zstandard=0.21.0" if: steps.cache.outputs.cache-hit != 'true' @@ -346,13 +343,12 @@ jobs: "psutil=5.9.5" \ "pydantic=2.6.1" \ "pypyr=5.8.0" \ - "pytables=3.6.1" \ + "pytables=3.9.2" \ "pytest-cov" \ "pytest-regressions=2.5.0" \ "scikit-learn=1.2.2" \ - "sharrow>=2.6.0" \ + "sharrow>=2.7.0" \ "simwrapper=1.8.5" \ - "xarray=2023.2.0" \ "zarr=2.14.2" \ "zstandard=0.21.0" if: steps.cache.outputs.cache-hit != 'true' @@ -413,13 +409,12 @@ jobs: "psutil=5.9.5" \ "pydantic=2.6.1" \ "pypyr=5.8.0" \ - "pytables=3.6.1" \ + "pytables=3.9.2" \ "pytest-cov" \ "pytest-regressions=2.5.0" \ "scikit-learn=1.2.2" \ - "sharrow>=2.6.0" \ + "sharrow>=2.7.0" \ "simwrapper=1.8.5" \ - "xarray=2023.2.0" \ "zarr=2.14.2" \ "zstandard=0.21.0" if: steps.cache.outputs.cache-hit != 'true' @@ -479,13 +474,12 @@ jobs: "psutil=5.9.5" 
\ "pydantic=2.6.1" \ "pypyr=5.8.0" \ - "pytables=3.6.1" \ + "pytables=3.9.2" \ "pytest-cov" \ "pytest-regressions=2.5.0" \ "scikit-learn=1.2.2" \ - "sharrow>=2.6.0" \ + "sharrow>=2.7.0" \ "simwrapper=1.8.5" \ - "xarray=2023.2.0" \ "zarr=2.14.2" \ "zstandard=0.21.0" if: steps.cache.outputs.cache-hit != 'true' diff --git a/activitysim/abm/models/disaggregate_accessibility.py b/activitysim/abm/models/disaggregate_accessibility.py index 8d1102743..851e7f0f4 100644 --- a/activitysim/abm/models/disaggregate_accessibility.py +++ b/activitysim/abm/models/disaggregate_accessibility.py @@ -569,7 +569,7 @@ def expand_template_zones(self, tables): _expanded = pd.DataFrame(util.named_product(**index_params)).set_index("index") # Use result to join template onto expanded table of zones - ex_table = _expanded.join(master_template).reset_index() + ex_table = _expanded.join(master_template).sort_index().reset_index() # Concatenate a new unique set of ids cols = ["home_zone_id", "proto_household_id", "proto_person_id"] @@ -642,7 +642,9 @@ def create_proto_pop(self): .set_index("index") .rename(columns={"hhid": hhid}) ) - persons = rep.join(persons).sort_values(hhid).reset_index(drop=True) + persons = ( + rep.join(persons, sort=True).sort_values(hhid).reset_index(drop=True) + ) persons[perid] = persons.index + 1 # Assign persons to tours @@ -718,6 +720,7 @@ def merge_persons(self): perid = self.params["proto_persons"]["index_col"] persons_merged.set_index(perid, inplace=True, drop=True) + persons_merged = persons_merged.sort_index() self.proto_pop["proto_persons_merged"] = persons_merged # Store in pipeline diff --git a/activitysim/abm/models/input_checker.py b/activitysim/abm/models/input_checker.py index b8a94f369..db9f58c12 100644 --- a/activitysim/abm/models/input_checker.py +++ b/activitysim/abm/models/input_checker.py @@ -300,34 +300,42 @@ def report_errors(state, input_checker_settings, v_warnings, v_errors): for warn in warns: if "dataframe validator" in str(warn.message): - 
file_logger.warning( - "Failed dataframe validator: " - + str(warn.message).split("\n")[-1] - ) - elif "element-wise validator" in str(warn.message): - if "DataFrameSchema" in str(warn.message): - file_logger.warning( - "Failed element-wise validator: <" - + str(warn.message).split("\n")[0].split(" ")[1] - + table_name - + ")>\n\t" - + str(warn.message) - .split("failure cases:\n")[0] - .split("\n")[-2] - + "\n\tfailure cases:\n\t" - + "\n\t".join( - str(warn.message) - .split("failure cases:\n")[1] - .split("\n") - ) - ) - else: + try: file_logger.warning( - "Failed element-wise validator: <" - + " ".join(str(warn.message).split("\n")[0].split(" ")[1:3]) - + "\n\t" - + "\n\t".join(str(warn.message).split("\n")[1:]) + "Failed dataframe validator: " + + str(warn.message).split("\n")[-1] ) + except Exception: + file_logger.warning(warn) + elif "element-wise validator" in str(warn.message): + try: + if "DataFrameSchema" in str(warn.message): + file_logger.warning( + "Failed element-wise validator: <" + + str(warn.message).split("\n")[0].split(" ")[1] + + table_name + + ")>\n\t" + + str(warn.message) + .split("failure cases:\n")[0] + .split("\n")[-2] + + "\n\tfailure cases:\n\t" + + "\n\t".join( + str(warn.message) + .split("failure cases:\n")[1] + .split("\n") + ) + ) + else: + file_logger.warning( + "Failed element-wise validator: <" + + " ".join( + str(warn.message).split("\n")[0].split(" ")[1:3] + ) + + "\n\t" + + "\n\t".join(str(warn.message).split("\n")[1:]) + ) + except Exception: + file_logger.warning(warn) else: file_logger.warning(warn) file_logger.warning("\n") diff --git a/activitysim/abm/models/school_escorting.py b/activitysim/abm/models/school_escorting.py index b3aaf2b60..61f3c9dd6 100644 --- a/activitysim/abm/models/school_escorting.py +++ b/activitysim/abm/models/school_escorting.py @@ -634,7 +634,7 @@ def school_escorting( state.add_table("tours", tours) state.get_rn_generator().drop_channel("tours") state.get_rn_generator().add_channel("tours", 
tours) - state.add_table("escort_bundles", escort_bundles) + state.add_table("escort_bundles", escort_bundles.reset_index(drop=True)) # save school escorting tours and trips in pipeline so we can overwrite results from downstream models state.add_table("school_escort_tours", school_escort_tours) state.add_table("school_escort_trips", school_escort_trips) diff --git a/activitysim/abm/models/trip_departure_choice.py b/activitysim/abm/models/trip_departure_choice.py index 43f02df34..6ecd68ca8 100644 --- a/activitysim/abm/models/trip_departure_choice.py +++ b/activitysim/abm/models/trip_departure_choice.py @@ -386,7 +386,7 @@ def choose_tour_leg_pattern( def apply_stage_two_model(state, omnibus_spec, trips, chunk_size, trace_label): - if not trips.index.is_monotonic: + if not trips.index.is_monotonic_increasing: trips = trips.sort_index() # Assign the duration of the appropriate leg to the trip diff --git a/activitysim/abm/models/util/school_escort_tours_trips.py b/activitysim/abm/models/util/school_escort_tours_trips.py index d8b5386eb..5af26ffa1 100644 --- a/activitysim/abm/models/util/school_escort_tours_trips.py +++ b/activitysim/abm/models/util/school_escort_tours_trips.py @@ -100,7 +100,7 @@ def create_chauf_escort_trips(bundles): "outbound", "purpose", ] - ).reset_index() + ).reset_index(drop=True) # numbering trips such that outbound escorting trips must come first and inbound trips must come last outbound_trip_num = -1 * ( @@ -240,7 +240,7 @@ def create_escortee_trips(bundles): # create a new trip for each escortee destination escortee_trips = escortee_trips.explode( ["destination", "escort_participants", "school_escort_trip_num", "purpose"] - ).reset_index() + ).reset_index(drop=True) # numbering trips such that outbound escorting trips must come first and inbound trips must come last # this comes in handy when merging trips to others in the tour decided downstream diff --git a/activitysim/abm/models/vehicle_allocation.py 
b/activitysim/abm/models/vehicle_allocation.py index a341493ff..e10af1947 100644 --- a/activitysim/abm/models/vehicle_allocation.py +++ b/activitysim/abm/models/vehicle_allocation.py @@ -236,6 +236,8 @@ def vehicle_allocation( logger.info("Running for occupancy = %d", occup) # setting occup for access in spec expressions locals_dict.update({"occup": occup}) + if model_settings.sharrow_skip: + locals_dict["disable_sharrow"] = True choices = simulate.simple_simulate( state, @@ -258,6 +260,7 @@ def vehicle_allocation( choices.loc[choices["alt_choice"] == alt, "choice"] = choosers.loc[ choices["alt_choice"] == alt, alt ] + choices["choice"] = choices["choice"].astype(veh_choice_dtype) choices.loc[ choices["alt_choice"] == alts_from_spec[-1], "choice" ] = alts_from_spec[-1] diff --git a/activitysim/cli/create.py b/activitysim/cli/create.py index 7211471d7..6821ad4c5 100644 --- a/activitysim/cli/create.py +++ b/activitysim/cli/create.py @@ -2,6 +2,7 @@ import glob import hashlib +import importlib.resources import logging import os import shutil @@ -21,14 +22,15 @@ def _example_path(resource): resource = os.path.join(EXAMPLES_DIR, resource) - path = pkg_resources.resource_filename(PACKAGE, resource) - - return path + return importlib.resources.as_file( + importlib.resources.files(PACKAGE).joinpath(resource) + ) def _load_manifest(): - with open(_example_path(MANIFEST), "r") as f: - manifest = yaml.safe_load(f.read()) + with _example_path(MANIFEST) as f_pth: + with open(f_pth, "r") as f: + manifest = yaml.safe_load(f.read()) assert manifest, f"error: could not load {MANIFEST}" return {example["name"]: example for example in manifest} @@ -177,8 +179,9 @@ def get_example( ) else: - for asset_path in glob.glob(_example_path(assets)): - copy_asset(asset_path, target_path, dirs_exist_ok=True) + with _example_path(assets) as pth: + for asset_path in glob.glob(str(pth)): + copy_asset(asset_path, target_path, dirs_exist_ok=True) print(f"copied! 
new project files are in {os.path.abspath(dest_path)}") diff --git a/activitysim/core/assign.py b/activitysim/core/assign.py index 19693f2ab..4ecb3feeb 100644 --- a/activitysim/core/assign.py +++ b/activitysim/core/assign.py @@ -96,7 +96,36 @@ def read_assignment_spec( """ try: - cfg = pd.read_csv(file_name, comment="#") + # we use an explicit list of na_values, these are the values that + # Pandas version 1.5 recognized as NaN by default. Notably absent is + # 'None' which is used in some spec files to be the object `None` not + # the float value NaN. + cfg = pd.read_csv( + file_name, + comment="#", + na_values=[ + "", + "#N/A", + "#N/A N/A", + "#NA", + "-1.#IND", + "-1.#QNAN", + "-NaN", + "-nan", + "1.#IND", + "1.#QNAN", + "<NA>", + "N/A", + "NA", + "NULL", + "NaN", + "n/a", + "nan", + "null", + ], + keep_default_na=False, + ) + except Exception as e: logger.error(f"Error reading spec file: {file_name}") logger.error(str(e)) diff --git a/activitysim/core/los.py b/activitysim/core/los.py index 8586a3018..5ac90f930 100644 --- a/activitysim/core/los.py +++ b/activitysim/core/los.py @@ -780,7 +780,15 @@ def get_mazpairs(self, omaz, dmaz, attribute): self.maz_ceiling ) + np.asanyarray(dmaz, dtype=np.int64) else: - i = np.asanyarray(omaz) * self.maz_ceiling + np.asanyarray(dmaz) + # if we have less than a 32-bit index, it will + # overflow so we need to upgrade to at least 32 bit + omaz_as_array = np.asanyarray(omaz) + if omaz_as_array.dtype not in (np.int32, np.int64): + omaz_as_array = omaz_as_array.astype(np.int32) + dmaz_as_array = np.asanyarray(dmaz) + if dmaz_as_array.dtype not in (np.int32, np.int64): + dmaz_as_array = dmaz_as_array.astype(np.int32) + i = omaz_as_array * self.maz_ceiling + dmaz_as_array s = util.quick_loc_df(i, self.maz_to_maz_df, attribute) # FIXME - no point in returning series?
diff --git a/activitysim/core/test/_tools.py b/activitysim/core/test/_tools.py index b8f338ce9..618b467bb 100644 --- a/activitysim/core/test/_tools.py +++ b/activitysim/core/test/_tools.py @@ -165,8 +165,8 @@ def progressive_checkpoint_test( if ref_target.exists(): try: state.checkpoint.check_against(ref_target, checkpoint_name=step_name) - except Exception: - print(f"> {name} {step_name}: ERROR") + except Exception as e: + print(f"> {name} {step_name}: ERROR {e}") raise else: print(f"> {name} {step_name}: ok") diff --git a/activitysim/core/util.py b/activitysim/core/util.py index 3130b18f5..80c9d4c0b 100644 --- a/activitysim/core/util.py +++ b/activitysim/core/util.py @@ -289,7 +289,7 @@ def quick_loc_series(loc_list, target_series): left_on = "left" - if isinstance(loc_list, pd.Int64Index): + if isinstance(loc_list, pd.Index): left_df = pd.DataFrame({left_on: loc_list.values}) elif isinstance(loc_list, pd.Series): left_df = loc_list.to_frame(name=left_on) diff --git a/activitysim/core/workflow/state.py b/activitysim/core/workflow/state.py index f21810bb5..b4bc017b1 100644 --- a/activitysim/core/workflow/state.py +++ b/activitysim/core/workflow/state.py @@ -714,7 +714,18 @@ def get_pyarrow( if t is None: raise KeyError(tablename) if isinstance(t, pd.DataFrame): - t = pa.Table.from_pandas(t, preserve_index=True, columns=columns) + df = t + try: + t = pa.Table.from_pandas(df, preserve_index=True, columns=columns) + except (pa.ArrowTypeError, pa.ArrowInvalid): + # if there are object columns, try to convert them to categories + df = df.copy() + for k, dtype in df.dtypes.items(): + if dtype.kind == "O": + df[k] = df[k].astype("str") + elif dtype == "boolean": + df[k] = df[k].astype("str") + t = pa.Table.from_pandas(df, preserve_index=True, columns=columns) if isinstance(t, pa.Table): if columns is not None: t = t.select(columns) diff --git a/activitysim/examples/placeholder_sandag/test/test_sandag.py b/activitysim/examples/placeholder_sandag/test/test_sandag.py index 
32e18ff1c..704952bf6 100644 --- a/activitysim/examples/placeholder_sandag/test/test_sandag.py +++ b/activitysim/examples/placeholder_sandag/test/test_sandag.py @@ -196,6 +196,57 @@ def test_2_zone(data): run_test(zone="2", multiprocess=False) +def test_2_zone_local_compute(data): + def _test_path(dirname): + return os.path.join(os.path.dirname(__file__), dirname) + + import activitysim.abm # register components # noqa: F401 + + state = workflow.State.make_default( + data_dir=example_path("data_2"), + configs_dir=( + _test_path("configs_2_zone"), + example_path("configs_2_zone"), + psrc_example_path("configs"), + ), + output_dir=_test_path("output_2"), + ) + state.run.all(resume_after=None) + # ## regress tours + regress_tours_df = pd.read_csv(_test_path(f"regress/final_2_zone_tours.csv")) + tours_df = pd.read_csv(_test_path(f"output_2/final_2_zone_tours.csv")) + tours_df.to_csv( + _test_path(f"regress/final_2_zone_tours_last_run_localcompute.csv"), index=False + ) + test.assert_frame_substantively_equal( + tours_df, regress_tours_df, rtol=1e-03, check_dtype=False + ) + + # ## regress trips + regress_trips_df = pd.read_csv(_test_path(f"regress/final_2_zone_trips.csv")) + trips_df = pd.read_csv(_test_path(f"output_2/final_2_zone_trips.csv")) + trips_df.to_csv(_test_path(f"regress/final_2_zone_trips_last_run.csv"), index=False) + test.assert_frame_substantively_equal( + trips_df, regress_trips_df, rtol=1e-03, check_dtype=False + ) + + # also test accessibility for the 2-zone system + regress_accessibility_df = pd.read_csv( + _test_path(f"regress/final_2_zone_proto_disaggregate_accessibility.csv") + ) + final_accessibility_df = pd.read_csv( + _test_path(f"output_2/final_2_zone_proto_disaggregate_accessibility.csv") + ) + final_accessibility_df = final_accessibility_df[ + [c for c in final_accessibility_df.columns if not c.startswith("_original_")] + ] + test.assert_frame_substantively_equal( + final_accessibility_df, + regress_accessibility_df, + check_dtype=False, + ) 
+ + def test_2_zone_norecode(data): run_test(zone="2", multiprocess=False, recode=False) diff --git a/activitysim/examples/prototype_mtc_extended/configs/trip_mode_choice_annotate_trips_preprocessor.csv b/activitysim/examples/prototype_mtc_extended/configs/trip_mode_choice_annotate_trips_preprocessor.csv index fefe54a13..df7059cd8 100644 --- a/activitysim/examples/prototype_mtc_extended/configs/trip_mode_choice_annotate_trips_preprocessor.csv +++ b/activitysim/examples/prototype_mtc_extended/configs/trip_mode_choice_annotate_trips_preprocessor.csv @@ -19,7 +19,7 @@ Description,Target,Expression ,tour_mode_is_drive_transit,i_tour_mode.isin(I_DRIVE_TRANSIT_MODES) ,tour_mode_is_ride_hail,i_tour_mode.isin(I_RIDE_HAIL_MODES) #,, -,selected_tour_vehicle,"reindex(tours.selected_vehicle, df.tour_id)" +,selected_tour_vehicle,"reindex(tours.selected_vehicle, df.tour_id).astype(vehicles.vehicle_type.dtype)" ,auto_op_cost,"reindex(vehicles.groupby('vehicle_type')['auto_operating_cost'].mean(), pd.Series(selected_tour_vehicle, df.index))" ,auto_op_cost,"np.where(pd.isna(auto_op_cost), costPerMile, auto_op_cost)" ,inbound,~df.outbound @@ -92,4 +92,4 @@ dest terminal time not counted at home,_dest_terminal_time,"np.where(inbound & l #,dist_bike,od_skims['DISTBIKE'] #,dist_only,od_skims['DIST'] # added for school escorting model,, -Number of school children in vehicle on trip,num_escortees,df.escort_participants.fillna('').apply(lambda x: len(x.split('_'))) \ No newline at end of file +Number of school children in vehicle on trip,num_escortees,df.escort_participants.fillna('').apply(lambda x: len(x.split('_'))) diff --git a/activitysim/examples/prototype_mtc_extended/configs/vehicle_allocation.yaml b/activitysim/examples/prototype_mtc_extended/configs/vehicle_allocation.yaml index 23222fd05..4854d9a06 100644 --- a/activitysim/examples/prototype_mtc_extended/configs/vehicle_allocation.yaml +++ b/activitysim/examples/prototype_mtc_extended/configs/vehicle_allocation.yaml @@ -26,3 
+26,5 @@ OCCUPANCY_LEVELS: # DF: tours # TABLES: # - vehicles + +sharrow_skip: true diff --git a/activitysim/examples/prototype_mtc_extended/test/prototype_mtc_extended_reference_pipeline.zip b/activitysim/examples/prototype_mtc_extended/test/prototype_mtc_extended_reference_pipeline.zip index 9dda15936..8d394afc4 100644 Binary files a/activitysim/examples/prototype_mtc_extended/test/prototype_mtc_extended_reference_pipeline.zip and b/activitysim/examples/prototype_mtc_extended/test/prototype_mtc_extended_reference_pipeline.zip differ diff --git a/conda-environments/activitysim-dev-base.yml b/conda-environments/activitysim-dev-base.yml index 4b6d5f184..ffa8f784b 100644 --- a/conda-environments/activitysim-dev-base.yml +++ b/conda-environments/activitysim-dev-base.yml @@ -31,7 +31,6 @@ dependencies: - ipykernel # so this env will appear in jupyter as a selection - isort - jupyterlab -- larch = 5.7.* - matplotlib - myst-parser # allows markdown in sphinx - nbconvert @@ -43,7 +42,7 @@ dependencies: - numpydoc - openmatrix = 0.3.* - orca = 1.8 -- pandas = 1.4.* +- pandas = 2.2.* - pandera >= 0.15, <0.18.1 - platformdirs = 3.2.* - pre-commit @@ -54,7 +53,7 @@ dependencies: - pydata-sphinx-theme - pyinstrument = 4.4 - pypyr = 5.8.* -- pytables >=3.7 +- pytables >=3.9 - pytest = 7.2.* - pytest-cov - pytest-regressions @@ -71,10 +70,10 @@ dependencies: - sphinx = 6.1.* - sphinx_rtd_theme = 1.2.* - sphinx-argparse = 0.4.* -- xarray = 2023.2.* +- xarray >= 2023.2 - xmle - zarr = 2.14.* - zstandard - pip: - - autodoc_pydantic \ No newline at end of file + - autodoc_pydantic diff --git a/conda-environments/activitysim-dev.yml b/conda-environments/activitysim-dev.yml index 33a5856fb..4f4046e34 100644 --- a/conda-environments/activitysim-dev.yml +++ b/conda-environments/activitysim-dev.yml @@ -39,7 +39,7 @@ dependencies: - numpydoc - openmatrix = 0.3.* - orca = 1.8 -- pandas = 1.4.* +- pandas = 2.2.* - pandera >= 0.15, <0.18.1 - platformdirs = 3.2.* - pre-commit @@ -50,7 +50,7 
@@ dependencies: - pydata-sphinx-theme - pyinstrument = 4.4 - pypyr = 5.8.* -- pytables >=3.7 +- pytables >=3.9 - pytest = 7.2.* - pytest-cov - pytest-regressions @@ -61,14 +61,14 @@ dependencies: - ruff - setuptools_scm - scikit-learn = 1.2.* -- sharrow >= 2.6.0 +- sharrow >= 2.7.0 - simwrapper > 1.7 - snakeviz # for profiling - sparse - sphinx = 6.1.* - sphinx_rtd_theme = 1.2.* - sphinx-argparse = 0.4.* -- xarray = 2023.2.* +- xarray >= 2023.2 - xmle - zarr = 2.14.* - zstandard diff --git a/conda-environments/docbuild.yml b/conda-environments/docbuild.yml index df4e45492..a68dfe09f 100644 --- a/conda-environments/docbuild.yml +++ b/conda-environments/docbuild.yml @@ -32,20 +32,20 @@ dependencies: - numpydoc - openmatrix >= 0.3.4.1 - orca >= 1.6 -- pandas >= 1.1.0,<2 +- pandas >= 2.2 - platformdirs - psutil >= 4.1 - pyarrow >= 2.0 - pydantic = 2.6.* - pypyr >= 5.3 -- pytables >=3.7 +- pytables >=3.9 - pytest - pytest-cov - pytest-regressions - pyyaml >= 5.1 - requests >= 2.7 - scikit-learn >= 1.1 -- sharrow >= 2.6.0 +- sharrow >= 2.7.0 - simwrapper > 1.7 - sparse - sphinx-argparse diff --git a/conda-environments/github-actions-tests.yml b/conda-environments/github-actions-tests.yml index 3aa8ae115..928304cf4 100644 --- a/conda-environments/github-actions-tests.yml +++ b/conda-environments/github-actions-tests.yml @@ -18,13 +18,13 @@ dependencies: - openmatrix = 0.3.5.0 - orca = 1.8 - pandera >= 0.15, <0.18.1 -- pandas = 1.4.* +- pandas = 2.2.* - platformdirs = 3.2.* - psutil = 5.9.* - pyarrow = 11.* - pydantic = 2.6.* - pypyr = 5.8.* -- pytables >= 3.7 +- pytables >= 3.9 - pytest = 7.2.* - pytest-cov - pytest-regressions @@ -32,9 +32,9 @@ dependencies: - requests = 2.28.* - ruff - scikit-learn = 1.2.* -- sharrow >= 2.6.0 +- sharrow >= 2.7.0 - simwrapper > 1.7 - sparse -- xarray = 2023.2.* +- xarray >= 2023.2 - zarr = 2.14.* - zstandard diff --git a/pyproject.toml b/pyproject.toml index 92e8d85f7..e6ced6798 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 
+15,7 @@ dependencies = [ "numpy >= 1.16.1", "openmatrix >= 0.3.4.1", "orca >= 1.6", - "pandas >= 1.4, <2", + "pandas >= 2.2", "platformdirs", "psutil >= 4.1", "pyarrow >= 2.0",