diff --git a/.travis.yml b/.travis.yml
index 4750a12..c3f0683 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,21 +1,14 @@
 language: python
 env:
-  - PYTHON_VERSION=2.7 IPYTHON_KERNEL=python2
-  - PYTHON_VERSION=3.6 IPYTHON_KERNEL=python3
-  - PYTHON_VERSION=3.7 IPYTHON_KERNEL=python3
-
+  - PYTHON_VERSION=3.8 IPYTHON_KERNEL=python3
 before_install:
   - wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
   - sh Miniconda3-latest-Linux-x86_64.sh -b -p /home/travis/miniconda
   - export PATH=/home/travis/miniconda/bin:$PATH
 install:
-  - conda create -n testenv --yes -c conda-forge python=$PYTHON_VERSION pip numpy scipy matplotlib
-  - source activate testenv
-  - conda install --yes -c conda-forge pygrib scikit-learn scikit-image netcdf4 basemap
-  - pip install arrow
-  - pip install pytest
-  - pip install .
+  - conda env create -f environment.yml
+  - source activate hagelslag
 script:
-  - py.test
+  - pytest
 notifications:
-  email: true
\ No newline at end of file
+  email: true
diff --git a/README.md b/README.md
index d47ac78..dd3c020 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,11 @@ The package contains modules for storm identification and tracking, spatio-temporal data processing, and
 machine learning model training to predict hazard intensity as well as space and time translations.
 
 ### Citation
-If you employ hagelslag in your research, please acknowledge its use with the following citation:
+If you employ hagelslag in your research, please acknowledge its use with the following citations:
+
+    Gagne, D. J., A. McGovern, S. E. Haupt, R. A. Sobash, J. K. Williams, and M. Xue, 2017: Storm-Based Probabilistic Hail
+    Forecasting with Machine Learning Applied to Convection-Allowing Ensembles. Wea. Forecasting, 32, 1819-1840,
+    https://doi.org/10.1175/WAF-D-17-0010.1.
 
     Gagne II, D. J., A. McGovern, N. Snook, R. Sobash, J. Labriola, J. K. Williams, S. E. Haupt, and M. Xue, 2016:
     Hagelslag: Scalable object-based severe weather analysis and forecasting. Proceedings of the Sixth Symposium on
@@ -21,7 +25,8 @@ djgagne at ou dot edu.
 
 ### Requirements
 
-Hagelslag is compatible with Python 2.7 and 3.5. Hagelslag is easiest to install with the help of the Anaconda Python Distribution, but it should work with other
+Hagelslag is compatible with Python 3.6 or newer. Hagelslag is easiest to install with the help of the [Miniconda
+Python Distribution](https://docs.conda.io/en/latest/miniconda.html), but it should work with other
 Python setups as well. Hagelslag requires the following packages and recommends the following versions:
 * numpy >= 1.10
@@ -30,14 +35,30 @@ Python setups as well. Hagelslag requires the following packages and recommends
 * scikit-learn >= 0.16
 * pandas >= 0.15
 * arrow >= 0.8.0
-* basemap
+* pyproj
 * netCDF4-python
+* xarray
+* jupyter
+* ncepgrib2
+* pygrib
+* cython
+* pip
+* sphinx
+* mock
+
+Install dependencies with the following commands:
+```
+git clone https://github.com/djgagne/hagelslag.git
+cd hagelslag
+conda env create -f environment.yml
+conda activate hagelslag
+```
 
 ### Installation
+
+Install the latest version of hagelslag with the following command from the top-level hagelslag directory (where setup.py
+is):
+`pip install .`
-
-To install hagelslag, enter the top-level directory of the package and run the standard python setup command:
-
-    python setup.py install
 
 Hagelslag will install the libraries in site-packages and will also install 3 applications into the `bin` directory of
 your Python installation.
diff --git a/config/ncar_rt2020_data.config b/config/ncar_rt2020_data.config
index 168a2e3..6c5993e 100644
--- a/config/ncar_rt2020_data.config
+++ b/config/ncar_rt2020_data.config
@@ -10,6 +10,7 @@ from datetime import datetime
 
 work_path = "/glade/work/sobash/NSC_objects/"
 scratch_path = "/glade/scratch/dgagne/"
+#dates = pd.read_csv("/glade/work/")
 #dates = pd.read_csv("/glade/work/ahijevyc/share/NSC.dates",
 #                    header=None)[0].astype(str).str.pad(14, side="right",fillchar="0")
 #date_index = pd.DatetimeIndex(dates)
diff --git a/config/ncar_storm_data_3km.config b/config/ncar_storm_data_3km.config
index b53a3b8..6c6c442 100644
--- a/config/ncar_storm_data_3km.config
+++ b/config/ncar_storm_data_3km.config
@@ -24,7 +24,7 @@ config = dict(dates=date_index.to_pydatetime(),
               watershed_variable="W_UP_MAX",
               ensemble_name="NCARSTORM",
               ensemble_members=ensemble_members,
-              model_path="/glade/p/nmmm0039/3KM_WRF_POST/",
+              model_path="/glade/p/mmm/parc/sobash/NSC/3KM_WRF_POST_12sec_ts/",
               model_watershed_params=(10, 1, 80, 100, 60),
               size_filter=12,
               gaussian_window=1,
diff --git a/config/ncar_storm_data_3km_2020.config b/config/ncar_storm_data_3km_2020.config
new file mode 100644
index 0000000..f695799
--- /dev/null
+++ b/config/ncar_storm_data_3km_2020.config
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+from hagelslag.processing.ObjectMatcher import shifted_centroid_distance, closest_distance
+from hagelslag.processing.ObjectMatcher import centroid_distance, time_distance
+
+import pandas as pd
+import numpy as np
+import os, sys
+from datetime import datetime
+
+work_path = "/glade/work/dgagne/NSC_data/"
+scratch_path = "/glade/scratch/dgagne/"
+dates = pd.read_csv("/glade/u/home/dgagne/hagelslag/config/ncar_storm_dates_3km_new.txt",
+                    header=None)[0].astype(str).str.pad(14, side="right", fillchar="0")
+date_index = pd.DatetimeIndex(dates)
+ensemble_members = ["d01"]
+pressure_levels = ["850", "700", "500", "300"]
+pres_vars = ["GHT_PL", "T_PL", "TD_PL", "U_PL", "V_PL"]
+full_pres_vars = []
+for pres_var in pres_vars:
+    for pressure_level in pressure_levels:
+        full_pres_vars.append(pres_var + "_" + pressure_level)
+REFL_1KM_AGL = {
+    "name": "REFL_1KM_AGL",
+    "params": (30, 1, 80, 300, 60),
+    "object_matcher_params": ([shifted_centroid_distance], np.array([1.0]), np.array([24000]))
+    }
+W_UP_MAX = {
+    "name": "W_UP_MAX",
+    "params": (10, 1, 80, 300, 60),
+    "object_matcher_params": ([closest_distance, shifted_centroid_distance], np.array([0.9, 0.1]), np.array([1, 24000]))
+    }
+REFL_COM = {
+    "name": "REFL_COM",
+    "params": (40, 1, 80, 300, 50),
+    "object_matcher_params": ([shifted_centroid_distance], np.array([1.0]), np.array([24000]))
+    }
+segmentation_approach = "hyst"  # "hyst", "ws", or "ew"
+REFL_COM["params"] = (35, 50)
+watershed_dict = REFL_COM
+watershed_variable = watershed_dict["name"]
+output_prefix = work_path + "track_data_nsc_3km_" + watershed_variable + "_" + segmentation_approach
+config = dict(dates=date_index.to_pydatetime(),
+              start_hour=1,
+              end_hour=35,  # Don't go above maximum lead time-1 (35) or diagnostics file for storm_variables won't be found
+              watershed_variable=watershed_variable,
+              ensemble_name="NCARSTORM",
+              ensemble_members=ensemble_members,
+              model_path="/glade/p/mmm/parc/sobash/NSC/3KM_WRF_POST_12sec_ts/",
+              segmentation_approach=segmentation_approach,
+              model_watershed_params=watershed_dict["params"],
+              size_filter=12,
+              gaussian_window=0,
+              #mrms_path=work_path + "mrms_ncar/",
+              mrms_path=None,
+              mrms_variable="MESH_Max_60min_00.50",
+              mrms_watershed_params=(13, 1, 125, 100, 100),
+              object_matcher_params=watershed_dict["object_matcher_params"],
+              track_matcher_params=([centroid_distance, time_distance],
+                                    np.array([80000, 2])),
+              storm_variables=["UP_HELI_MAX", "GRPL_MAX", "WSPD10MAX", "W_UP_MAX", "W_DN_MAX",
+                               "RVORT1_MAX", "RVORT5_MAX", "UP_HELI_MAX03", "UP_HELI_MAX01",
+                               "UP_HELI_MIN", "REFL_COM", "REFL_1KM_AGL", "REFD_MAX",
+                               "PSFC", "T2", "Q2", "TD2", "U10", "V10"] + full_pres_vars,
+              #"UP_HELI_MIN", "HAIL_MAXK1", "HAIL_MAX2D", "HAILCAST_DIAM_MAX",
+              potential_variables=["SBLCL", "MLLCL", "SBCAPE", "MLCAPE", "MUCAPE", "SBCINH", "MLCINH",
+                                   "USHR1", "VSHR1", "USHR6", "VSHR6", "U_BUNK", "V_BUNK",
+                                   "SRH03", "SRH01", "PSFC", "T2", "Q2", "TD2", "U10", "V10"],
+              #"PSFC", "T2", "Q2", "TD2", "U10", "V10"] + full_pres_vars,
+              future_variables=["REFL_COM", "UP_HELI_MAX", "GRPL_MAX", "HAIL_MAXK1", "UP_HELI_MAX03"],
+              tendency_variables=[],
+              shape_variables=["area", "eccentricity", "major_axis_length", "minor_axis_length", "orientation"],
+              #variable_statistics=["mean", "max", "min", "std",
+              #                     "percentile_10", "percentile_25", "percentile_50", "percentile_75", "percentile_90"],
+              variable_statistics=["mean", "max", "min"],
+              csv_path=output_prefix + "_csv/",
+              geojson_path=output_prefix + "_json/",
+              nc_path=output_prefix + "_nc/",
+              patch_radius=40,
+              unique_matches=True,
+              closest_matches=True,
+              match_steps=True,
+              train=False,
+              single_step=True,
+              label_type="gamma",
+              model_map_file="/glade/u/home/dgagne/hagelslag/mapfiles/ncar_storm_map_3km.txt",
+              mask_file="/glade/u/home/dgagne/hagelslag/mapfiles/ncar_storm_us_mask_3km.nc")
+if not os.path.exists(config["csv_path"]):
+    print("csv_path doesn't exist. Try mkdir", config["csv_path"])
+    sys.exit(1)
+if not os.path.exists(config["nc_path"]):
+    print("nc_path doesn't exist. Try mkdir", config["nc_path"])
+    sys.exit(1)
diff --git a/config/ssef_data_2014.config b/config/ssef/ssef_data_2014.config
similarity index 100%
rename from config/ssef_data_2014.config
rename to config/ssef/ssef_data_2014.config
diff --git a/config/ssef_data_2015.config b/config/ssef/ssef_data_2015.config
similarity index 100%
rename from config/ssef_data_2015.config
rename to config/ssef/ssef_data_2015.config
diff --git a/config/ssef_data_cqg_closest_2014.config b/config/ssef/ssef_data_cqg_closest_2014.config
similarity index 100%
rename from config/ssef_data_cqg_closest_2014.config
rename to config/ssef/ssef_data_cqg_closest_2014.config
diff --git a/config/ssef_data_cqg_closest_2015.config b/config/ssef/ssef_data_cqg_closest_2015.config
similarity index 100%
rename from config/ssef_data_cqg_closest_2015.config
rename to config/ssef/ssef_data_cqg_closest_2015.config
diff --git a/config/ssef_data_cqg_closest_2016.config b/config/ssef/ssef_data_cqg_closest_2016.config
similarity index 100%
rename from config/ssef_data_cqg_closest_2016.config
rename to config/ssef/ssef_data_cqg_closest_2016.config
diff --git a/config/ssef_data_cqg_closest_2016_stampede.config b/config/ssef/ssef_data_cqg_closest_2016_stampede.config
similarity index 100%
rename from config/ssef_data_cqg_closest_2016_stampede.config
rename to config/ssef/ssef_data_cqg_closest_2016_stampede.config
diff --git a/config/ssef_data_cqg_closest_2016_train.config b/config/ssef/ssef_data_cqg_closest_2016_train.config
similarity index 100%
rename from config/ssef_data_cqg_closest_2016_train.config
rename to config/ssef/ssef_data_cqg_closest_2016_train.config
diff --git a/config/ssef_data_cqg_patches_2015.config b/config/ssef/ssef_data_cqg_patches_2015.config
similarity index 100%
rename from config/ssef_data_cqg_patches_2015.config
rename to config/ssef/ssef_data_cqg_patches_2015.config
diff --git a/config/ssef_data_cqg_patches_2016.config b/config/ssef/ssef_data_cqg_patches_2016.config
similarity index 100%
rename from config/ssef_data_cqg_patches_2016.config
rename to config/ssef/ssef_data_cqg_patches_2016.config
diff --git a/config/ssef_data_cqg_patches_2017.config b/config/ssef/ssef_data_cqg_patches_2017.config
similarity index 100%
rename from config/ssef_data_cqg_patches_2017.config
rename to config/ssef/ssef_data_cqg_patches_2017.config
diff --git a/config/ssef_data_dist_2014.config b/config/ssef/ssef_data_dist_2014.config
similarity index 100%
rename from config/ssef_data_dist_2014.config
rename to config/ssef/ssef_data_dist_2014.config
diff --git a/config/ssef_data_dist_2015.config b/config/ssef/ssef_data_dist_2015.config
similarity index 100%
rename from config/ssef_data_dist_2015.config
rename to config/ssef/ssef_data_dist_2015.config
diff --git a/config/ssef_data_uh_closest_2014.config b/config/ssef/ssef_data_uh_closest_2014.config
similarity index 100%
rename from config/ssef_data_uh_closest_2014.config
rename to config/ssef/ssef_data_uh_closest_2014.config
diff --git a/config/ssef_data_uh_closest_2015.config b/config/ssef/ssef_data_uh_closest_2015.config
similarity index 100%
rename from config/ssef_data_uh_closest_2015.config
rename to config/ssef/ssef_data_uh_closest_2015.config
diff --git a/config/ssef_data_uh_neighbor_2014.config b/config/ssef/ssef_data_uh_neighbor_2014.config
similarity index 100%
rename from config/ssef_data_uh_neighbor_2014.config
rename to config/ssef/ssef_data_uh_neighbor_2014.config
diff --git a/config/ssef_data_uh_neighbor_2015.config b/config/ssef/ssef_data_uh_neighbor_2015.config
similarity index 100%
rename from config/ssef_data_uh_neighbor_2015.config
rename to config/ssef/ssef_data_uh_neighbor_2015.config
diff --git a/config/ssef_eval_2015.config b/config/ssef/ssef_eval_2015.config
similarity index 100%
rename from config/ssef_eval_2015.config
rename to config/ssef/ssef_eval_2015.config
diff --git a/config/ssef_eval_cqg_closest_2015.config b/config/ssef/ssef_eval_cqg_closest_2015.config
similarity index 100%
rename from config/ssef_eval_cqg_closest_2015.config
rename to config/ssef/ssef_eval_cqg_closest_2015.config
diff --git a/config/ssef_eval_cqg_masked_2015.config b/config/ssef/ssef_eval_cqg_masked_2015.config
similarity index 100%
rename from config/ssef_eval_cqg_masked_2015.config
rename to config/ssef/ssef_eval_cqg_masked_2015.config
diff --git a/config/ssef_eval_uh_closest_2015.config b/config/ssef/ssef_eval_uh_closest_2015.config
similarity index 100%
rename from config/ssef_eval_uh_closest_2015.config
rename to config/ssef/ssef_eval_uh_closest_2015.config
diff --git a/config/ssef_eval_uh_neighbor_2015.config b/config/ssef/ssef_eval_uh_neighbor_2015.config
similarity index 100%
rename from config/ssef_eval_uh_neighbor_2015.config
rename to config/ssef/ssef_eval_uh_neighbor_2015.config
diff --git a/config/ssef_forecast_2015.config b/config/ssef/ssef_forecast_2015.config
similarity index 100%
rename from config/ssef_forecast_2015.config
rename to config/ssef/ssef_forecast_2015.config
diff --git a/config/ssef_forecast_cqg_closest_2015.config b/config/ssef/ssef_forecast_cqg_closest_2015.config
similarity index 100%
rename from config/ssef_forecast_cqg_closest_2015.config
rename to config/ssef/ssef_forecast_cqg_closest_2015.config
diff --git a/config/ssef_forecast_cqg_closest_2016_stampede.config b/config/ssef/ssef_forecast_cqg_closest_2016_stampede.config
similarity index 100%
rename from config/ssef_forecast_cqg_closest_2016_stampede.config
rename to config/ssef/ssef_forecast_cqg_closest_2016_stampede.config
diff --git a/config/ssef_forecast_cqg_closest_2016_stratus.config b/config/ssef/ssef_forecast_cqg_closest_2016_stratus.config
similarity index 100%
rename from config/ssef_forecast_cqg_closest_2016_stratus.config
rename to config/ssef/ssef_forecast_cqg_closest_2016_stratus.config
diff --git a/config/ssef_forecast_cqg_patch_2017.config b/config/ssef/ssef_forecast_cqg_patch_2017.config
similarity index 100%
rename from config/ssef_forecast_cqg_patch_2017.config
rename to config/ssef/ssef_forecast_cqg_patch_2017.config
diff --git a/config/ssef_forecast_dist_2015.config b/config/ssef/ssef_forecast_dist_2015.config
similarity index 100%
rename from config/ssef_forecast_dist_2015.config
rename to config/ssef/ssef_forecast_dist_2015.config
diff --git a/config/ssef_forecast_uh_closest_2015.config b/config/ssef/ssef_forecast_uh_closest_2015.config
similarity index 100%
rename from config/ssef_forecast_uh_closest_2015.config
rename to config/ssef/ssef_forecast_uh_closest_2015.config
diff --git a/config/ssef_forecast_uh_neighbor_2015.config b/config/ssef/ssef_forecast_uh_neighbor_2015.config
similarity index 100%
rename from config/ssef_forecast_uh_neighbor_2015.config
rename to config/ssef/ssef_forecast_uh_neighbor_2015.config
diff --git a/config/ssef_solar_data_2015.config b/config/ssef/ssef_solar_data_2015.config
similarity index 100%
rename from config/ssef_solar_data_2015.config
rename to config/ssef/ssef_solar_data_2015.config
diff --git a/config/ssef_solar_data_2016_stampede.config b/config/ssef/ssef_solar_data_2016_stampede.config
similarity index 100%
rename from config/ssef_solar_data_2016_stampede.config
rename to config/ssef/ssef_solar_data_2016_stampede.config
diff --git a/demos/obj_tracking.py b/demos/obj_tracking.py
index 7023178..1cf1080 100755
--- a/demos/obj_tracking.py
+++ b/demos/obj_tracking.py
@@ -14,18 +14,16 @@ from matplotlib.patches import Polygon, PathPatch
 from matplotlib.collections import PatchCollection
 from datetime import datetime, timedelta
-from mpl_toolkits.basemap import Basemap
 from scipy.ndimage import gaussian_filter, find_objects
 from copy import deepcopy
-from mysavfig import mysavfig
 import pdb, sys, argparse, os
 
 
 # In[2]:
 
 
-from hagelslag.processing import EnhancedWatershed
+from hagelslag.processing.EnhancedWatershedSegmenter import EnhancedWatershed
 from hagelslag.data import ModelOutput
-from hagelslag.processing import ObjectMatcher, shifted_centroid_distance, centroid_distance, closest_distance
+from hagelslag.processing.ObjectMatcher import ObjectMatcher, closest_distance
 from hagelslag.processing import STObject
 
 parser = argparse.ArgumentParser(description='object tracker')
@@ -36,7 +34,8 @@ parser.add_argument('-v','--verbose', action="store_true",
                     help='print more output. useful for debugging')
 args = parser.parse_args()
 
-if args.verbose: print args
+if args.verbose:
+    print(args)
 
 odir = '/glade/p/work/ahijevyc/hagelslag/out/'
 model_path = "/glade/scratch/ahijevyc/VSE/"
@@ -105,24 +104,19 @@ def add_grid(m):
     # If it does, see if 'field' is a variable.
     ncf = Dataset(dfile)
     if field in ncf.variables:
-        print dfile
+        print(dfile)
         model_grid.data.append(ncf.variables[field][0,:,:])
         ncf.close()
         break
     ncf.close()
     d += deltat
 
-print model_grid.lon.shape, np.maximum.reduce(model_grid.data).shape # max across time dimension
-print model_grid.data[0].max(), model_grid.data[-1].max(), np.maximum.reduce(model_grid.data).max()
+print(model_grid.lon.shape, np.maximum.reduce(model_grid.data).shape)  # max across time dimension
+print(model_grid.data[0].max(), model_grid.data[-1].max(), np.maximum.reduce(model_grid.data).max())
 
-basemap = Basemap(resolution="l",
-                  llcrnrlon=model_grid.lon.min()+5., urcrnrlon=model_grid.lon.max()-.1,
-                  llcrnrlat=model_grid.lat.min()+1.5, urcrnrlat=model_grid.lat.max()-.5,
-                  projection='lcc',lat_1=np.mean(model_grid.lat),
-                  lat_0=np.mean(model_grid.lat),lon_0=np.mean(model_grid.lon))
 plt.figure(figsize=(10,8))
-add_grid(basemap)
-basemap.contourf(model_grid.lon, model_grid.lat,
+
+plt.contourf(model_grid.lon, model_grid.lat,
                  np.maximum.reduce(model_grid.data), # max across time dimension
                  levels,
                  extend="max",
@@ -133,24 +127,23 @@ def add_grid(m):
                                           end_date.strftime("%d %b %Y %H:%M")),
                       fontweight="bold", fontsize=14)
 dtstr = "_"+member+run_date.strftime("_%Y%m%d%H")
-ret = mysavfig(odir+"uh_swaths/"+field+"_swaths"+dtstr+".png")
+ret = plt.savefig(odir+"uh_swaths/"+field+"_swaths"+dtstr+".png")
 
 
 def get_forecast_objects(model_grid, ew_params, min_size, gaussian_window):
     ew = EnhancedWatershed(*ew_params)
     model_objects = []
-    print "Find model objects Hour:",
+    print("Find model objects Hour:")
     for h in range(int((model_grid.end_date - model_grid.start_date).total_seconds()/deltat.total_seconds())+1):
-        print h,
+        print(h)
         hour_labels = ew.size_filter(ew.label(gaussian_filter(model_grid.data[h], gaussian_window)), min_size)
         obj_slices = find_objects(hour_labels)
         num_slices = len(obj_slices)
         model_objects.append([])
         if num_slices > 0:
             fig, ax = plt.subplots()
-            add_grid(basemap)
-            t = basemap.contourf(model_grid.lon,model_grid.lat,hour_labels,np.arange(0,num_slices+1)+0.5,extend="max",cmap="Set1",latlon=True,title=str(run_date)+" "+field+" "+str(h))
-            ret = mysavfig(odir+"enh_watershed_ex/ew{0:02d}.png".format(h))
+            t = plt.contourf(model_grid.lon, model_grid.lat, hour_labels, np.arange(0, num_slices+1)+0.5, extend="max", cmap="Set1"); plt.title(str(run_date)+" "+field+" "+str(h))
+            ret = plt.savefig(odir+"enh_watershed_ex/ew{0:02d}.png".format(h))
             for s, sl in enumerate(obj_slices):
                 model_objects[-1].append(STObject(model_grid.data[h][sl],
                                                   #np.where(hour_labels[sl] > 0, 1, 0),
@@ -178,7 +171,7 @@ def get_forecast_objects(model_grid, ew_params, min_size, gaussian_window):
 def track_forecast_objects(input_model_objects, model_grid, object_matcher):
     model_objects = deepcopy(input_model_objects)
     hours = np.arange(int((model_grid.end_date-model_grid.start_date).total_seconds()/deltat.total_seconds()) + 1)
-    print "hours = ",hours
+    print("hours = ",hours)
     tracked_model_objects = []
     for h in hours:
         past_time_objs = []
@@ -189,12 +182,12 @@ def track_forecast_objects(input_model_objects, model_grid, object_matcher):
             past_time_objs.append(obj)
         # If no objects existed in the last time step, then consider objects in current time step all new
         if len(past_time_objs) == 0:
-            print "time",h, " no objects existed in the last time step. consider objects in current time step all new"
+            print("time",h, " no objects existed in the last time step. consider objects in current time step all new")
             tracked_model_objects.extend(deepcopy(model_objects[h]))
         # Match from previous time step with current time step
         elif len(past_time_objs) > 0 and len(model_objects[h]) > 0:
             assignments = object_matcher.match_objects(past_time_objs, model_objects[h], h - 1, h)
-            print "assignments:", assignments
+            print("assignments:", assignments)
             unpaired = range(len(model_objects[h]))
             for pair in assignments:
                 past_time_objs[pair[0]].extend(model_objects[h][pair[1]])
@@ -209,42 +202,4 @@ def track_forecast_objects(input_model_objects, model_grid, object_matcher):
 #                               np.array([dist_weight, 1-dist_weight]), np.array([max_distance] * 2))
 object_matcher = ObjectMatcher([closest_distance],np.array([1]),np.array([4*model_grid.dx]))
 
-tracked_model_objects = track_forecast_objects(model_objects, model_grid, object_matcher)
-color_list = ["violet", "cyan", "blue", "green", "purple", "darkgreen", "teal", "royalblue"]
-color_arr = np.tile(color_list, len(tracked_model_objects) / len(color_list) + 1)
-fig, ax = plt.subplots(figsize=(11, 8.5))
-add_grid(basemap)
-basemap.contourf(model_grid.lon,
-                 model_grid.lat,
-                 np.maximum.reduce(model_grid.data),
-                 levels,
-                 extend="max",
-                 cmap="YlOrRd", latlon=True)
-plt.colorbar(shrink=0.8,fraction=0.05)
-c = 0
-for t,tracked_model_object in enumerate(tracked_model_objects):
-    duration = tracked_model_object.end_time - tracked_model_object.start_time + 1
-    if duration < args.timethresh: continue
-    # Draw polygon boundaries
-    patches = []
-    for time in tracked_model_object.times:
-        lon = tracked_model_object.boundary_polygon(time)[0]
-        lat = tracked_model_object.boundary_polygon(time)[1]
-        #basemap.plot(lon, lat, color=color_arr[c], latlon=True, lw=0.5)
-        x, y = basemap(lon,lat)
-        if len(x) > 2: # if there are only 6 pixels, boundary_polygon may be zero-length. Make sure it has at least 3 points.
-            patches.append(Polygon(np.transpose([x,y]), closed=True, fill=True))
-    ax.add_collection(PatchCollection(patches, color=color_arr[c], alpha=0.4))
-    # Label objects
-    traj = tracked_model_object.trajectory()
-    xs, ys = basemap(*traj)
-    #plt.plot(xs,ys, marker='o', markersize=4, color=color_arr[t], lw=2)
-    for lon, lat, x, y, time, u, v in zip(traj[0], traj[1], xs,ys,tracked_model_object.times,tracked_model_object.u,tracked_model_object.v):
-        print "#",t," lon,lat=",lon,lat,"time=",time,"u,v=",u,v
-        if args.verbose: plt.text(x,y,str(time)+":"+str(t), fontsize=7)
-        plt.text(x,y,str(time), fontsize=7)
-        #plt.barbs(x,y,u/model_grid.dx, v/model_grid.dx, length=6, barbcolor=color_arr[t])
-    c = c+1
-
-plt.title(title_info.get_text(), fontweight="bold", fontsize=14)
-ret = mysavfig(odir+"storm_tracks/storm_tracks"+dtstr+"_"+str(params["delta"])+".png")
 
diff --git a/doc/hagelslag.plot.rst b/doc/hagelslag.plot.rst
deleted file mode 100644
index e003cc1..0000000
--- a/doc/hagelslag.plot.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-hagelslag.plot package
-======================
-
-Submodules
-----------
-
-hagelslag.plot.glueviz module
------------------------------
-
-.. automodule:: hagelslag.plot.glueviz
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: hagelslag.plot
-   :members:
-   :undoc-members:
-   :show-inheritance:
diff --git a/doc/hagelslag.rst b/doc/hagelslag.rst
index 4ad59a4..67891ed 100644
--- a/doc/hagelslag.rst
+++ b/doc/hagelslag.rst
@@ -8,7 +8,6 @@ Subpackages
 
    hagelslag.data
    hagelslag.evaluation
-   hagelslag.plot
    hagelslag.processing
    hagelslag.util
 
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000..696b873
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,26 @@
+name: hagelslag
+channels:
+  - conda-forge
+dependencies:
+  - python=3.8
+  - numpy
+  - scipy
+  - matplotlib
+  - xarray
+  - netcdf4
+  - pandas
+  - scikit-learn
+  - pytest
+  - h5py
+  - pip
+  - pyproj
+  - pygrib
+  - scikit-image
+  - jupyter
+  - jupyterlab
+  - arrow
+  - cython
+  - sphinx
+  - mock
+  - pip:
+    - git+https://github.com/jswhit/ncepgrib2.git
diff --git a/hagelslag/data/FV3ModelGrid.py b/hagelslag/data/FV3ModelGrid.py
index 33e976e..e391d89 100644
--- a/hagelslag/data/FV3ModelGrid.py
+++ b/hagelslag/data/FV3ModelGrid.py
@@ -4,7 +4,7 @@ from .ModelGrid import ModelGrid
 
 
-class FV3ModelGrid(ModelGrid, GribModelGrid):
+class FV3ModelGrid(GribModelGrid):
     """
     Extension of the ModelGrid class for interfacing with the HREFv2 ensemble.
 
     Args:
@@ -27,9 +27,10 @@ def __init__(self, member, run_date, variable, start_date,
                                         (end_date - run_date).total_seconds() / 3600 + 1, dtype=int)
         for forecast_hr in self.forecast_hours:
-            file_name=self.path+'{0}/{1}*{0}*f{2:02}.grib2'.format(
+            file_name=self.path+'{0}/{1}_{2}f{3:03d}.grib2'.format(
                 run_date.strftime("%Y%m%d"),
                 self.member,
+                run_date.strftime("%Y%m%d%H"),
                 forecast_hr)
             filenames.append(file_name)
         self.netcdf_variables = ["hske_1000", "hske_3000", "hmf_1000", "hmf_3000", "ihm_1000", "ihm_3000"]
@@ -39,7 +40,7 @@ def __init__(self, member, run_date, variable, start_date,
 
     def load_data(self):
         if self.variable in self.netcdf_variables:
-            super(self).load_data()
+            return ModelGrid.load_data(self)
         else:
-            super(self).load_grib_data()
+            return GribModelGrid.load_data(self)
 
diff --git a/hagelslag/data/GribModelGrid.py b/hagelslag/data/GribModelGrid.py
index 2c3bb85..67ae733 100644
--- a/hagelslag/data/GribModelGrid.py
+++ b/hagelslag/data/GribModelGrid.py
@@ -129,7 +129,6 @@ def load_data(self):
             print("No {0} model runs on {1}".format(self.member, self.run_date))
             units = None
             return self.data, units
-
         for f, g_file in enumerate(self.file_objects):
             grib = pygrib.open(g_file)
             data_values = None
diff --git a/hagelslag/data/ModelOutput.py b/hagelslag/data/ModelOutput.py
index d621557..4e1270b 100644
--- a/hagelslag/data/ModelOutput.py
+++ b/hagelslag/data/ModelOutput.py
@@ -199,7 +199,7 @@ def load_map_info(self, map_file):
                 setattr(self, m, v)
             self.i, self.j = np.indices(self.lon.shape)
             self.proj = get_proj_obj(proj_dict)
-        elif self.ensemble_name.upper() in ["NCAR", "NCARSTORM", "HRRR", "VSE", "HREFV2","HRRRE"]:
+        else:
             proj_dict, grid_dict = read_ncar_map_file(map_file)
             if self.member_name[0:7] == "1km_pbl":  # Don't just look at the first 3 characters. You have to differentiate '1km_pbl1' and '1km_on_3km_pbl1'
                 grid_dict["dx"] = 1000
diff --git a/hagelslag/plot/__init__.py b/hagelslag/plot/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/hagelslag/plot/glueviz.py b/hagelslag/plot/glueviz.py
deleted file mode 100644
index 0b72cfb..0000000
--- a/hagelslag/plot/glueviz.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from glue.viewers.custom.qt import CustomViewer
-from glue.core import Data
-from glue.config import data_factory
-import json
-import numpy as np
-from mpl_toolkits.basemap import Basemap
-
-
-class TrackPolyMap(CustomViewer):
-    name = "Track Poly Map"
-    x = "att(x)"
-    y = "att(y)"
-    color = "red"
-
-    def plot_data(self, axes, x, y, color, state):
-        for p in range(len(x)):
-            axes.fill(x[p], y[p], color)
-
-    def plot_subset(self, axes, x, y, style, state):
-        for p in range(len(x)):
-            mx, my = state.m(x[p], y[p])
-            axes.fill(mx, my,
-                      alpha=style.alpha,
-                      facecolor=style.color,
-                      edgecolor=style.color)
-
-    def setup(self, axes, state):
-        bmap = Basemap(projection="cyl")
-        bmap.drawstates()
-        bmap.drawcoastlines()
-        bmap.drawcountries()
-        state.m = bmap
-
-    def select(self, roi, latitude, longitude, state):
-        mx, my = state.m(longitude, latitude)
-        return roi.contains(mx, my)
-
-
-def is_json(filename, **kwargs):
-    return filename.endswith(".json")
-
-
-@data_factory('Hagelslag geoJSON loader', is_json)
-def read_json(file_name):
-    json_file = open(file_name)
-    data = json.load(json_file)
-    json_file.close()
-    track_steps = []
-    for feature in data['features']:
-        feature_grids = {}
-        for grid in feature['properties']['attributes'].keys():
-            feature_grids[grid] = np.array(feature['properties']['attributes'])
-            feature_grids["i"] = feature["properties"]["i"]
-
-        track_steps.append(Data(label=data['properties']["id"], **feature_grids))
-    return track_steps
diff --git a/hagelslag/processing/EnsembleProducts.py b/hagelslag/processing/EnsembleProducts.py
index eb88d8e..5c94a97 100644
--- a/hagelslag/processing/EnsembleProducts.py
+++ b/hagelslag/processing/EnsembleProducts.py
@@ -17,7 +17,7 @@
 
 try:
-    from ncepgrib2 import Grib2Encode, dump
+    from ncepgrib2 import Grib2Encode
     grib_support = True
 except ImportError("ncepgrib2 not available"):
     grib_support = False
diff --git a/hagelslag/util/convert_mrms_grids.py b/hagelslag/util/convert_mrms_grids.py
index fcde6b9..ef96901 100644
--- a/hagelslag/util/convert_mrms_grids.py
+++ b/hagelslag/util/convert_mrms_grids.py
@@ -1,4 +1,3 @@
-import Nio
 import pandas as pd
 import os
 import subprocess
@@ -13,7 +12,7 @@ import warnings
 import traceback
 import argparse
-
+import xarray as xr
 
 
 def main():
     warnings.simplefilter("ignore")
@@ -125,7 +124,7 @@ def __init__(self, start_date, end_date, variable, path_start, freq="1H"):
         self.path_start = path_start
         self.freq = freq
         self.data = None
-        self.all_dates = pd.DatetimeIndex(start=self.start_date, end=self.end_date, freq=self.freq)
+        self.all_dates = pd.date_range(start=self.start_date, end=self.end_date, freq=self.freq)
         self.loaded_dates = None
         self.lon = None
         self.lat = None
@@ -150,9 +149,9 @@ def load_data(self):
                 print(full_path + data_file)
                 if data_file[-2:] == "gz":
                     subprocess.call(["gunzip", full_path + data_file])
-                    file_obj = Nio.open_file(full_path + data_file[:-3])
+                    file_obj = xr.open_dataset(full_path + data_file[:-3])
                 else:
-                    file_obj = Nio.open_file(full_path + data_file)
+                    file_obj = xr.open_dataset(full_path + data_file)
                 var_name = sorted(file_obj.variables.keys())[0]
                 data.append(file_obj.variables[var_name][:])
                 if self.lon is None:
diff --git a/requirements.txt b/requirements.txt
index 90ab76d..486ac44 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,5 @@ pyproj
 scikit-image
 pygrib
 arrow>=0.8
-
+cython
+ncepgrib2
diff --git a/setup.py b/setup.py
index 88197ae..76c1ad1 100644
--- a/setup.py
+++ b/setup.py
@@ -15,24 +15,14 @@
 if on_rtd:
     requires = []
 else:
-    requires = ["numpy>=1.10",
-                "pandas>=0.15",
-                "scipy",
-                "matplotlib>=1.5",
-                "netCDF4",
-                "scikit-learn>=0.16",
-                "scikit-image",
-                "pyproj",
-                "xarray",
-                "pygrib",
-                "arrow>=0.8.0",
-                ]
+    with open("requirements.txt") as require_file:
+        requires = [r.strip() for r in require_file.readlines()]
 
 if __name__ == "__main__":
     pkg_description = "Hagelslag is a Python package for storm-based analysis, forecasting, and evaluation."
     setup(name="hagelslag",
-          version="0.3",
+          version="0.4.0b1",
           description="Object-based severe weather forecast system",
           author="David John Gagne",
          author_email="dgagne@ucar.edu",