diff --git a/README.md b/README.md
index c0cc2a7..a495edd 100644
--- a/README.md
+++ b/README.md
@@ -451,17 +451,24 @@ Performance benchmarks on a 2015 MacBook Pro (M: million, K: thousand):
| Function | Routing | Number of cells | Run time |
| ----------------------- | ------- | ------------------------ | -------- |
-| `flowdir` | D8 | 36M | 1.09 [s] |
-| `flowdir` | DINF | 36M | 6.64 [s] |
-| `accumulation` | D8 | 36M | 3.65 [s] |
-| `accumulation` | DINF | 36M | 16.2 [s] |
-| `catchment` | D8 | 9.76M | 3.43 [s] |
-| `catchment` | DINF | 9.76M | 5.41 [s] |
-| `distance_to_outlet` | D8 | 9.76M | 4.74 [s] |
-| `distance_to_outlet` | DINF | 9.76M | 1 [m] 13 [s] |
-| `distance_to_ridge` | D8 | 36M | 6.83 [s] |
-| `hand` | D8 | 36M total, 730K channel | 12.9 [s] |
-| `hand` | DINF | 36M total, 770K channel | 18.7 [s] |
+| `flowdir` | D8 | 36M | 1.14 [s] |
+| `flowdir` | DINF | 36M | 7.01 [s] |
+| `flowdir` | MFD | 36M | 4.21 [s] |
+| `accumulation` | D8 | 36M | 3.44 [s] |
+| `accumulation` | DINF | 36M | 14.9 [s] |
+| `accumulation` | MFD | 36M | 32.5 [s] |
+| `catchment` | D8 | 9.76M | 2.19 [s] |
+| `catchment` | DINF | 9.76M | 3.5 [s] |
+| `catchment` | MFD | 9.76M | 17.1 [s] |
+| `distance_to_outlet` | D8 | 9.76M | 2.98 [s] |
+| `distance_to_outlet` | DINF | 9.76M | 5.49 [s] |
+| `distance_to_outlet` | MFD | 9.76M | 13.1 [s] |
+| `distance_to_ridge` | D8 | 36M | 4.53 [s] |
+| `distance_to_ridge` | DINF | 36M | 14.5 [s] |
+| `distance_to_ridge` | MFD | 36M | 31.3 [s] |
+| `hand` | D8 | 36M total, 730K channel | 12.3 [s] |
+| `hand` | DINF | 36M total, 770K channel | 15.8 [s] |
+| `hand` | MFD | 36M total, 770K channel | 29.8 [s] |
| `stream_order` | D8 | 36M total, 1M channel | 3.99 [s] |
| `extract_river_network` | D8 | 36M total, 345K channel | 4.07 [s] |
| `detect_pits` | N/A | 36M | 1.80 [s] |
@@ -471,10 +478,13 @@ Performance benchmarks on a 2015 MacBook Pro (M: million, K: thousand):
| `resolve_flats` | N/A | 36M | 9.56 [s] |
| `cell_dh` | D8 | 36M | 2.34 [s] |
| `cell_dh` | DINF | 36M | 4.92 [s] |
+| `cell_dh` | MFD | 36M | 30.1 [s] |
| `cell_distances` | D8 | 36M | 1.11 [s] |
| `cell_distances` | DINF | 36M | 2.16 [s] |
+| `cell_distances` | MFD | 36M | 26.8 [s] |
| `cell_slopes` | D8 | 36M | 4.01 [s] |
| `cell_slopes` | DINF | 36M | 10.2 [s] |
+| `cell_slopes` | MFD | 36M | 58.7 [s] |
Speed tests were run on a conditioned DEM from the HYDROSHEDS DEM repository
(linked above as `elevation.tiff`).
diff --git a/docs/flow-directions.md b/docs/flow-directions.md
index 0aca37f..66afeeb 100644
--- a/docs/flow-directions.md
+++ b/docs/flow-directions.md
@@ -135,7 +135,121 @@ Raster([[ nan, nan, nan, ..., nan, nan, nan],
Note that each entry takes a value between 0 and 2π, with `np.nan` representing unknown flow directions.
-Note that you must also specify `routing=dinf` when using `grid.catchment` or `grid.accumulation` with a D-infinity output grid.
+Note that you must also specify `routing='dinf'` when using this flow direction grid with other functions, such as `grid.catchment` or `grid.accumulation`.
+
+## Multiple flow directions (MFD)
+
+The multiple flow direction (MFD) routing scheme partitions flow from each cell among as many as eight of its neighbors. The proportion of flow routed to each downslope neighbor is proportional to the elevation gradient between the cell and that neighbor.
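+
+A minimal sketch of this partitioning rule is given below, mirroring the logic of `_mfd_flowdir_numba` in `pysheds/_sgrid.py`: each downslope neighbor is weighted by its elevation drop (raised to an exponent `p`, currently fixed at 1) divided by the distance to that neighbor, and the weights are normalized to sum to one. The standalone helper and its name are illustrative only.
+
+```python
+import numpy as np
+
+def mfd_proportions(dem, i, j, dx, dy, p=1):
+    """Illustrative helper: proportions of flow sent from interior cell (i, j)
+    to its eight neighbors (ordered N, NE, E, SE, S, SW, W, NW)."""
+    row_offsets = [-1, -1, 0, 1, 1, 1, 0, -1]
+    col_offsets = [0, 1, 1, 1, 0, -1, -1, -1]
+    dd = np.sqrt(dx**2 + dy**2)
+    distances = [dy, dd, dx, dd, dy, dd, dx, dd]
+    weights = np.zeros(8)
+    for k in range(8):
+        drop = dem[i, j] - dem[i + row_offsets[k], j + col_offsets[k]]
+        if drop > 0:  # only downslope neighbors receive flow
+            weights[k] = drop**p / distances[k]
+    total = weights.sum()
+    return weights / total if total > 0 else weights
+```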
+
+MFD routing can be selected by using the keyword argument `routing='mfd'`.
+
+```python
+fdir = grid.flowdir(inflated_dem, routing='mfd')
+```
+
+```python
+fdir
+```
+
+Output:
+
+```
+MultiRaster([[[0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.37428797, 0.41595555, ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.35360402, 0.42297009, ..., 0.06924557,
+ 0. , 0. ],
+ ...,
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ]],
+
+ [[0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.36288963, 0.33088875, ..., 0.06863035,
+ 0. , 0. ],
+ [0. , 0.40169546, 0.36123674, ..., 0.23938736,
+ 0.17013502, 0. ],
+ ...,
+ [0. , 0.00850506, 0.10102002, ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0.04147018, ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ]],
+
+ [[0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.14276847, 0.06932945, ..., 0.48528137,
+ 0.39806072, 0. ],
+ [0. , 0.1217316 , 0.06042334, ..., 0.4193337 ,
+ 0.48612365, 0. ],
+ ...,
+ [0. , 0.1683329 , 0.28176027, ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.13663963, 0.24437534, ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ]],
+
+ ...,
+
+ [[0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ ...,
+ [0. , 0.20829874, 0.04770285, ..., 0.29010027,
+ 0.31952507, 0. ],
+ [0. , 0.20128372, 0.11750307, ..., 0.23404662,
+ 0.28716789, 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ]],
+
+ [[0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ ...,
+ [0. , 0. , 0. , ..., 0.03460397,
+ 0.06389793, 0. ],
+ [0. , 0.0151827 , 0. , ..., 0. ,
+ 0.06675575, 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ]],
+
+ [[0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.12005394, 0.18382625, ..., 0. ,
+ 0. , 0. ],
+ [0. , 0.12296892, 0.15536983, ..., 0. ,
+ 0. , 0. ],
+ ...,
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ],
+ [0. , 0. , 0. , ..., 0. ,
+ 0. , 0. ]]])
+```
+
+Note that if the original DEM has shape `(m, n)`, the returned flow direction grid will have shape `(8, m, n)`. The first dimension indexes the eight flow directions, with index 0 corresponding to North, index 1 to Northeast, index 2 to East, and so on clockwise through index 7 (Northwest). The value of array element `(k, i, j)` is the proportion of flow transferred from cell `(i, j)` to its neighbor in direction `k` (where `k` ∈ [0, 7]).
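+
+For example (the cell indices below are arbitrary and purely illustrative), the proportions leaving any cell with defined flow sum to approximately one:
+
+```python
+i, j = 100, 100                    # an arbitrary interior cell
+north_props = fdir[0]              # 2D grid of proportions routed to the North neighbor
+print(fdir[:, i, j])               # proportions for each of the eight directions
+print(fdir[:, i, j].sum())         # ~1.0 for cells with defined flow, 0 otherwise
+```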
+
+Note that you must also specify `routing='mfd'` when using this flow direction grid with other functions, such as `grid.catchment` or `grid.accumulation`, as shown in the sketch below.
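+
+A minimal sketch reusing `grid` and the MFD `fdir` computed above (with a pour point `x, y` assumed to be defined as in the earlier examples):
+
+```python
+# Delineate the catchment and compute flow accumulation with MFD routing
+catch = grid.catchment(x=x, y=y, fdir=fdir, routing='mfd', xytype='coordinate')
+acc = grid.accumulation(fdir, routing='mfd')
+```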
+
## Effect of map projections on routing
diff --git a/docs/index.md b/docs/index.md
index 7b8d14a..5db33ca 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -455,17 +455,24 @@ Performance benchmarks on a 2015 MacBook Pro (M: million, K: thousand):
| Function | Routing | Number of cells | Run time |
| ----------------------- | ------- | ------------------------ | -------- |
-| `flowdir` | D8 | 36M | 1.09 [s] |
-| `flowdir` | DINF | 36M | 6.64 [s] |
-| `accumulation` | D8 | 36M | 3.65 [s] |
-| `accumulation` | DINF | 36M | 16.2 [s] |
-| `catchment` | D8 | 9.76M | 3.43 [s] |
-| `catchment` | DINF | 9.76M | 5.41 [s] |
-| `distance_to_outlet` | D8 | 9.76M | 4.74 [s] |
-| `distance_to_outlet` | DINF | 9.76M | 1 [m] 13 [s] |
-| `distance_to_ridge` | D8 | 36M | 6.83 [s] |
-| `hand` | D8 | 36M total, 730K channel | 12.9 [s] |
-| `hand` | DINF | 36M total, 770K channel | 18.7 [s] |
+| `flowdir` | D8 | 36M | 1.14 [s] |
+| `flowdir` | DINF | 36M | 7.01 [s] |
+| `flowdir` | MFD | 36M | 4.21 [s] |
+| `accumulation` | D8 | 36M | 3.44 [s] |
+| `accumulation` | DINF | 36M | 14.9 [s] |
+| `accumulation` | MFD | 36M | 32.5 [s] |
+| `catchment` | D8 | 9.76M | 2.19 [s] |
+| `catchment` | DINF | 9.76M | 3.5 [s] |
+| `catchment` | MFD | 9.76M | 17.1 [s] |
+| `distance_to_outlet` | D8 | 9.76M | 2.98 [s] |
+| `distance_to_outlet` | DINF | 9.76M | 5.49 [s] |
+| `distance_to_outlet` | MFD | 9.76M | 13.1 [s] |
+| `distance_to_ridge` | D8 | 36M | 4.53 [s] |
+| `distance_to_ridge` | DINF | 36M | 14.5 [s] |
+| `distance_to_ridge` | MFD | 36M | 31.3 [s] |
+| `hand` | D8 | 36M total, 730K channel | 12.3 [s] |
+| `hand` | DINF | 36M total, 770K channel | 15.8 [s] |
+| `hand` | MFD | 36M total, 770K channel | 29.8 [s] |
| `stream_order` | D8 | 36M total, 1M channel | 3.99 [s] |
| `extract_river_network` | D8 | 36M total, 345K channel | 4.07 [s] |
| `detect_pits` | N/A | 36M | 1.80 [s] |
@@ -475,10 +482,13 @@ Performance benchmarks on a 2015 MacBook Pro (M: million, K: thousand):
| `resolve_flats` | N/A | 36M | 9.56 [s] |
| `cell_dh` | D8 | 36M | 2.34 [s] |
| `cell_dh` | DINF | 36M | 4.92 [s] |
+| `cell_dh` | MFD | 36M | 30.1 [s] |
| `cell_distances` | D8 | 36M | 1.11 [s] |
| `cell_distances` | DINF | 36M | 2.16 [s] |
+| `cell_distances` | MFD | 36M | 26.8 [s] |
| `cell_slopes` | D8 | 36M | 4.01 [s] |
| `cell_slopes` | DINF | 36M | 10.2 [s] |
+| `cell_slopes` | MFD | 36M | 58.7 [s] |
Speed tests were run on a conditioned DEM from the HYDROSHEDS DEM repository
(linked above as `elevation.tiff`).
diff --git a/pysheds/__init__.py b/pysheds/__init__.py
index 260c070..f9aa3e1 100644
--- a/pysheds/__init__.py
+++ b/pysheds/__init__.py
@@ -1 +1 @@
-__version__ = "0.3.1"
+__version__ = "0.3.2"
diff --git a/pysheds/_sgrid.py b/pysheds/_sgrid.py
index b60c5dc..1fe424f 100644
--- a/pysheds/_sgrid.py
+++ b/pysheds/_sgrid.py
@@ -1,3 +1,4 @@
+from heapq import heappop, heappush
import numpy as np
from numba import njit, prange
from numba.types import float64, int64, uint32, uint16, uint8, boolean, UniTuple, Tuple, List, void
@@ -252,6 +253,35 @@ def _angle_to_d8_numba(angles, dirmap, nodata_cells):
props_1.flat[i] = prop_1
return fdirs_0, fdirs_1, props_0, props_1
+@njit(float64[:,:,:](float64[:,:], float64, float64, boolean[:,:], float64, int64),
+ parallel=True,
+ cache=True)
+def _mfd_flowdir_numba(dem, dx, dy, nodata_cells, nodata_out, p=1):
+ m, n = dem.shape
+ fdir = np.zeros((8, m, n), dtype=np.float64)
+ row_offsets = np.array([-1, -1, 0, 1, 1, 1, 0, -1])
+ col_offsets = np.array([0, 1, 1, 1, 0, -1, -1, -1])
+ dd = np.sqrt(dx**2 + dy**2)
+ distances = np.array([dy, dd, dx, dd, dy, dd, dx, dd])
+ for i in prange(1, m - 1):
+ for j in prange(1, n - 1):
+ if nodata_cells[i, j]:
+ fdir[:, i, j] = nodata_out
+ else:
+ elev = dem[i, j]
+ den = 0.
+ for k in range(8):
+ row_offset = row_offsets[k]
+ col_offset = col_offsets[k]
+ distance = distances[k]
+ num = (elev - dem[i + row_offset, j + col_offset])**p / distance
+ if num > 0:
+ fdir[k, i, j] = num
+ den += num
+ if den > 0:
+ fdir[:, i, j] /= den
+ return fdir
+
# Functions for 'catchment'
@njit(void(int64, boolean[:,:], int64[:,:], int64[:], int64[:]),
@@ -373,6 +403,37 @@ def _dinf_catchment_iter_numba(fdir_0, fdir_1, pour_point, dirmap):
queue.append(neighbor)
return catch
+@njit(boolean[:,:](float64[:,:,:], UniTuple(int64, 2)),
+ cache=True)
+def _mfd_catchment_iter_numba(fdir, pour_point):
+ _, m, n = fdir.shape
+ mn = m * n
+ catch = np.zeros((m, n), dtype=np.bool8)
+ i, j = pour_point
+ ix = (i * n) + j
+ offsets = np.array([-n, 1 - n, 1,
+ 1 + n, n, - 1 + n,
+ - 1, - 1 - n])
+ r_dirmap = np.array([4, 5, 6, 7,
+ 0, 1, 2, 3])
+ queue = [ix]
+ while queue:
+ parent = queue.pop()
+ catch.flat[parent] = True
+ neighbors = offsets + parent
+ for k in range(8):
+ neighbor = neighbors[k]
+ neighbor_dir = r_dirmap[k]
+ visited = catch.flat[neighbor]
+ if visited:
+ continue
+ else:
+ kix = neighbor + (neighbor_dir * mn)
+ points_to = fdir.flat[kix] > 0.
+ if points_to:
+ queue.append(neighbor)
+ return catch
+
# Functions for 'accumulation'
@njit(void(int64, int64, float64[:,:], int64[:,:], uint8[:]),
@@ -582,6 +643,69 @@ def _dinf_accumulation_eff_iter_numba(acc, fdir_0, fdir_1, indegree, startnodes,
queue.append(endnode_1)
return acc
+@njit(uint8[:](int64[:,:,:]), parallel=True)
+def _mfd_bincount(fdir):
+ p, m, n = fdir.shape
+ mn = m * n
+ out = np.zeros(mn, dtype=np.uint8)
+ for i in range(p):
+ fdir_i = fdir[i]
+ for j in prange(mn):
+ endnode = fdir_i.flat[j]
+ if endnode != j:
+ out[endnode] += 1
+ return out
+
+@njit(float64[:,:](float64[:,:], int64[:,:,:], float64[:,:,:], uint8[:], int64[:]),
+ cache=True)
+def _mfd_accumulation_iter_numba(acc, fdir, props, indegree, startnodes):
+ n = startnodes.size
+ queue = [0]
+ _ = queue.pop()
+ for k in range(n):
+ startnode = startnodes.flat[k]
+ queue.append(startnode)
+ while queue:
+ startnode = queue.pop()
+ for i in range(8):
+ fdir_i = fdir[i]
+ props_i = props[i]
+ endnode = fdir_i.flat[startnode]
+ if endnode == startnode:
+ continue
+ else:
+ prop = props_i.flat[startnode]
+ acc.flat[endnode] += (prop * acc.flat[startnode])
+ indegree.flat[endnode] -= 1
+ if (indegree.flat[endnode] == 0):
+ queue.append(endnode)
+ return acc
+
+@njit(float64[:,:](float64[:,:], int64[:,:,:], float64[:,:,:], uint8[:], int64[:], float64[:,:]),
+ cache=True)
+def _mfd_accumulation_eff_iter_numba(acc, fdir, props, indegree, startnodes, eff):
+ n = startnodes.size
+ queue = [0]
+ _ = queue.pop()
+ for k in range(n):
+ startnode = startnodes.flat[k]
+ queue.append(startnode)
+ while queue:
+ startnode = queue.pop()
+ for i in range(8):
+ fdir_i = fdir[i]
+ props_i = props[i]
+ endnode = fdir_i.flat[startnode]
+ if endnode == startnode:
+ continue
+ else:
+ prop = props_i.flat[startnode]
+ acc.flat[endnode] += (prop * acc.flat[startnode] * eff.flat[startnode])
+ indegree.flat[endnode] -= 1
+ if (indegree.flat[endnode] == 0):
+ queue.append(endnode)
+ return acc
+
# Functions for 'flow_distance'
@njit(void(int64, int64[:,:], boolean[:,:], float64[:,:], float64[:,:],
@@ -702,6 +826,7 @@ def _dinf_flow_distance_recur_numba(fdir_0, fdir_1, weights_0, weights_1,
def _dinf_flow_distance_iter_numba(fdir_0, fdir_1, weights_0, weights_1,
pour_point, dirmap):
dist = np.full(fdir_0.shape, np.inf, dtype=np.float64)
+ visited = np.zeros(fdir_0.shape, dtype=np.bool8)
r_dirmap = np.array([dirmap[4], dirmap[5], dirmap[6],
dirmap[7], dirmap[0], dirmap[1],
dirmap[2], dirmap[3]])
@@ -712,74 +837,168 @@ def _dinf_flow_distance_iter_numba(fdir_0, fdir_1, weights_0, weights_1,
i, j = pour_point
ix = (i * n) + j
dist.flat[ix] = 0.
- queue = [ix]
+ queue = [(0., ix)]
while queue:
- parent = queue.pop()
- parent_dist = dist.flat[parent]
+ parent_dist, parent = heappop(queue)
+ visited.flat[parent] = True
neighbors = offsets + parent
for k in range(8):
neighbor = neighbors[k]
- current_neighbor_dist = dist.flat[neighbor]
- points_to_0 = (fdir_0.flat[neighbor] == r_dirmap[k])
- points_to_1 = (fdir_1.flat[neighbor] == r_dirmap[k])
- if points_to_0:
- neighbor_dist_0 = parent_dist + weights_0.flat[neighbor]
- if (neighbor_dist_0 < current_neighbor_dist):
- dist.flat[neighbor] = neighbor_dist_0
- queue.append(neighbor)
- elif points_to_1:
- neighbor_dist_1 = parent_dist + weights_1.flat[neighbor]
- if (neighbor_dist_1 < current_neighbor_dist):
- dist.flat[neighbor] = neighbor_dist_1
- queue.append(neighbor)
+ if visited.flat[neighbor]:
+ continue
+ else:
+ current_neighbor_dist = dist.flat[neighbor]
+ points_to_0 = (fdir_0.flat[neighbor] == r_dirmap[k])
+ points_to_1 = (fdir_1.flat[neighbor] == r_dirmap[k])
+ if points_to_0:
+ neighbor_dist_0 = parent_dist + weights_0.flat[neighbor]
+ if (neighbor_dist_0 < current_neighbor_dist):
+ dist.flat[neighbor] = neighbor_dist_0
+ heappush(queue, (neighbor_dist_0, neighbor))
+ elif points_to_1:
+ neighbor_dist_1 = parent_dist + weights_1.flat[neighbor]
+ if (neighbor_dist_1 < current_neighbor_dist):
+ dist.flat[neighbor] = neighbor_dist_1
+ heappush(queue, (neighbor_dist_1, neighbor))
return dist
-@njit(void(int64, int64, int64[:,:], int64[:,:], float64[:,:],
- int64[:,:], uint8[:], float64[:,:]),
+# TODO: Weights should actually be (8, m, n)
+# neighbor_dist = parent_dist + weights.flat[kix]
+@njit(float64[:,:](float64[:,:,:], UniTuple(int64, 2), float64[:,:]),
cache=True)
-def _d8_reverse_distance_recursion(startnode, endnode, min_order, max_order,
- rdist, fdir, indegree, weights):
- min_order.flat[endnode] = min(min_order.flat[endnode], rdist.flat[startnode])
- max_order.flat[endnode] = max(max_order.flat[endnode], rdist.flat[startnode])
+def _mfd_flow_distance_iter_numba(fdir, pour_point, weights):
+ _, m, n = fdir.shape
+ mn = m * n
+ dist = np.full((m, n), np.inf, dtype=np.float64)
+ visited = np.zeros((m, n), dtype=np.bool8)
+ i, j = pour_point
+ ix = (i * n) + j
+ offsets = np.array([-n, 1 - n, 1,
+ 1 + n, n, - 1 + n,
+ - 1, - 1 - n])
+ r_dirmap = np.array([4, 5, 6, 7,
+ 0, 1, 2, 3])
+ dist.flat[ix] = 0.
+ queue = [(0., ix)]
+ while queue:
+ parent_dist, parent = heappop(queue)
+ visited.flat[parent] = True
+ neighbors = offsets + parent
+ for k in range(8):
+ neighbor = neighbors[k]
+ if visited.flat[neighbor]:
+ continue
+ else:
+ neighbor_dir = r_dirmap[k]
+ current_neighbor_dist = dist.flat[neighbor]
+ kix = neighbor + (neighbor_dir * mn)
+ points_to = fdir.flat[kix] > 0.
+ if points_to:
+ neighbor_dist = parent_dist + weights.flat[neighbor]
+ if (neighbor_dist < current_neighbor_dist):
+ dist.flat[neighbor] = neighbor_dist
+ heappush(queue, (neighbor_dist, neighbor))
+ return dist
+
+# Functions for 'reverse_flow_distance'
+
+@njit(void(int64, int64, float64[:,:], int64[:,:], uint8[:], float64[:,:]),
+ cache=True)
+def _d8_reverse_distance_recursion(startnode, endnode, rdist, fdir, indegree,
+ weights):
+ rdist.flat[endnode] = max(rdist.flat[endnode],
+ rdist.flat[startnode] + weights.flat[endnode])
indegree.flat[endnode] -= 1
if indegree.flat[endnode] == 0:
- rdist.flat[endnode] = max_order.flat[endnode] + weights.flat[endnode]
new_startnode = endnode
new_endnode = fdir.flat[new_startnode]
- _d8_reverse_distance_recursion(new_startnode, new_endnode, min_order,
- max_order, rdist, fdir, indegree, weights)
+ _d8_reverse_distance_recursion(new_startnode, new_endnode, rdist, fdir,
+ indegree, weights)
-@njit(float64[:,:](int64[:,:], int64[:,:], float64[:,:], int64[:,:],
+@njit(float64[:,:](float64[:,:], int64[:,:],
uint8[:], int64[:], float64[:,:]),
cache=True)
-def _d8_reverse_distance_recur_numba(min_order, max_order, rdist, fdir,
+def _d8_reverse_distance_recur_numba(rdist, fdir,
indegree, startnodes, weights):
n = startnodes.size
for k in range(n):
startnode = startnodes.flat[k]
endnode = fdir.flat[startnode]
- _d8_reverse_distance_recursion(startnode, endnode, min_order, max_order,
- rdist, fdir, indegree, weights)
+ _d8_reverse_distance_recursion(startnode, endnode, rdist, fdir,
+ indegree, weights)
return rdist
-@njit(float64[:,:](int64[:,:], int64[:,:], float64[:,:], int64[:,:],
+@njit(float64[:,:](float64[:,:], int64[:,:],
uint8[:], int64[:], float64[:,:]),
cache=True)
-def _d8_reverse_distance_iter_numba(min_order, max_order, rdist, fdir,
- indegree, startnodes, weights):
+def _d8_reverse_distance_iter_numba(rdist, fdir, indegree, startnodes, weights):
n = startnodes.size
for k in range(n):
startnode = startnodes.flat[k]
endnode = fdir.flat[startnode]
while(indegree.flat[startnode] == 0):
- min_order.flat[endnode] = min(min_order.flat[endnode], rdist.flat[startnode])
- max_order.flat[endnode] = max(max_order.flat[endnode], rdist.flat[startnode])
+ rdist.flat[endnode] = max(rdist.flat[endnode],
+ rdist.flat[startnode] + weights.flat[endnode])
indegree.flat[endnode] -= 1
- rdist.flat[endnode] = max_order.flat[endnode] + weights.flat[endnode]
startnode = endnode
endnode = fdir.flat[startnode]
return rdist
+# TODO: This should probably have two weights vectors
+@njit(float64[:,:](float64[:,:], int64[:,:], int64[:,:],
+ uint8[:], int64[:], float64[:,:]),
+ cache=True)
+def _dinf_reverse_distance_iter_numba(rdist, fdir_0, fdir_1,
+ indegree, startnodes, weights):
+ n = startnodes.size
+ queue = [0]
+ _ = queue.pop()
+ for k in range(n):
+ startnode = startnodes.flat[k]
+ queue.append(startnode)
+ while queue:
+ startnode = queue.pop()
+ endnode_0 = fdir_0.flat[startnode]
+ endnode_1 = fdir_1.flat[startnode]
+ rdist.flat[endnode_0] = max(rdist.flat[endnode_0],
+ rdist.flat[startnode] + weights.flat[endnode_0])
+ rdist.flat[endnode_1] = max(rdist.flat[endnode_1],
+ rdist.flat[startnode] + weights.flat[endnode_1])
+ indegree.flat[endnode_0] -= 1
+ indegree.flat[endnode_1] -= 1
+ if (indegree.flat[endnode_0] == 0):
+ queue.append(endnode_0)
+ if (indegree.flat[endnode_1] == 0):
+ # Account for cases where both fdirs point in same direction
+ if (endnode_0 != endnode_1):
+ queue.append(endnode_1)
+ return rdist
+
+@njit(float64[:,:](float64[:,:], int64[:,:,:], uint8[:], int64[:], float64[:,:]),
+ cache=True)
+def _mfd_reverse_distance_iter_numba(rdist, fdir, indegree, startnodes, weights):
+ n = startnodes.size
+ queue = [0]
+ _ = queue.pop()
+ for k in range(n):
+ startnode = startnodes.flat[k]
+ queue.append(startnode)
+ while queue:
+ startnode = queue.pop()
+ for i in range(8):
+ fdir_i = fdir[i]
+ endnode = fdir_i.flat[startnode]
+ if endnode == startnode:
+ continue
+ else:
+ weight = weights.flat[startnode]
+ rdist.flat[endnode] = max(rdist.flat[endnode],
+ rdist.flat[startnode] + weights.flat[endnode])
+ indegree.flat[endnode] -= 1
+ if (indegree.flat[endnode] == 0):
+ queue.append(endnode)
+ return rdist
+
# Functions for 'resolve_flats'
@njit(UniTuple(boolean[:,:], 3)(float64[:,:], int64[:]),
@@ -1091,6 +1310,46 @@ def _dinf_hand_recur_numba(fdir_0, fdir_1, mask, dirmap):
_dinf_hand_recursion(parent, parent, hand, offsets, r_dirmap, fdir_0, fdir_1)
return hand
+@njit(int64[:,:](float64[:,:,:], boolean[:,:]),
+ cache=True)
+def _mfd_hand_iter_numba(fdir, mask):
+ _, m, n = fdir.shape
+ mn = m * n
+ offsets = np.array([-n, 1 - n, 1,
+ 1 + n, n, - 1 + n,
+ - 1, - 1 - n])
+ r_dirmap = np.array([4, 5, 6, 7,
+ 0, 1, 2, 3])
+ hand = -np.ones((m, n), dtype=np.int64)
+ cur_queue = []
+ next_queue = []
+ for i in range(hand.size):
+ if mask.flat[i]:
+ hand.flat[i] = i
+ cur_queue.append(i)
+ while True:
+ if not cur_queue:
+ break
+ while cur_queue:
+ k = cur_queue.pop()
+ neighbors = offsets + k
+ for j in range(8):
+ neighbor = neighbors[j]
+ visited = (hand.flat[neighbor] >= 0)
+ if visited:
+ continue
+ else:
+ neighbor_dir = r_dirmap[j]
+ kix = neighbor + (neighbor_dir * mn)
+ points_to = fdir.flat[kix] > 0.
+ if points_to:
+ hand.flat[neighbor] = hand.flat[k]
+ next_queue.append(neighbor)
+ while next_queue:
+ next_cell = next_queue.pop()
+ cur_queue.append(next_cell)
+ return hand
+
@njit(float64[:,:](int64[:,:], float64[:,:], float64),
parallel=True,
cache=True)
@@ -1207,7 +1466,9 @@ def _d8_stream_network_iter_numba(fdir, indegree, orig_indegree, startnodes):
endnode = fdir.flat[startnode]
return profiles
-@njit(parallel=True)
+@njit(float64[:,:](int64[:,:], int64[:,:], float64[:,:]),
+ parallel=True,
+ cache=True)
def _d8_cell_dh_numba(startnodes, endnodes, dem):
n = startnodes.size
dh = np.zeros_like(dem)
@@ -1217,7 +1478,9 @@ def _d8_cell_dh_numba(startnodes, endnodes, dem):
dh.flat[k] = dem.flat[startnode] - dem.flat[endnode]
return dh
-@njit(parallel=True)
+@njit(float64[:,:](int64[:,:], int64[:,:], int64[:,:], float64[:,:], float64[:,:], float64[:,:]),
+ parallel=True,
+ cache=True)
def _dinf_cell_dh_numba(startnodes, endnodes_0, endnodes_1, props_0, props_1, dem):
n = startnodes.size
dh = np.zeros(dem.shape, dtype=np.float64)
@@ -1231,7 +1494,28 @@ def _dinf_cell_dh_numba(startnodes, endnodes_0, endnodes_1, props_0, props_1, de
prop_1 * (dem.flat[startnode] - dem.flat[endnode_1]))
return dh
-@njit(parallel=True)
+@njit(float64[:,:](int64[:,:], int64[:,:,:], float64[:,:,:], float64[:,:]),
+ parallel=True,
+ cache=True)
+def _mfd_cell_dh_numba(startnodes, endnodes, props, dem):
+ k, m, n = props.shape
+ mn = m * n
+ N = startnodes.size
+ dh = np.zeros((m, n), dtype=np.float64)
+ for i in prange(N):
+ startnode = startnodes.flat[i]
+ elev = dem.flat[startnode]
+ for j in prange(k):
+ kix = startnode + (j * mn)
+ endnode = endnodes.flat[kix]
+ prop = props.flat[kix]
+ neighbor_elev = dem.flat[endnode]
+ dh.flat[startnode] += prop * (elev - neighbor_elev)
+ return dh
+
+@njit(float64[:,:](int64[:,:], UniTuple(int64, 8), float64, float64),
+ parallel=True,
+ cache=True)
def _d8_cell_distances_numba(fdir, dirmap, dx, dy):
n = fdir.size
cdist = np.zeros(fdir.shape, dtype=np.float64)
@@ -1245,7 +1529,10 @@ def _d8_cell_distances_numba(fdir, dirmap, dx, dy):
cdist.flat[k] = dist_map[fdir_k]
return cdist
-@njit(parallel=True)
+@njit(float64[:,:](int64[:,:], int64[:,:], float64[:,:], float64[:,:], UniTuple(int64, 8),
+ float64, float64),
+ parallel=True,
+ cache=True)
def _dinf_cell_distances_numba(fdir_0, fdir_1, prop_0, prop_1, dirmap, dx, dy):
n = fdir_0.size
cdist = np.zeros(fdir_0.shape, dtype=np.float64)
@@ -1265,7 +1552,28 @@ def _dinf_cell_distances_numba(fdir_0, fdir_1, prop_0, prop_1, dirmap, dx, dy):
cdist.flat[k] = dist_k
return cdist
-@njit(parallel=True)
+@njit(float64[:,:](int64[:,:], int64[:,:,:], float64[:,:,:], float64, float64),
+ parallel=True,
+ cache=True)
+def _mfd_cell_distances_numba(startnodes, endnodes, props, dx, dy):
+ k, m, n = props.shape
+ mn = m * n
+ N = startnodes.size
+ dd = np.sqrt(dx**2 + dy**2)
+ distances = (dy, dd, dx, dd, dy, dd, dx, dd)
+ cdist = np.zeros((m, n), dtype=np.float64)
+ for i in prange(N):
+ startnode = startnodes.flat[i]
+ for j in prange(k):
+ kix = startnode + (j * mn)
+ endnode = endnodes.flat[kix]
+ prop = props.flat[kix]
+ cdist.flat[startnode] += prop * distances[j]
+ return cdist
+
+@njit(float64[:,:](float64[:,:], float64[:,:]),
+ parallel=True,
+ cache=True)
def _cell_slopes_numba(dh, cdist):
n = dh.size
slopes = np.zeros(dh.shape, dtype=np.float64)
@@ -1365,6 +1673,8 @@ def _flatten_fdir_numba(fdir, dirmap):
on_bottom = (k > (n - c - 1))
on_boundary = (on_left | on_right | on_top | on_bottom)
if on_boundary:
+ # TODO: This seems like it could cause errors at corner points
+ # TODO: Check if offset is already zero
if on_left:
offset = left_map[cell_dir]
if on_right:
@@ -1403,6 +1713,57 @@ def _flatten_fdir_no_boundary(fdir, dirmap):
flat_fdir.flat[k] = k + offset
return flat_fdir
+# TODO: Assumes pits and flats are removed
+@njit(int64[:,:,:](float64[:,:,:]),
+ parallel=True,
+ cache=True)
+def _flatten_mfd_fdir_numba(fdir):
+ p, r, c = fdir.shape
+ n = r * c
+ flat_fdir = np.zeros((p, r, c), dtype=np.int64)
+ offsets = np.array([0 - c, 1 - c, 1 + 0, 1 + c,
+ 0 + c, -1 + c, -1 + 0, -1 - c],
+ dtype=np.int64)
+ left_map = np.array([offsets[0], offsets[1], offsets[2], offsets[3],
+ offsets[4], 0, 0, 0],
+ dtype=np.int64)
+ right_map = np.array([offsets[0], 0, 0, 0,
+ offsets[4], offsets[5], offsets[6], offsets[7]],
+ dtype=np.int64)
+ top_map = np.array([0, 0, offsets[2], offsets[3],
+ offsets[4], offsets[5], offsets[6], 0],
+ dtype=np.int64)
+ bottom_map = np.array([offsets[0], offsets[1], offsets[2], 0,
+ 0, 0, offsets[6], offsets[7]],
+ dtype=np.int64)
+ for i in prange(8):
+ for k in prange(n):
+ kix = k + (i * n)
+ cell_value = fdir.flat[kix]
+ if cell_value == 0:
+ offset = 0
+ else:
+ on_left = ((k % c) == 0)
+ on_right = (((k + 1) % c) == 0)
+ on_top = (k < c)
+ on_bottom = (k > (n - c - 1))
+ on_boundary = (on_left | on_right | on_top | on_bottom)
+ if on_boundary:
+ # TODO: This seems like it could cause errors at corner points
+ # TODO: Check if offset is already zero
+ if on_left:
+ offset = left_map[i]
+ if on_right and (offset != 0):
+ offset = right_map[i]
+ if on_top and (offset != 0):
+ offset = top_map[i]
+ if on_bottom and (offset != 0):
+ offset = bottom_map[i]
+ else:
+ offset = offsets[i]
+ flat_fdir.flat[kix] = k + offset
+ return flat_fdir
+
@njit
def _construct_matching(fdir, dirmap):
n = fdir.size
diff --git a/pysheds/_sview.py b/pysheds/_sview.py
index 0ff1a5f..c667ad3 100644
--- a/pysheds/_sview.py
+++ b/pysheds/_sview.py
@@ -3,17 +3,19 @@
from numba.types import float64, UniTuple
@njit(parallel=True)
-def _view_fill_numba(data, out, y_ix, x_ix, y_passed, x_passed):
+def _view_fill_numba(data, out, y_ix, x_ix, y_passed, x_passed, nodata):
n = x_ix.size
m = y_ix.size
for i in prange(m):
for j in prange(n):
if (y_passed[i]) & (x_passed[j]):
out[i, j] = data[y_ix[i], x_ix[j]]
+ else:
+ out[i, j] = nodata
return out
@njit(parallel=True)
-def _view_fill_by_axes_nearest_numba(data, out, y_ix, x_ix):
+def _view_fill_by_axes_nearest_numba(data, out, y_ix, x_ix, nodata):
m, n = y_ix.size, x_ix.size
M, N = data.shape
# Currently need to use inplace form of round
@@ -27,10 +29,12 @@ def _view_fill_by_axes_nearest_numba(data, out, y_ix, x_ix):
for j in prange(n):
if (y_in_bounds[i]) & (x_in_bounds[j]):
out[i, j] = data[y_near[i], x_near[j]]
+ else:
+ out[i, j] = nodata
return out
@njit(parallel=True)
-def _view_fill_by_axes_linear_numba(data, out, y_ix, x_ix):
+def _view_fill_by_axes_linear_numba(data, out, y_ix, x_ix, nodata):
m, n = y_ix.size, x_ix.size
M, N = data.shape
# Find which cells are in bounds
@@ -63,10 +67,12 @@ def _view_fill_by_axes_linear_numba(data, out, y_ix, x_ix):
+ ( ( 1 - tx[j] ) * ty[i] * ll )
+ ( tx[j] * ty[i] * lr ) )
out[i, j] = value
+ else:
+ out[i, j] = nodata
return out
@njit(parallel=True)
-def _view_fill_by_entries_nearest_numba(data, out, y_ix, x_ix):
+def _view_fill_by_entries_nearest_numba(data, out, y_ix, x_ix, nodata):
m, n = y_ix.size, x_ix.size
M, N = data.shape
# Currently need to use inplace form of round
@@ -81,10 +87,12 @@ def _view_fill_by_entries_nearest_numba(data, out, y_ix, x_ix):
for i in prange(n):
if (y_in_bounds[i]) & (x_in_bounds[i]):
out.flat[i] = data[y_near[i], x_near[i]]
+ else:
+ out.flat[i] = nodata
return out
@njit(parallel=True)
-def _view_fill_by_entries_linear_numba(data, out, y_ix, x_ix):
+def _view_fill_by_entries_linear_numba(data, out, y_ix, x_ix, nodata):
m, n = y_ix.size, x_ix.size
M, N = data.shape
# Find which cells are in bounds
@@ -118,6 +126,8 @@ def _view_fill_by_entries_linear_numba(data, out, y_ix, x_ix):
+ ( ( 1 - tx[i] ) * ty[i] * ll )
+ ( tx[i] * ty[i] * lr ) )
out.flat[i] = value
+ else:
+ out.flat[i] = nodata
return out
@njit(UniTuple(float64[:], 2)(UniTuple(float64, 9), float64[:], float64[:]), parallel=True)
diff --git a/pysheds/sgrid.py b/pysheds/sgrid.py
index d2c2861..cf1ffc4 100644
--- a/pysheds/sgrid.py
+++ b/pysheds/sgrid.py
@@ -29,7 +29,7 @@
import pysheds.io
# Import viewing functions
-from pysheds.sview import Raster
+from pysheds.sview import Raster, MultiRaster
from pysheds.sview import View, ViewFinder
# Import numba functions
@@ -582,6 +582,7 @@ def flowdir(self, dem, routing='d8', flats=-1, pits=-2, nodata_out=None,
Routing algorithm to use:
'd8' : D8 flow directions
'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
Additional keyword arguments (**kwargs) are passed to self.view.
@@ -611,8 +612,14 @@ def flowdir(self, dem, routing='d8', flats=-1, pits=-2, nodata_out=None,
fdir = self._dinf_flowdir(dem=dem, nodata_cells=nodata_cells,
nodata_out=nodata_out, flats=flats,
pits=pits, dirmap=dirmap)
+ elif routing.lower() == 'mfd':
+ if nodata_out is None:
+ nodata_out = np.nan
+ fdir = self._mfd_flowdir(dem=dem, nodata_cells=nodata_cells,
+ nodata_out=nodata_out, flats=flats,
+ pits=pits, dirmap=dirmap)
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
fdir.metadata.update(default_metadata)
return fdir
@@ -640,6 +647,21 @@ def _dinf_flowdir(self, dem, nodata_cells, nodata_out=np.nan, flats=-1, pits=-2,
return self._output_handler(data=fdir, viewfinder=dem.viewfinder,
metadata=dem.metadata, nodata=nodata_out)
+ def _mfd_flowdir(self, dem, nodata_cells, nodata_out=np.nan, flats=-1, pits=-2,
+ dirmap=(64, 128, 1, 2, 4, 8, 16, 32)):
+ # Make sure nothing flows to the nodata cells
+ dem[nodata_cells] = dem.max() + 1
+ dx = abs(dem.affine.a)
+ dy = abs(dem.affine.e)
+ # TODO: Allow p value to be changed
+ fdir = _self._mfd_flowdir_numba(dem, dx, dy, nodata_cells,
+ nodata_out, p=1)
+ # Create new mask to match new shape
+ new_mask = np.tile(dem.viewfinder.mask, (8, 1, 1))
+ return self._output_handler(data=fdir, viewfinder=dem.viewfinder,
+ metadata=dem.metadata, nodata=nodata_out,
+ mask=new_mask)
+
def catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata_out=False, xytype='coordinate', routing='d8', snap='corner',
algorithm='iterative', **kwargs):
@@ -673,6 +695,7 @@ def catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16
Routing algorithm to use:
'd8' : D8 flow directions
'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
snap : str
Function to use for self.nearest_cell:
'corner' : numpy.around()
@@ -695,8 +718,10 @@ def catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16
input_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
input_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ input_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
kwargs.update(input_overrides)
fdir = self._input_handler(fdir, **kwargs)
xmin, ymin, xmax, ymax = fdir.bbox
@@ -705,7 +730,7 @@ def catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16
raise ValueError('Pour point ({}, {}) is out of bounds for dataset with bbox {}.'
.format(x, y, (xmin, ymin, xmax, ymax)))
elif xytype == 'index':
- if (x < 0) or (y < 0) or (x >= fdir.shape[1]) or (y >= fdir.shape[0]):
+ if (x < 0) or (y < 0) or (x >= fdir.shape[-1]) or (y >= fdir.shape[-2]):
raise ValueError('Pour point ({}, {}) is out of bounds for dataset with shape {}.'
.format(x, y, fdir.shape))
if routing.lower() == 'd8':
@@ -716,6 +741,10 @@ def catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16
catch = self._dinf_catchment(x, y, fdir=fdir, pour_value=pour_value, dirmap=dirmap,
nodata_out=nodata_out, xytype=xytype, snap=snap,
algorithm=algorithm)
+ elif routing.lower() == 'mfd':
+ catch = self._mfd_catchment(x, y, fdir=fdir, pour_value=pour_value, dirmap=dirmap,
+ nodata_out=nodata_out, xytype=xytype, snap=snap,
+ algorithm=algorithm)
return catch
def _d8_catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
@@ -767,6 +796,31 @@ def _dinf_catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4,
metadata=fdir.metadata, nodata=nodata_out)
return catch
+ def _mfd_catchment(self, x, y, fdir, pour_value=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=False, xytype='coordinate', snap='corner',
+ algorithm='iterative'):
+ # Pad the rim
+ left, right, top, bottom = self._pop_rim(fdir, nodata=0)
+ # If xytype is 'coordinate', delineate catchment based on cell nearest
+ # to given geographic coordinate
+ if xytype in {'label', 'coordinate'}:
+ x, y = self.nearest_cell(x, y, fdir.affine, snap)
+ # Delineate the catchment
+ if algorithm.lower() == 'iterative':
+ catch = _self._mfd_catchment_iter_numba(fdir, (y, x))
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Recursive algorithm not implemented.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ if pour_value is not None:
+ catch[y, x] = pour_value
+ # Create new mask because dimension reduced
+ new_mask = fdir.mask[0]
+ catch = self._output_handler(data=catch, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out,
+ mask=new_mask)
+ return catch
+
def accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata_out=0., efficiency=None, routing='d8', cycle_size=1,
algorithm='iterative', **kwargs):
@@ -795,6 +849,7 @@ def accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
Routing algorithm to use:
'd8' : D8 flow directions
'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
cycle_size : int
Maximum length of cycles to check for in d-infinity grids. (Note
that d-infinity routing can generate cycles that will cause
@@ -816,8 +871,10 @@ def accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
if weights is not None:
@@ -837,6 +894,10 @@ def accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata_out=nodata_out,
efficiency=efficiency,
cycle_size=cycle_size, algorithm=algorithm)
+ elif routing.lower() == 'mfd':
+ acc = self._mfd_accumulation(fdir, weights=weights, dirmap=dirmap,
+ nodata_out=nodata_out,
+ efficiency=efficiency, algorithm=algorithm)
return acc
def _d8_accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
@@ -944,6 +1005,54 @@ def _dinf_accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16
metadata=fdir.metadata, nodata=nodata_out)
return acc
+ def _mfd_accumulation(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=0., efficiency=None, algorithm='iterative', **kwargs):
+ # Find nodata cells and invalid cells
+ nodata_cells = self._get_nodata_cells(fdir)
+ # Set nodata cells to zero
+ fdir[nodata_cells] = 0.
+ # Start and end nodes
+ startnodes = np.arange(fdir[0].size, dtype=np.int64)
+ props = fdir
+ endnodes = _self._flatten_mfd_fdir_numba(props)
+ if weights is not None:
+ acc = weights.astype(np.float64).reshape(fdir[0].shape)
+ # Otherwise, initialize accumulation array to ones where valid cells exist
+ else:
+ acc = np.ones(fdir[0].shape, dtype=np.float64)
+ acc = np.asarray(acc)
+ # If using efficiency, initialize array
+ if efficiency is not None:
+ eff = efficiency.astype(np.float64).reshape(fdir[0].shape)
+ eff = np.asarray(eff)
+ # Find indegree of all cells
+ indegree = _self._mfd_bincount(endnodes)
+ # Set starting nodes to those with no predecessors
+ startnodes = startnodes[(indegree == 0)]
+ # Compute accumulation for no efficiency case
+ if efficiency is None:
+ if algorithm.lower() == 'iterative':
+ acc = _self._mfd_accumulation_iter_numba(acc, endnodes, props,
+ indegree, startnodes)
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Not Implemented.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ # Compute accumulation for efficiency case
+ else:
+ if algorithm.lower() == 'iterative':
+ acc = _self._mfd_accumulation_eff_iter_numba(acc, endnodes, props,
+ indegree, startnodes, eff)
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Not Implemented.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ new_mask = fdir.mask[0]
+ acc = self._output_handler(data=acc, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out,
+ mask=new_mask)
+ return acc
+
def distance_to_outlet(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata_out=np.nan, routing='d8', method='shortest',
xytype='coordinate', snap='corner', algorithm='iterative', **kwargs):
@@ -972,6 +1081,7 @@ def distance_to_outlet(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4,
Routing algorithm to use:
'd8' : D8 flow directions
'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
xytype : 'coordinate' or 'index'
How to interpret parameters 'x' and 'y'.
'coordinate' : x and y represent geographic coordinates
@@ -1001,8 +1111,10 @@ def distance_to_outlet(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4,
input_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
input_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ input_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
kwargs.update(input_overrides)
fdir = self._input_handler(fdir, **kwargs)
if weights is not None:
@@ -1015,7 +1127,7 @@ def distance_to_outlet(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4,
raise ValueError('Pour point ({}, {}) is out of bounds for dataset with bbox {}.'
.format(x, y, (xmin, ymin, xmax, ymax)))
elif xytype == 'index':
- if (x < 0) or (y < 0) or (x >= fdir.shape[1]) or (y >= fdir.shape[0]):
+ if (x < 0) or (y < 0) or (x >= fdir.shape[-1]) or (y >= fdir.shape[-2]):
raise ValueError('Pour point ({}, {}) is out of bounds for dataset with shape {}.'
.format(x, y, fdir.shape))
if routing.lower() == 'd8':
@@ -1028,6 +1140,11 @@ def distance_to_outlet(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4,
dirmap=dirmap, nodata_out=nodata_out,
method=method, xytype=xytype,
snap=snap, algorithm=algorithm)
+ elif routing.lower() == 'mfd':
+ dist = self._mfd_flow_distance(x=x, y=y, fdir=fdir, weights=weights,
+ dirmap=dirmap, nodata_out=nodata_out,
+ method=method, xytype=xytype,
+ snap=snap, algorithm=algorithm)
return dist
def _d8_flow_distance(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
@@ -1041,6 +1158,7 @@ def _d8_flow_distance(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4,
fdir[invalid_cells] = 0
if xytype in {'label', 'coordinate'}:
x, y = self.nearest_cell(x, y, fdir.affine, snap)
+ # TODO: Should this be ones for all cells?
if weights is None:
weights = (~nodata_cells).reshape(fdir.shape).astype(np.float64)
if algorithm.lower() == 'iterative':
@@ -1066,6 +1184,7 @@ def _dinf_flow_distance(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4
weights_0 = weights
weights_1 = weights
else:
+ # TODO: Should this be ones for all cells?
weights_0 = (~nodata_cells).reshape(fdir.shape).astype(np.float64)
weights_1 = weights_0
if method.lower() == 'shortest':
@@ -1084,6 +1203,35 @@ def _dinf_flow_distance(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4
metadata=fdir.metadata, nodata=nodata_out)
return dist
+ def _mfd_flow_distance(self, x, y, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=np.nan, method='shortest', xytype='coordinate',
+ snap='corner', algorithm='iterative', **kwargs):
+ # Pad the rim
+ left, right, top, bottom = self._pop_rim(fdir, nodata=0)
+ # Populate weights
+ if weights is None:
+ weights = np.ones(fdir[0].shape, dtype=np.float64)
+ # If xytype is 'coordinate', delineate catchment based on cell nearest
+ # to given geographic coordinate
+ if xytype in {'label', 'coordinate'}:
+ x, y = self.nearest_cell(x, y, fdir.affine, snap)
+ # Compute flow distances
+ if method.lower() == 'shortest':
+ if algorithm.lower() == 'iterative':
+ dist = _self._mfd_flow_distance_iter_numba(fdir, (y, x), weights)
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Recursive algorithm not implemented.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ else:
+ raise NotImplementedError("Only implemented for shortest path distance.")
+ # Create new mask because dimension reduced
+ new_mask = fdir.mask[0]
+ dist = self._output_handler(data=dist, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out,
+ mask=new_mask)
+ return dist
+
def compute_hand(self, fdir, dem, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata_out=None, routing='d8', return_index=False, algorithm='iterative',
**kwargs):
@@ -1109,7 +1257,8 @@ def compute_hand(self, fdir, dem, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
routing : str
Routing algorithm to use:
'd8' : D8 flow directions
- 'dinf' : D-infinity flow directions (not implemented)
+ 'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
return_index : bool
Boolean value indicating desired output.
- If True, return a Raster where each cell indicates the index
@@ -1133,8 +1282,10 @@ def compute_hand(self, fdir, dem, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
dem_overrides = {'dtype' : np.float64, 'nodata' : dem.nodata}
mask_overrides = {'dtype' : np.bool8, 'nodata' : False}
kwargs.update(fdir_overrides)
@@ -1158,11 +1309,16 @@ def compute_hand(self, fdir, dem, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
hand = self._dinf_compute_hand(fdir=fdir, mask=mask,
nodata_out=nodata_out,
algorithm=algorithm)
+ elif routing.lower() == 'mfd':
+ hand = self._mfd_compute_hand(fdir=fdir, mask=mask,
+ nodata_out=nodata_out,
+ algorithm=algorithm)
# If index is not desired, return heights
if not return_index:
- hand = _self._assign_hand_heights_numba(hand, dem, nodata_out)
- hand = self._output_handler(data=hand, viewfinder=fdir.viewfinder,
- metadata=fdir.metadata, nodata=nodata_out)
+ hand_idx = hand
+ hand = _self._assign_hand_heights_numba(hand_idx, dem, nodata_out)
+ hand = self._output_handler(data=hand, viewfinder=hand_idx.viewfinder,
+ metadata=hand_idx.metadata, nodata=nodata_out)
return hand
def _d8_compute_hand(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
@@ -1207,6 +1363,25 @@ def _dinf_compute_hand(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
metadata=fdir.metadata, nodata=-1)
return hand
+ def _mfd_compute_hand(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=-1, algorithm='iterative'):
+ # Get nodata cells
+ nodata_cells = self._get_nodata_cells(fdir)
+ # Pad the rim
+ dirleft, dirright, dirtop, dirbottom = self._pop_rim(fdir, nodata=0.)
+ maskleft, maskright, masktop, maskbottom = self._pop_rim(mask, nodata=False)
+ if algorithm.lower() == 'iterative':
+ hand = _self._mfd_hand_iter_numba(fdir, mask)
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Recursive algorithm not implemented.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ new_mask = fdir.mask[0]
+ hand = self._output_handler(data=hand, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=-1,
+ mask=new_mask)
+ return hand
+
def extract_river_network(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
routing='d8', algorithm='iterative', **kwargs):
"""
@@ -1241,7 +1416,7 @@ def extract_river_network(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32)
if routing.lower() == 'd8':
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
else:
- raise NotImplementedError('Only implemented for D8 routing.')
+ raise NotImplementedError('Only implemented for `d8` routing.')
mask_overrides = {'dtype' : np.bool8, 'nodata' : False}
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
@@ -1313,7 +1488,7 @@ def stream_order(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
if routing.lower() == 'd8':
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
else:
- raise NotImplementedError('Only implemented for D8 routing.')
+ raise NotImplementedError('Only implemented for `d8` routing.')
mask_overrides = {'dtype' : np.bool8, 'nodata' : False}
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
@@ -1347,8 +1522,9 @@ def stream_order(self, fdir, mask, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
metadata=fdir.metadata, nodata=nodata_out)
return order
- def distance_to_ridge(self, fdir, mask, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
- nodata_out=0, routing='d8', algorithm='iterative', **kwargs):
+ def distance_to_ridge(self, fdir, weights=None, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=np.nan, routing='d8', algorithm='iterative', cycle_size=1,
+ **kwargs):
"""
Generates a raster representing the (weighted) topological distance from each cell
to its originating drainage divide, moving upstream.
@@ -1368,6 +1544,8 @@ def distance_to_ridge(self, fdir, mask, weights=None, dirmap=(64, 128, 1, 2, 4,
routing : str
Routing algorithm to use:
'd8' : D8 flow directions
+ 'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
algorithm : str
Algorithm type to use:
'iterative' : Use an iterative algorithm (recommended).
@@ -1383,42 +1561,55 @@ def distance_to_ridge(self, fdir, mask, weights=None, dirmap=(64, 128, 1, 2, 4,
"""
if routing.lower() == 'd8':
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'dinf':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise NotImplementedError('Only implemented for D8 routing.')
- mask_overrides = {'dtype' : np.bool8, 'nodata' : False}
+ raise NotImplementedError('Routing method must be one of: `d8`, `dinf`, `mfd`')
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
- kwargs.update(mask_overrides)
- mask = self._input_handler(mask, **kwargs)
if weights is not None:
weights_overrides = {'dtype' : np.float64, 'nodata' : weights.nodata}
kwargs.update(weights_overrides)
weights = self._input_handler(weights, **kwargs)
+ if routing.lower() == 'd8':
+ rdist = self._d8_distance_to_ridge(fdir=fdir, weights=weights,
+ dirmap=dirmap, algorithm=algorithm,
+ nodata_out=nodata_out)
+ elif routing.lower() == 'dinf':
+ rdist = self._dinf_distance_to_ridge(fdir=fdir, weights=weights,
+ dirmap=dirmap, algorithm=algorithm,
+ nodata_out=nodata_out, cycle_size=cycle_size)
+ elif routing.lower() == 'mfd':
+ rdist = self._mfd_distance_to_ridge(fdir=fdir, weights=weights,
+ dirmap=dirmap, algorithm=algorithm,
+ nodata_out=nodata_out, cycle_size=cycle_size)
+ return rdist
+
+ def _d8_distance_to_ridge(self, fdir, weights, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ algorithm='iterative', nodata_out=np.nan, **kwargs):
# Find nodata cells and invalid cells
nodata_cells = self._get_nodata_cells(fdir)
invalid_cells = ~np.in1d(fdir.ravel(), dirmap).reshape(fdir.shape)
# Set nodata cells to zero
fdir[nodata_cells] = 0
fdir[invalid_cells] = 0
+ # TODO: Should this be ones for all cells?
if weights is None:
weights = (~nodata_cells).reshape(fdir.shape).astype(np.float64)
- maskleft, maskright, masktop, maskbottom = self._pop_rim(mask, nodata=0)
- masked_fdir = np.where(mask, fdir, 0).astype(np.int64)
startnodes = np.arange(fdir.size, dtype=np.int64)
- endnodes = _self._flatten_fdir_numba(masked_fdir, dirmap).reshape(fdir.shape)
+ endnodes = _self._flatten_fdir_numba(fdir, dirmap).reshape(fdir.shape)
indegree = np.bincount(endnodes.ravel()).astype(np.uint8)
- orig_indegree = np.copy(indegree)
startnodes = startnodes[(indegree == 0)]
- min_order = np.full(fdir.shape, np.iinfo(np.int64).max, dtype=np.int64)
- max_order = np.ones(fdir.shape, dtype=np.int64)
rdist = np.zeros(fdir.shape, dtype=np.float64)
if algorithm.lower() == 'iterative':
- rdist = _self._d8_reverse_distance_iter_numba(min_order, max_order, rdist,
- endnodes, indegree, startnodes,
- weights)
+ rdist = _self._d8_reverse_distance_iter_numba(rdist, endnodes,
+ indegree, startnodes,
+ weights)
elif algorithm.lower() == 'recursive':
- rdist = _self._d8_reverse_distance_recur_numba(min_order, max_order, rdist,
- endnodes, indegree, startnodes,
+ rdist = _self._d8_reverse_distance_recur_numba(rdist, endnodes,
+ indegree, startnodes,
weights)
else:
raise ValueError('Algorithm must be `iterative` or `recursive`.')
@@ -1426,6 +1617,76 @@ def distance_to_ridge(self, fdir, mask, weights=None, dirmap=(64, 128, 1, 2, 4,
metadata=fdir.metadata, nodata=nodata_out)
return rdist
+ def _dinf_distance_to_ridge(self, fdir, weights, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ algorithm='iterative', nodata_out=np.nan, cycle_size=1,
+ **kwargs):
+ # Find nodata cells and invalid cells
+ nodata_cells = self._get_nodata_cells(fdir)
+ # Split d-infinity grid
+ fdir_0, fdir_1, prop_0, prop_1 = _self._angle_to_d8_numba(fdir, dirmap, nodata_cells)
+ # Get matching of start and end nodes
+ startnodes = np.arange(fdir.size, dtype=np.int64)
+ endnodes_0 = _self._flatten_fdir_numba(fdir_0, dirmap).reshape(fdir.shape)
+ endnodes_1 = _self._flatten_fdir_numba(fdir_1, dirmap).reshape(fdir.shape)
+ # Remove cycles
+ _self._dinf_fix_cycles_numba(endnodes_0, endnodes_1, cycle_size)
+ # Find indegree of all cells
+ indegree_0 = np.bincount(endnodes_0.ravel(), minlength=fdir.size)
+ indegree_1 = np.bincount(endnodes_1.ravel(), minlength=fdir.size)
+ indegree = (indegree_0 + indegree_1).astype(np.uint8)
+ # Set starting nodes to those with no predecessors
+ startnodes = startnodes[(indegree == 0)]
+ # TODO: Should this be ones for all cells?
+ if weights is None:
+ weights = np.ones(fdir_0.shape, dtype=np.float64)
+ rdist = np.zeros(fdir_0.shape, dtype=np.float64)
+ if algorithm.lower() == 'iterative':
+ rdist = _self._dinf_reverse_distance_iter_numba(rdist,
+ endnodes_0,
+ endnodes_1,
+ indegree,
+ startnodes,
+ weights)
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Recursive algorithm not implemented for distance_to_ridge.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ rdist = self._output_handler(data=rdist, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out)
+ return rdist
+
+ def _mfd_distance_to_ridge(self, fdir, weights, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ algorithm='iterative', nodata_out=np.nan, cycle_size=1,
+ **kwargs):
+ # Find nodata cells and invalid cells
+ nodata_cells = self._get_nodata_cells(fdir)
+ # Set nodata cells to zero
+ fdir[nodata_cells] = 0.
+ # Start and end nodes
+ startnodes = np.arange(fdir[0].size, dtype=np.int64)
+ props = fdir
+ endnodes = _self._flatten_mfd_fdir_numba(props)
+ if weights is None:
+ weights = np.ones(fdir[0].shape, dtype=np.float64)
+ # Find indegree of all cells
+ indegree = _self._mfd_bincount(endnodes)
+ # Set starting nodes to those with no predecessors
+ startnodes = startnodes[(indegree == 0)]
+ # Compute distance to ridge
+ rdist = np.zeros(fdir[0].shape, dtype=np.float64)
+ if algorithm.lower() == 'iterative':
+ rdist = _self._mfd_reverse_distance_iter_numba(rdist, endnodes, indegree,
+ startnodes, weights)
+ elif algorithm.lower() == 'recursive':
+ raise NotImplementedError('Not Implemented.')
+ else:
+ raise ValueError('Algorithm must be `iterative` or `recursive`.')
+ new_mask = fdir.mask[0]
+ rdist = self._output_handler(data=rdist, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out,
+ mask=new_mask)
+ return rdist
+
def cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata_out=np.nan, routing='d8', **kwargs):
"""
@@ -1447,7 +1708,8 @@ def cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
routing : str
Routing algorithm to use:
'd8' : D8 flow directions
- 'dinf' : D-infinity flow directions (not implemented)
+ 'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
Additional keyword arguments (**kwargs) are passed to self.view.
@@ -1460,8 +1722,10 @@ def cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
dem_overrides = {'dtype' : np.float64, 'nodata' : dem.nodata}
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
@@ -1473,6 +1737,9 @@ def cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
elif routing.lower() == 'dinf':
dh = self._dinf_cell_dh(dem=dem, fdir=fdir, dirmap=dirmap,
nodata_out=nodata_out)
+ elif routing.lower() == 'mfd':
+ dh = self._mfd_cell_dh(dem=dem, fdir=fdir, dirmap=dirmap,
+ nodata_out=nodata_out)
return dh
def _d8_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
@@ -1484,7 +1751,7 @@ def _d8_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
fdir[nodata_cells] = 0
fdir[invalid_cells] = 0
dirleft, dirright, dirtop, dirbottom = self._pop_rim(fdir, nodata=0)
- startnodes = np.arange(fdir.size, dtype=np.int64)
+ startnodes = np.arange(fdir.size, dtype=np.int64).reshape(fdir.shape)
endnodes = _self._flatten_fdir_numba(fdir, dirmap).reshape(fdir.shape)
dh = _self._d8_cell_dh_numba(startnodes, endnodes, dem)
dh = self._output_handler(data=dh, viewfinder=fdir.viewfinder,
@@ -1492,7 +1759,7 @@ def _d8_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
return dh
def _dinf_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
- nodata_out=np.nan):
+ nodata_out=np.nan):
# Get nodata cells
nodata_cells = self._get_nodata_cells(fdir)
# Split dinf flowdir
@@ -1502,7 +1769,7 @@ def _dinf_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
nodata=0)
dirleft_1, dirright_1, dirtop_1, dirbottom_1 = self._pop_rim(fdir_1,
nodata=0)
- startnodes = np.arange(fdir.size, dtype=np.int64)
+ startnodes = np.arange(fdir.size, dtype=np.int64).reshape(fdir.shape)
endnodes_0 = _self._flatten_fdir_numba(fdir_0, dirmap).reshape(fdir.shape)
endnodes_1 = _self._flatten_fdir_numba(fdir_1, dirmap).reshape(fdir.shape)
dh = _self._dinf_cell_dh_numba(startnodes, endnodes_0, endnodes_1, prop_0, prop_1, dem)
@@ -1510,6 +1777,23 @@ def _dinf_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
metadata=fdir.metadata, nodata=nodata_out)
return dh
+ def _mfd_cell_dh(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=np.nan):
+ # Find nodata cells and invalid cells
+ nodata_cells = self._get_nodata_cells(fdir)
+ # Set nodata cells to zero
+ fdir[nodata_cells] = 0.
+ # Start and end nodes
+ startnodes = np.arange(fdir[0].size, dtype=np.int64).reshape(fdir[0].shape)
+ props = fdir
+ endnodes = _self._flatten_mfd_fdir_numba(props)
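+        # Elevation drop from each cell to its downstream neighbors, weighted by the MFD flow proportions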
+ dh = _self._mfd_cell_dh_numba(startnodes, endnodes, props, dem)
+ new_mask = fdir.mask[0]
+ dh = self._output_handler(data=dh, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out,
+ mask=new_mask)
+ return dh
+
def cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_out=np.nan,
routing='d8', **kwargs):
"""
@@ -1528,7 +1812,8 @@ def cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_out=
routing : str
Routing algorithm to use:
'd8' : D8 flow directions
- 'dinf' : D-infinity flow directions (not implemented)
+ 'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
Additional keyword arguments (**kwargs) are passed to self.view.
@@ -1542,8 +1827,10 @@ def cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_out=
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
if routing.lower() == 'd8':
@@ -1552,6 +1839,9 @@ def cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_out=
elif routing.lower() == 'dinf':
cdist = self._dinf_cell_distances(fdir=fdir, dirmap=dirmap,
nodata_out=nodata_out)
+ elif routing.lower() == 'mfd':
+ cdist = self._mfd_cell_distances(fdir=fdir, dirmap=dirmap,
+ nodata_out=nodata_out)
return cdist
def _d8_cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
@@ -1588,6 +1878,25 @@ def _dinf_cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
metadata=fdir.metadata, nodata=nodata_out)
return cdist
+ def _mfd_cell_distances(self, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32),
+ nodata_out=np.nan):
+ # Find nodata cells and invalid cells
+ nodata_cells = self._get_nodata_cells(fdir)
+ # Set nodata cells to zero
+ fdir[nodata_cells] = 0.
+ # Start and end nodes
+ startnodes = np.arange(fdir[0].size, dtype=np.int64).reshape(fdir[0].shape)
+ props = fdir
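+        # Flatten the MFD grid to get, for each cell, the flat indices of its candidate downstream neighbors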
+ endnodes = _self._flatten_mfd_fdir_numba(props)
+ dx = abs(fdir.affine.a)
+ dy = abs(fdir.affine.e)
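+        # Cell size is taken from the affine transform; this assumes an axis-aligned (non-rotated) grid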
+ cdist = _self._mfd_cell_distances_numba(startnodes, endnodes, props, dx, dy)
+ new_mask = fdir.mask[0]
+ cdist = self._output_handler(data=cdist, viewfinder=fdir.viewfinder,
+ metadata=fdir.metadata, nodata=nodata_out,
+ mask=new_mask)
+ return cdist
+
def cell_slopes(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_out=np.nan,
routing='d8', **kwargs):
"""
@@ -1609,7 +1918,8 @@ def cell_slopes(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_ou
routing : str
Routing algorithm to use:
'd8' : D8 flow directions
- 'dinf' : D-infinity flow directions (not implemented)
+ 'dinf' : D-infinity flow directions
+ 'mfd' : Multiple flow directions
Additional keyword arguments (**kwargs) are passed to self.view.
@@ -1623,8 +1933,10 @@ def cell_slopes(self, dem, fdir, dirmap=(64, 128, 1, 2, 4, 8, 16, 32), nodata_ou
fdir_overrides = {'dtype' : np.int64, 'nodata' : fdir.nodata}
elif routing.lower() == 'dinf':
fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
+ elif routing.lower() == 'mfd':
+ fdir_overrides = {'dtype' : np.float64, 'nodata' : fdir.nodata}
else:
- raise ValueError('Routing method must be one of: `d8`, `dinf`')
+ raise ValueError('Routing method must be one of: `d8`, `dinf`, `mfd`')
dem_overrides = {'dtype' : np.float64, 'nodata' : dem.nodata}
kwargs.update(fdir_overrides)
fdir = self._input_handler(fdir, **kwargs)
@@ -2021,7 +2333,10 @@ def _output_handler(self, data, viewfinder, metadata={}, **kwargs):
for param, value in kwargs.items():
if (value is not None) and (hasattr(new_view, param)):
setattr(new_view, param, value)
- dataset = Raster(data, new_view, metadata=metadata)
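+        # Wrap 2D output in a Raster and 3D output (e.g. MFD grids) in a MultiRaster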
+ if (data.ndim == 2):
+ dataset = Raster(data, new_view, metadata=metadata)
+ elif (data.ndim == 3):
+ dataset = MultiRaster(data, new_view, metadata=metadata)
return dataset
def _get_nodata_cells(self, data):
@@ -2039,15 +2354,15 @@ def _get_nodata_cells(self, data):
def _pop_rim(self, data, nodata=0):
left, right, top, bottom = (data[:,0].copy(), data[:,-1].copy(),
data[0,:].copy(), data[-1,:].copy())
- data[:,0] = nodata
- data[:,-1] = nodata
- data[0,:] = nodata
- data[-1,:] = nodata
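+        # Ellipsis indexing sets the rim on the last two axes, handling both 2D Rasters and 3D MultiRasters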
+ data[..., :, 0] = nodata
+ data[..., :, -1] = nodata
+ data[..., 0, :] = nodata
+ data[..., -1, :] = nodata
return left, right, top, bottom
def _replace_rim(self, data, left, right, top, bottom):
- data[:,0] = left
- data[:,-1] = right
- data[0,:] = top
- data[-1,:] = bottom
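+        # As above, ellipsis indexing supports both 2D and 3D (MultiRaster) data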
+ data[..., :, 0] = left
+ data[..., :, -1] = right
+ data[..., 0, :] = top
+ data[..., -1, :] = bottom
return None
diff --git a/pysheds/sview.py b/pysheds/sview.py
index 0822a6a..a13d13a 100644
--- a/pysheds/sview.py
+++ b/pysheds/sview.py
@@ -49,6 +49,13 @@ class Raster(np.ndarray):
"""
def __new__(cls, input_array, viewfinder=None, metadata={}):
+ try:
+            # Input to Raster must be an ndarray
+ assert isinstance(input_array, np.ndarray)
+            # Ensure Raster data is 2D
+ assert input_array.ndim == 2
+ except:
+ raise TypeError('Input must be a 2-dimensional array-like object.')
# Handle case where input is a Raster itself
if isinstance(input_array, Raster):
input_array, viewfinder, metadata = cls._handle_raster_input(input_array,
@@ -218,7 +225,7 @@ def to_crs(self, new_crs, **kwargs):
old_crs = self.crs
dx = self.affine.a
dy = self.affine.e
- m, n = self.shape
+ m, n = self.shape[-2], self.shape[-1]
Y, X = np.mgrid[0:m, 0:n]
top = np.column_stack([X[0, :], Y[0, :]])
bottom = np.column_stack([X[-1, :], Y[-1, :]])
@@ -228,7 +235,7 @@ def to_crs(self, new_crs, **kwargs):
xi, yi = boundary[:,0], boundary[:,1]
xb, yb = View.affine_transform(self.affine, xi, yi)
xb_p, yb_p = pyproj.transform(old_crs, new_crs, xb, yb,
- errcheck=True, always_xy=True)
+ errcheck=True, always_xy=True)
x0_p = xb_p.min() if (dx > 0) else xb_p.max()
y0_p = yb_p.min() if (dy > 0) else yb_p.max()
xn_p = xb_p.max() if (dx > 0) else xb_p.min()
@@ -240,9 +247,60 @@ def to_crs(self, new_crs, **kwargs):
nodata=self.nodata, mask=self.mask,
crs=new_crs)
new_raster = View.view(self, target_view=new_viewfinder,
- data_view=self.viewfinder, **kwargs)
+ data_view=self.viewfinder, **kwargs)
return new_raster
+class MultiRaster(Raster):
+ def __new__(cls, input_array, viewfinder=None, metadata={}):
+ try:
+            # Input to MultiRaster must be an ndarray
+ assert isinstance(input_array, np.ndarray)
+ # If 2D, upcast to 3D
+ if (input_array.ndim == 2):
+ input_array = input_array.reshape(1, *input_array.shape)
+ # Ensure MultiRaster is 3D
+ assert input_array.ndim == 3
+ except:
+            raise TypeError('Input must be a 2- or 3-dimensional array-like object.')
+ # Handle case where input is a Raster itself
+ if isinstance(input_array, Raster):
+ input_array, viewfinder, metadata = cls._handle_raster_input(input_array,
+ viewfinder,
+ metadata)
+ # Create a numpy array from the input
+ obj = np.asarray(input_array).view(cls)
+ # If no viewfinder provided, construct one congruent with the array shape
+ if viewfinder is None:
+ viewfinder = ViewFinder(shape=obj.shape)
+ # If a viewfinder is provided, ensure that it is a viewfinder...
+ else:
+ try:
+ assert(isinstance(viewfinder, ViewFinder))
+ except:
+ raise ValueError("Must initialize with a ViewFinder.")
+ # Ensure that viewfinder shape is correct...
+ try:
+ assert viewfinder.shape == obj.shape
+ except:
+ raise ValueError('Viewfinder and array shape must be the same.')
+ # Test typing of array
+ try:
+ assert not np.issubdtype(obj.dtype, np.object_)
+ assert not np.issubdtype(obj.dtype, np.flexible)
+ except:
+ raise TypeError('`object` and `flexible` dtypes not allowed.')
+ try:
+ assert np.min_scalar_type(viewfinder.nodata) <= obj.dtype
+ except:
+ raise TypeError('`nodata` value not representable in dtype of array.')
+ # Don't allow original viewfinder and metadata to be modified
+ viewfinder = viewfinder.copy()
+ metadata = metadata.copy()
+ # Set attributes of array
+ obj._viewfinder = viewfinder
+ obj.metadata = metadata
+ return obj
+
class ViewFinder():
"""
Class that defines a spatial reference system for a Raster or Grid instance.
@@ -286,8 +344,7 @@ def __eq__(self, other):
if isinstance(other, ViewFinder):
is_eq = True
is_eq &= (self.affine == other.affine)
- is_eq &= (self.shape[0] == other.shape[0])
- is_eq &= (self.shape[1] == other.shape[1])
+ is_eq &= (self.shape == other.shape)
is_eq &= (self.crs == other.crs)
is_eq &= (self.mask == other.mask).all()
if np.isnan(self.nodata):
@@ -368,7 +425,7 @@ def size(self):
def bbox(self):
shape = self.shape
xmin, ymax = View.affine_transform(self.affine, 0, 0)
- xmax, ymin = View.affine_transform(self.affine, shape[1], shape[0])
+ xmax, ymin = View.affine_transform(self.affine, shape[-1], shape[-2])
_bbox = (xmin, ymin, xmax, ymax)
return _bbox
@@ -400,14 +457,15 @@ def properties(self):
@property
def axes(self):
- return View.axes(self.affine, self.shape)
+ shape = self.shape
+ return View.axes(self.affine, (shape[-2], shape[-1]))
def is_congruent_with(self, other):
if isinstance(other, ViewFinder):
is_congruent = True
is_congruent &= (self.affine == other.affine)
- is_congruent &= (self.shape[0] == other.shape[0])
- is_congruent &= (self.shape[1] == other.shape[1])
+ is_congruent &= (self.shape[-2] == other.shape[-2])
+ is_congruent &= (self.shape[-1] == other.shape[-1])
is_congruent &= (self.crs == other.crs)
return is_congruent
else:
@@ -456,7 +514,7 @@ def view(cls, data, target_view, data_view=None, interpolation='nearest',
target_view : ViewFinder
The desired spatial reference system.
data_view : ViewFinder
- The spatial reference system of the data. Defaults to the Raster dataset's
+ The spatial reference system of the data. Defaults to the Raster dataset
`viewfinder` attribute.
interpolation : 'nearest', 'linear'
Interpolation method to be used if spatial reference systems
@@ -490,12 +548,22 @@ def view(cls, data, target_view, data_view=None, interpolation='nearest',
out : Raster
View of the input Raster at the provided target view.
"""
- # If no data view given, use data's view
+ # TODO: Temporary check on target_view
+ try:
+ assert (len(target_view.shape) == 2)
+ except:
+ raise ValueError('Target view must be 2-dimensional.')
+ # TODO: Use type instead of isinstance
+ is_raster = isinstance(data, Raster)
+ is_multiraster = isinstance(data, MultiRaster)
+ is_2d_array = isinstance(data, np.ndarray) and (data.ndim == 2)
+ is_3d_array = isinstance(data, np.ndarray) and (data.ndim == 3)
+        # If no data view is provided, use the data's own viewfinder
if data_view is None:
try:
- assert(isinstance(data, Raster))
+ assert (is_raster or is_multiraster)
except:
- raise TypeError('`data` must be a Raster instance.')
+ raise TypeError('`data` must be a Raster or MultiRaster instance.')
data_view = data.viewfinder
# By default, use dataset's `nodata` value
if nodata is None:
@@ -513,22 +581,66 @@ def view(cls, data, target_view, data_view=None, interpolation='nearest',
dtype=dtype,
interpolation=interpolation)
# Mask input data if desired
- if apply_input_mask:
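+        # Only copy and mask the input when its mask actually excludes cells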
+ has_input_mask = not data_view.mask.all()
+ if apply_input_mask and has_input_mask:
+ # TODO: Rewrite to avoid copying.
arr = np.where(data_view.mask, data, target_view.nodata).astype(dtype)
data = Raster(arr, data.viewfinder, metadata=data.metadata)
+ if is_multiraster or is_3d_array:
+ out = cls._view_multiraster(data, target_view, data_view=data_view,
+ interpolation=interpolation,
+ apply_output_mask=apply_output_mask,
+ dtype=dtype)
+ elif is_raster or is_2d_array:
+ out = cls._view_raster(data, target_view, data_view=data_view,
+ interpolation=interpolation,
+ apply_output_mask=apply_output_mask,
+ dtype=dtype)
+ else:
+ raise TypeError('`data` must be a Raster, MultiRaster, 2D array or 3D array.')
+ # Write metadata
+ if inherit_metadata:
+ out.metadata.update(data.metadata)
+ out.metadata.update(new_metadata)
+ return out
+
+ @classmethod
+ def _view_raster(cls, data, target_view, data_view=None, interpolation='nearest',
+ apply_output_mask=True, dtype=None, out=None):
+ # Create an output array to fill
+ if out is None:
+ out = np.empty(target_view.shape, dtype=dtype)
# If data view and target view are the same, return a copy of the data
if data_view.is_congruent_with(target_view):
- out = cls._view_same_viewfinder(data, data_view, target_view, dtype,
+ out = cls._view_same_viewfinder(data, data_view, target_view, out, dtype,
apply_output_mask=apply_output_mask)
# If data view and target view are different...
else:
- out = cls._view_different_viewfinder(data, data_view, target_view, dtype,
+ out = cls._view_different_viewfinder(data, data_view, target_view, out, dtype,
apply_output_mask=apply_output_mask,
interpolation=interpolation)
- # Write metadata
- if inherit_metadata:
- out.metadata.update(data.metadata)
- out.metadata.update(new_metadata)
+ return out
+
+ @classmethod
+ def _view_multiraster(cls, data, target_view, data_view=None, interpolation='nearest',
+ apply_output_mask=True, dtype=None, out=None):
+ k, m, n = data.shape
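+        # View each of the k sheets independently, then restack the results into a MultiRaster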
+ if out is None:
+ out = np.empty((k, *target_view.shape), dtype=dtype)
+ out_mask = np.ones((k, *target_view.shape), dtype=np.bool8)
+ for i in range(k):
+ slice_viewfinder = ViewFinder(affine=data_view.affine, mask=data_view.mask[i],
+ nodata=data_view.nodata, crs=data_view.crs)
+ data_i = Raster(np.asarray(data[i]), viewfinder=slice_viewfinder)
+ # Write to out array in-place
+ view_i = cls._view_raster(data_i, target_view, slice_viewfinder,
+ interpolation=interpolation,
+ apply_output_mask=apply_output_mask,
+ dtype=dtype, out=out[i, :, :])
+ out_mask[i, :, :] = view_i.mask
+ target_viewfinder = ViewFinder(affine=target_view.affine, mask=out_mask,
+ nodata=target_view.nodata, crs=target_view.crs)
+ out = MultiRaster(out, viewfinder=target_viewfinder)
return out
@classmethod
@@ -624,10 +736,10 @@ def axes(cls, affine, shape):
y, x : tuple
y- and x-coordinates of axes
"""
- y_ix = np.arange(shape[0])
- x_ix = np.arange(shape[1])
- x_null = np.zeros(shape[0])
- y_null = np.zeros(shape[1])
+ y_ix = np.arange(shape[-2])
+ x_ix = np.arange(shape[-1])
+ x_null = np.zeros(shape[-2])
+ y_null = np.zeros(shape[-1])
x, _ = cls.affine_transform(affine, x_ix, y_null)
_, y = cls.affine_transform(affine, x_null, y_ix)
return y, x
@@ -734,6 +846,10 @@ def clip_to_mask(cls, data, mask=None, pad=(0,0,0,0)):
A Raster dataset clipped to the bounding box of the non-null entries
in the given mask.
"""
+ try:
+ assert (data.ndim == 2)
+ except:
+ raise ValueError('Data must be 2-dimensional')
try:
for value in pad:
assert (isinstance(value, int))
@@ -805,44 +921,46 @@ def _override_dtype(cls, data, target_view, dtype=None, interpolation='nearest')
return dtype
@classmethod
- def _view_same_viewfinder(cls, data, data_view, target_view, dtype,
+ def _view_same_viewfinder(cls, data, data_view, target_view, out, dtype,
apply_output_mask=True):
- if apply_output_mask:
- out = np.where(target_view.mask, data, target_view.nodata).astype(dtype)
- else:
- out = np.asarray(data.copy(), dtype=dtype)
- out = Raster(out, target_view)
- return out
+ out[:] = data
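+        # Apply the output mask only when it actually excludes cells (avoids a needless pass)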
+ has_output_mask = not target_view.mask.all()
+ if (apply_output_mask) and (has_output_mask):
+ out[~target_view.mask] = target_view.nodata
+ out_raster = Raster(out, target_view)
+ return out_raster
@classmethod
- def _view_different_viewfinder(cls, data, data_view, target_view, dtype,
+ def _view_different_viewfinder(cls, data, data_view, target_view, out, dtype,
apply_output_mask=True, interpolation='nearest'):
- out = np.full(target_view.shape, target_view.nodata, dtype=dtype)
if (data_view.crs == target_view.crs):
out = cls._view_same_crs(out, data, data_view,
target_view, interpolation)
else:
out = cls._view_different_crs(out, data, data_view,
target_view, interpolation)
- if apply_output_mask:
- np.place(out, ~target_view.mask, target_view.nodata)
- out = Raster(out, target_view)
- return out
+ has_output_mask = not target_view.mask.all()
+ if (apply_output_mask) and (has_output_mask):
+ out[~target_view.mask] = target_view.nodata
+ out_raster = Raster(out, target_view)
+ return out_raster
@classmethod
def _view_same_crs(cls, view, data, data_view, target_view, interpolation='nearest'):
y, x = target_view.axes
inv_affine = ~data_view.affine
_, y_ix = cls.affine_transform(inv_affine,
- np.zeros(target_view.shape[0],
+ np.zeros(target_view.shape[-2],
dtype=np.float64), y)
x_ix, _ = cls.affine_transform(inv_affine, x,
- np.zeros(target_view.shape[1],
+ np.zeros(target_view.shape[-1],
dtype=np.float64))
+ nodata = target_view.nodata
+ # TODO: Check that this works for rotated data.
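+        # Since `out` is no longer pre-filled, the fill kernels take `nodata` (presumably to write it for cells outside the data extent)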
if interpolation == 'nearest':
- view = _self._view_fill_by_axes_nearest_numba(data, view, y_ix, x_ix)
+ view = _self._view_fill_by_axes_nearest_numba(data, view, y_ix, x_ix, nodata)
elif interpolation == 'linear':
- view = _self._view_fill_by_axes_linear_numba(data, view, y_ix, x_ix)
+ view = _self._view_fill_by_axes_linear_numba(data, view, y_ix, x_ix, nodata)
else:
raise ValueError('Interpolation method must be one of: `nearest`, `linear`')
return view
@@ -854,10 +972,11 @@ def _view_different_crs(cls, view, data, data_view, target_view, interpolation='
errcheck=True, always_xy=True)
inv_affine = ~data_view.affine
x_ix, y_ix = cls.affine_transform(inv_affine, xt, yt)
+ nodata = target_view.nodata
if interpolation == 'nearest':
- view = _self._view_fill_by_entries_nearest_numba(data, view, y_ix, x_ix)
+ view = _self._view_fill_by_entries_nearest_numba(data, view, y_ix, x_ix, nodata)
elif interpolation == 'linear':
- view = _self._view_fill_by_entries_linear_numba(data, view, y_ix, x_ix)
+ view = _self._view_fill_by_entries_linear_numba(data, view, y_ix, x_ix, nodata)
else:
raise ValueError('Interpolation method must be one of: `nearest`, `linear`')
return view
diff --git a/setup.py b/setup.py
index 98ce916..5f62d67 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
from setuptools import setup
setup(name='pysheds',
- version='0.3.1',
+ version='0.3.2',
description='🌎 Simple and fast watershed delineation in python.',
author='Matt Bartos',
author_email='mdbartos@umich.edu',
diff --git a/tests/test_grid.py b/tests/test_grid.py
index dff383e..28a3e6d 100644
--- a/tests/test_grid.py
+++ b/tests/test_grid.py
@@ -118,6 +118,11 @@ def test_dinf_flowdir():
fdir_dinf = grid.flowdir(inflated_dem, dirmap=dirmap, routing='dinf')
d.fdir_dinf = fdir_dinf
+def test_mfd_flowdir():
+ inflated_dem = d.inflated_dem
+ fdir_mfd = grid.flowdir(inflated_dem, dirmap=dirmap, routing='mfd')
+ d.fdir_mfd = fdir_mfd
+
def test_clip_pad():
catch = d.catch
grid.clip_to(catch)
@@ -130,15 +135,17 @@ def test_clip_pad():
def test_computed_fdir_catch():
fdir_d8 = d.fdir_d8
fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
catch_d8 = grid.catchment(x, y, fdir_d8, dirmap=dirmap, routing='d8',
xytype='coordinate')
assert(np.count_nonzero(catch_d8) > 11300)
# Reference routing
- catch_d8 = grid.catchment(x, y, fdir_d8, dirmap=dirmap, routing='d8',
- xytype='coordinate')
catch_dinf = grid.catchment(x, y, fdir_dinf, dirmap=dirmap, routing='dinf',
xytype='coordinate')
assert(np.count_nonzero(catch_dinf) > 11300)
+ catch_mfd = grid.catchment(x, y, fdir_mfd, dirmap=dirmap, routing='mfd',
+ xytype='coordinate')
+    assert(np.count_nonzero(catch_mfd) > 11300)
catch_d8_recur = grid.catchment(x, y, fdir_d8, dirmap=dirmap, routing='d8',
xytype='coordinate', algorithm='recursive')
catch_dinf_recur = grid.catchment(x, y, fdir_dinf, dirmap=dirmap, routing='dinf',
@@ -148,6 +155,11 @@ def test_accumulation():
# D8 flow accumulation without efficiency
# external flow direction
fdir = d.fdir
+ catch = d.catch
+ fdir_d8 = d.fdir_d8
+ fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
+ # TODO: This breaks if clip_to's padding of dir is nonzero
grid.clip_to(fdir)
acc = grid.accumulation(fdir, dirmap=dirmap, routing='d8')
assert(acc.max() == acc_in_frame)
@@ -192,7 +204,19 @@ def test_accumulation():
assert((acc_dinf[np.where(acc_d8==acc_d8.max())]==acc_dinf.max()).all())
# original assertion
assert(acc_dinf.max() > 11300)
-
+ acc_mfd = grid.accumulation(fdir_mfd, dirmap=dirmap, routing='mfd')
+ assert(acc_mfd.max() > 11200)
+ # #set nodata to 1
+ # eff = grid.view(dinf_eff)
+ # eff[eff==dinf_eff.nodata] = 1
+ # acc_dinf_eff = grid.accumulation(fdir_dinf, dirmap=dirmap,
+ # routing='dinf', efficiency=eff)
+ # pos = np.where(grid.dinf_acc==grid.dinf_acc.max())
+ # assert(np.round(grid.dinf_acc[pos] / grid.dinf_acc_eff[pos]) == 4.)
+ acc_d8_recur = grid.accumulation(fdir_d8, dirmap=dirmap, routing='d8',
+ algorithm='recursive')
+ acc_dinf_recur = grid.accumulation(fdir_dinf, dirmap=dirmap, routing='dinf',
+ algorithm='recursive')
# ...with efficiency
# this is probably a bit hacky
# we have two grid cells with the outlet value == max flow accumulation
@@ -227,8 +251,10 @@ def test_hand():
dem = d.dem
acc = d.acc
fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
hand_d8 = grid.compute_hand(fdir, dem, acc > 100, routing='d8')
hand_dinf = grid.compute_hand(fdir_dinf, dem, acc > 100, routing='dinf')
+ hand_mfd = grid.compute_hand(fdir_mfd, dem, acc > 100, routing='mfd')
hand_d8_recur = grid.compute_hand(fdir, dem, acc > 100, routing='d8',
algorithm='recursive')
hand_dinf_recur = grid.compute_hand(fdir_dinf, dem, acc > 100, routing='dinf',
@@ -238,6 +264,7 @@ def test_distance_to_outlet():
fdir = d.fdir
catch = d.catch
fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
grid.clip_to(catch)
dist = grid.distance_to_outlet(x, y, fdir, dirmap=dirmap, xytype='coordinate')
assert(dist[np.isfinite(dist)].max() == max_distance_d8)
@@ -247,10 +274,14 @@ def test_distance_to_outlet():
weights = Raster(2 * np.ones(grid.shape), grid.viewfinder)
grid.distance_to_outlet(x, y, fdir_dinf, dirmap=dirmap, routing='dinf',
xytype='coordinate')
+ grid.distance_to_outlet(x, y, fdir_mfd, dirmap=dirmap, routing='mfd',
+ xytype='coordinate')
grid.distance_to_outlet(x, y, fdir, weights=weights,
dirmap=dirmap, xytype='label')
grid.distance_to_outlet(x, y, fdir_dinf, dirmap=dirmap, weights=weights,
routing='dinf', xytype='label')
+ grid.distance_to_outlet(x, y, fdir_mfd, dirmap=dirmap, weights=weights,
+ routing='mfd', xytype='label')
# Test recursive
grid.distance_to_outlet(x, y, fdir, dirmap=dirmap, xytype='coordinate',
routing='d8', algorithm='recursive')
@@ -266,29 +297,39 @@ def test_stream_order():
def test_distance_to_ridge():
fdir = d.fdir
acc = d.acc
+ fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
order = grid.distance_to_ridge(fdir, acc > 100)
order = grid.distance_to_ridge(fdir, acc > 100, algorithm='recursive')
+ order = grid.distance_to_ridge(fdir_dinf, acc > 100, routing='dinf')
+ order = grid.distance_to_ridge(fdir_mfd, acc > 100, routing='mfd')
def test_cell_dh():
fdir = d.fdir
fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
dem = d.dem
dh_d8 = grid.cell_dh(dem, fdir, routing='d8')
dh_dinf = grid.cell_dh(dem, fdir_dinf, routing='dinf')
+ dh_mfd = grid.cell_dh(dem, fdir_mfd, routing='mfd')
def test_cell_distances():
fdir = d.fdir
fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
dem = d.dem
cdist_d8 = grid.cell_distances(fdir, routing='d8')
cdist_dinf = grid.cell_distances(fdir_dinf, routing='dinf')
+ cdist_mfd = grid.cell_distances(fdir_mfd, routing='mfd')
def test_cell_slopes():
fdir = d.fdir
fdir_dinf = d.fdir_dinf
+ fdir_mfd = d.fdir_mfd
dem = d.dem
slopes_d8 = grid.cell_slopes(dem, fdir, routing='d8')
slopes_dinf = grid.cell_slopes(dem, fdir_dinf, routing='dinf')
+ slopes_mfd = grid.cell_slopes(dem, fdir_mfd, routing='mfd')
# def test_set_nodata():
# grid.set_nodata('dir', 0)