Commit c5c9db6

reverting ignore flags due to pandas stubs

hollymandel committed Oct 10, 2024
1 parent d00cd93 commit c5c9db6
Showing 9 changed files with 24 additions and 28 deletions.
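A note on the change: mypy's error-code ignores (# type: ignore[code]) are sensitive to which stub packages are installed. With pandas-stubs present, many pandas calls gain precise types, so some ignores become necessary while others become unused; under mypy's warn_unused_ignores setting, an unused ignore is itself an error, which is why a stubs change forces the flags below to be flipped back. A minimal sketch of the mechanics (the exact stub typing of pd.eval is an assumption here, not taken from this commit):

import pandas as pd

def run(statement: str) -> pd.DataFrame:
    # If the installed stubs declare pd.eval's return type more broadly
    # than pd.DataFrame, the ignore is required; if the call is typed Any
    # instead, warn_unused_ignores flags the same comment as unused.
    return pd.eval(statement)  # type: ignore[return-value]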
2 changes: 1 addition & 1 deletion xarray/coding/cftimeindex.py
@@ -517,7 +517,7 @@ def contains(self, key: Any) -> bool:
"""Needed for .loc based partial-string indexing"""
return self.__contains__(key)

-def shift( # freq is typed Any, we are more precise
+def shift( # type: ignore[override] # freq is typed Any, we are more precise
self,
periods: int | float,
freq: str | timedelta | BaseCFTimeOffset | None = None,
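The override ignore above silences mypy's Liskov signature check. A minimal sketch, unrelated to xarray, of what triggers it:

class Base:
    def shift(self, periods: object) -> None: ...

class Precise(Base):
    # mypy: Argument 1 of "shift" is incompatible with supertype "Base".
    # Narrowing a parameter breaks substitutability, so an explicit
    # ignore[override] is a deliberate claim that the narrower type is intended.
    def shift(self, periods: int) -> None:  # type: ignore[override]
        print(periods + 1)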
2 changes: 1 addition & 1 deletion xarray/core/dataarray.py
@@ -3032,7 +3032,7 @@ def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Data
if not isinstance(idx, pd.MultiIndex):
raise ValueError(f"'{dim}' is not a stacked coordinate")

-level_number = idx._get_level_number(level)
+level_number = idx._get_level_number(level) # type: ignore[attr-defined]
variables = idx.levels[level_number]
variable_dim = idx.names[level_number]

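The attr-defined ignore above covers a private pandas API that the stubs do not declare. A small sketch of the same pattern, where the method exists at runtime but not in pandas-stubs:

import pandas as pd

idx = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["x", "y"])
# private helper: resolves a level name or position to an integer index
n = idx._get_level_number("y")  # type: ignore[attr-defined]
print(n)  # 1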
10 changes: 5 additions & 5 deletions xarray/core/dataset.py
@@ -6629,7 +6629,7 @@ def interpolate_na(
| None
) = None,
**kwargs: Any,
-) -> Dataset:
+) -> Self:
"""Fill in NaNs by interpolating according to different methods.
Parameters
@@ -6760,7 +6760,7 @@ def interpolate_na(
)
return new

-def ffill(self, dim: Hashable, limit: int | None = None) -> Dataset:
+def ffill(self, dim: Hashable, limit: int | None = None) -> Self:
"""Fill NaN values by propagating values forward
*Requires bottleneck.*
@@ -6824,7 +6824,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Dataset:
new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
return new

-def bfill(self, dim: Hashable, limit: int | None = None) -> Dataset:
+def bfill(self, dim: Hashable, limit: int | None = None) -> Self:
"""Fill NaN values by propagating values backward
*Requires bottleneck.*
@@ -7523,7 +7523,7 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self:

if isinstance(idx, pd.MultiIndex):
dims = tuple(
-name if name is not None else "level_%i" % n
+name if name is not None else "level_%i" % n # type: ignore[redundant-expr]
for n, name in enumerate(idx.names)
)
for dim, lev in zip(dims, idx.levels, strict=True):
@@ -9829,7 +9829,7 @@ def eval(
c (x) float64 40B 0.0 1.25 2.5 3.75 5.0
"""

-return pd.eval(
+return pd.eval( # type: ignore[return-value]
statement,
resolvers=[self],
target=self,
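The Dataset -> Self changes above tighten the return annotations so subclasses round-trip correctly through these methods. A minimal sketch of the idea, using typing.Self (Python 3.11+; older interpreters can import it from typing_extensions):

from typing import Self

class Container:
    def ffill(self) -> Self:
        # fill logic elided; the point is the return annotation
        return self

class SubContainer(Container):
    pass

sub: SubContainer = SubContainer().ffill()  # typed SubContainer, not Container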
10 changes: 4 additions & 6 deletions xarray/core/extension_array.py
@@ -45,7 +45,7 @@ def __extension_duck_array__stack(arr: T_ExtensionArray, axis: int):
def __extension_duck_array__concatenate(
arrays: Sequence[T_ExtensionArray], axis: int = 0, out=None
) -> T_ExtensionArray:
-return type(arrays[0])._concat_same_type(arrays)
+return type(arrays[0])._concat_same_type(arrays) # type: ignore[attr-defined]


@implements(np.where)
@@ -57,8 +57,8 @@ def __extension_duck_array__where(
and isinstance(y, pd.Categorical)
and x.dtype != y.dtype
):
-x = x.add_categories(set(y.categories).difference(set(x.categories)))
-y = y.add_categories(set(x.categories).difference(set(y.categories)))
+x = x.add_categories(set(y.categories).difference(set(x.categories))) # type: ignore[assignment]
+y = y.add_categories(set(x.categories).difference(set(y.categories))) # type: ignore[assignment]
return cast(T_ExtensionArray, pd.Series(x).where(condition, pd.Series(y)).array)


@@ -116,9 +116,7 @@ def __getitem__(self, key) -> PandasExtensionArray[T_ExtensionArray]:
if is_extension_array_dtype(item):
return type(self)(item)
if np.isscalar(item):
-return type(self)(
-    type(self.array)([item])
-) # only subclasses with proper __init__ allowed
+return type(self)(type(self.array)([item])) # type: ignore[call-arg] # only subclasses with proper __init__ allowed
return item

def __setitem__(self, key, val):
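_concat_same_type, used above, is part of pandas' ExtensionArray interface: a classmethod that concatenates several arrays of one extension type. A short sketch with a built-in extension dtype:

import pandas as pd

a = pd.array([1, 2], dtype="Int64")
b = pd.array([3, None], dtype="Int64")
# classmethod on the concrete array type; all inputs must share that type
merged = type(a)._concat_same_type([a, b])
print(merged)  # IntegerArray: [1, 2, 3, <NA>]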
12 changes: 5 additions & 7 deletions xarray/core/indexes.py
@@ -740,7 +740,7 @@ def isel(
# scalar indexer: drop index
return None

-return self._replace(self.index[indxr])
+return self._replace(self.index[indxr]) # type: ignore[index]

def sel(
self, labels: dict[Any, Any], method=None, tolerance=None
@@ -926,7 +926,7 @@ def remove_unused_levels_categories(index: T_PDIndex) -> T_PDIndex:
return cast(T_PDIndex, new_index)

if isinstance(index, pd.CategoricalIndex):
-return index.remove_unused_categories()
+return index.remove_unused_categories() # type: ignore[attr-defined]

return index

@@ -1164,7 +1164,7 @@ def create_variables(
dtype = None
else:
level = name
-dtype = self.level_coords_dtype[name] # TODO: are Hashables ok?
+dtype = self.level_coords_dtype[name] # type: ignore[index] # TODO: are Hashables ok?

var = variables.get(name, None)
if var is not None:
@@ -1174,9 +1174,7 @@ def create_variables(
attrs = {}
encoding = {}

-data = PandasMultiIndexingAdapter(
-    self.index, dtype=dtype, level=level
-) # TODO: are Hashables ok?
+data = PandasMultiIndexingAdapter(self.index, dtype=dtype, level=level) # type: ignore[arg-type] # TODO: are Hashables ok?
index_vars[name] = IndexVariable(
self.dim,
data,
@@ -1673,7 +1671,7 @@ def copy_indexes(
convert_new_idx = False
xr_idx = idx

-new_idx = xr_idx._copy(deep=deep, memo=memo)
+new_idx = xr_idx._copy(deep=deep, memo=memo) # type: ignore[assignment]
idx_vars = xr_idx.create_variables(coords)

if convert_new_idx:
4 changes: 2 additions & 2 deletions xarray/core/utils.py
@@ -132,7 +132,7 @@ def get_valid_numpy_dtype(array: np.ndarray | pd.Index) -> np.dtype:
if not is_valid_numpy_dtype(array.dtype):
return np.dtype("O")

-return array.dtype
+return array.dtype # type: ignore[return-value]


def maybe_coerce_to_str(index, original_coords):
@@ -180,7 +180,7 @@ def equivalent(first: T, second: T) -> bool:
return duck_array_ops.array_equiv(first, second)
if isinstance(first, list) or isinstance(second, list):
return list_equiv(first, second) # type: ignore[arg-type]
-return (first == second) or (pd.isnull(first) and pd.isnull(second))
+return (first == second) or (pd.isnull(first) and pd.isnull(second)) # type: ignore[call-overload]


def list_equiv(first: Sequence[T], second: Sequence[T]) -> bool:
2 changes: 1 addition & 1 deletion xarray/core/variable.py
@@ -151,7 +151,7 @@ def as_variable(
) from error
elif utils.is_scalar(obj):
obj = Variable([], obj)
-elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None: # type: ignore[redundant-expr]
+elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, set | dict):
raise TypeError(f"variable {name!r} has invalid type {type(obj)!r}")
2 changes: 1 addition & 1 deletion xarray/groupers.py
@@ -296,7 +296,7 @@ def factorize(self, group: T_Group) -> EncodedGroups:

data = np.asarray(group.data) # Cast _DummyGroup data to array

-binned, self.bins = pd.cut(
+binned, self.bins = pd.cut( # type: ignore [call-overload]
data.ravel(),
bins=self.bins,
right=self.right,
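The truncated pd.cut call above unpacks into two values, which suggests retbins=True further down the call (an assumption; the keyword sits below the visible hunk). A sketch of that form, which returns both the binned values and the bin edges:

import numpy as np
import pandas as pd

data = np.array([1.0, 7.0, 5.0, 4.0])
# retbins=True makes pd.cut return (binned values, bin edges);
# this extra overload is what the call-overload ignore accommodates
binned, bins = pd.cut(data, bins=3, right=True, retbins=True)
print(bins)  # 4 edges for 3 bins, spanning roughly [1.0, 7.0]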
8 changes: 4 additions & 4 deletions xarray/namedarray/daskmanager.py
@@ -21,13 +21,13 @@
try:
from dask.array import Array as DaskArray
except ImportError:
-DaskArray = np.ndarray[Any, Any] # type: ignore[misc,assignment]
+DaskArray = np.ndarray[Any, Any]


dask_available = module_available("dask")


-class DaskManager(ChunkManagerEntrypoint["DaskArray"]): # type: ignore[type-var]
+class DaskManager(ChunkManagerEntrypoint["DaskArray"]):
array_cls: type[DaskArray]
available: bool = dask_available

@@ -91,7 +91,7 @@ def array_api(self) -> Any:

return da

-def reduction( # type: ignore[override]
+def reduction(
self,
arr: T_ChunkedArray,
func: Callable[..., Any],
@@ -113,7 +113,7 @@ def reduction( # type: ignore[override]
keepdims=keepdims,
) # type: ignore[no-untyped-call]

-def scan( # type: ignore[override]
+def scan(
self,
func: Callable[..., Any],
binop: Callable[..., Any],
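The daskmanager changes sit on the common optional-dependency pattern: try the real import, and fall back to a concrete alias so the module still imports and its annotations resolve without dask. A minimal sketch:

from typing import Any

import numpy as np

try:
    from dask.array import Array as DaskArray
except ImportError:
    # rebinding the name keeps annotations usable without dask installed;
    # whether mypy wants an ignore on this line depends on the installed
    # stubs, which is exactly what this commit adjusts
    DaskArray = np.ndarray[Any, Any]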
