
Commit

Don't need the type_guards module
RandallPittmanOrSt committed Aug 23, 2024
1 parent 00d3c61 commit 29fd359
Showing 15 changed files with 82 additions and 269 deletions.
9 changes: 6 additions & 3 deletions examples/bench.py
@@ -1,10 +1,14 @@
 # benchmark reads and writes, with and without compression.
 # tests all four supported file formats.
+from typing import TYPE_CHECKING, Any
 from numpy.random.mtrand import uniform
 import netCDF4
 from timeit import Timer
 import os, sys
-import type_guards
+if TYPE_CHECKING:
+    from netCDF4 import Format as NCFormat
+else:
+    NCFormat = Any
 
 # create an n1dim by n2dim by n3dim random array.
 n1dim = 30
@@ -15,8 +19,7 @@
 sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
 array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
 
-def write_netcdf(filename,zlib=False,least_significant_digit=None,format='NETCDF4'):
-    assert type_guards.valid_format(format)
+def write_netcdf(filename,zlib=False,least_significant_digit=None,format: NCFormat='NETCDF4'):
     file = netCDF4.Dataset(filename,'w',format=format)
     file.createDimension('n1', n1dim)
     file.createDimension('n2', n2dim)
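The pattern introduced above recurs throughout this commit: the alias exists only in netCDF4's type stubs, so it is imported under TYPE_CHECKING and replaced with Any at runtime. A minimal, self-contained sketch of the idiom (open_for_write is illustrative, not from this commit):

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Format exists only in the type stubs; importing it unconditionally
    # would raise ImportError when the script actually runs.
    from netCDF4 import Format as NCFormat
else:
    # Annotations still need a runtime value; Any is an inert placeholder.
    NCFormat = Any

def open_for_write(path: str, format: NCFormat = "NETCDF4"):
    import netCDF4
    return netCDF4.Dataset(path, "w", format=format)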
11 changes: 2 additions & 9 deletions examples/bench_compress.py
@@ -1,12 +1,11 @@
 # benchmark reads and writes, with and without compression.
 # tests all four supported file formats.
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Any
 from numpy.random.mtrand import uniform
 import netCDF4
 import netCDF4.utils
 from timeit import Timer
 import os, sys
-from typing_extensions import TypeGuard
 if TYPE_CHECKING:
     from netCDF4 import CompressionLevel
 else:
@@ -22,18 +21,12 @@
 sys.stdout.write('(average of %s trials)\n' % ntrials)
 array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4)
 
-def valid_complevel(complevel) -> TypeGuard[CompressionLevel]:
-    """Check for a valid `complevel` argument for creating a Variable"""
-    return isinstance(complevel, int) and 0 <= complevel <= 9
-
-def write_netcdf(filename,zlib=False,shuffle=False,complevel=6):
+def write_netcdf(filename,zlib=False,shuffle=False,complevel: CompressionLevel = 6):
     file = netCDF4.Dataset(filename,'w',format='NETCDF4')
     file.createDimension('n1', n1dim)
     file.createDimension('n2', n2dim)
     file.createDimension('n3', n3dim)
     file.createDimension('n4', n4dim)
-    if not (valid_complevel(complevel) or complevel is None):
-        raise ValueError("Invalid compression level")
     foo = file.createVariable('data',\
         'f8',('n1','n2','n3','n4'),zlib=zlib,shuffle=shuffle,complevel=complevel)
     foo[:] = array
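For contrast, the deleted valid_complevel helper was a TypeGuard: a runtime check whose True result also narrows the static type. A sketch of what this commit trades away, assuming CompressionLevel is the stubs' Literal of levels 0-9:

from typing import Literal
from typing_extensions import TypeGuard

CompressionLevel = Literal[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]  # assumed stub alias

def valid_complevel(complevel: object) -> TypeGuard[CompressionLevel]:
    # Returning True tells the checker complevel is a CompressionLevel.
    return isinstance(complevel, int) and 0 <= complevel <= 9

Annotating the parameter directly, as the new signature does, moves the check from every call site into the function signature and drops the typing_extensions dependency.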
9 changes: 6 additions & 3 deletions examples/bench_compress4.py
@@ -1,10 +1,10 @@
 # benchmark reads and writes, with and without compression.
 # tests all four supported file formats.
+from typing import Literal
 from numpy.random.mtrand import uniform
 import netCDF4
 from timeit import Timer
 import os, sys
-import type_guards
 
 # use real data.
 URL="http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis/pressure/hgt.1990.nc"
@@ -20,12 +20,15 @@
 array = nc.variables['hgt'][0:n1dim,5,:,:]
 
 
-def write_netcdf(filename,nsd,quantize_mode='BitGroom'):
+def write_netcdf(
+    filename,
+    nsd,
+    quantize_mode: Literal["BitGroom", "BitRound", "GranularBitRound"] = "BitGroom"
+):
     file = netCDF4.Dataset(filename,'w',format='NETCDF4')
     file.createDimension('n1', None)
     file.createDimension('n3', n3dim)
     file.createDimension('n4', n4dim)
-    assert type_guards.valid_quantize_mode(quantize_mode)
     foo = file.createVariable('data',\
         'f4',('n1','n3','n4'),\
         zlib=True,shuffle=True,\
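With the Literal annotation, a mistyped quantize mode is now a static error rather than something the deleted assert caught at runtime. A self-contained illustration (quantize stands in for write_netcdf above):

from typing import Literal

QuantizeMode = Literal["BitGroom", "BitRound", "GranularBitRound"]

def quantize(mode: QuantizeMode = "BitGroom") -> None:
    ...  # illustrative stand-in

quantize(mode="BitRound")    # accepted
quantize(mode="BitGrooom")   # flagged by mypy: not a valid Literal value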
13 changes: 8 additions & 5 deletions examples/bench_diskless.py
@@ -1,10 +1,14 @@
 # benchmark reads and writes, with and without compression.
 # tests all four supported file formats.
+from typing import TYPE_CHECKING, Any, Literal
 from numpy.random.mtrand import uniform
 import netCDF4
 from timeit import Timer
 import os, sys
-import type_guards
+if TYPE_CHECKING:
+    from netCDF4 import Format as NCFormat
+else:
+    NCFormat = Any
 
 # create an n1dim by n2dim by n3dim random array.
 n1dim = 30
@@ -15,8 +19,7 @@
 sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
 array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
 
-def write_netcdf(filename,zlib=False,least_significant_digit=None,format='NETCDF4',closeit=False):
-    assert type_guards.valid_format(format)
+def write_netcdf(filename, zlib=False, least_significant_digit=None, format: NCFormat='NETCDF4',closeit=False):
     file = netCDF4.Dataset(filename,'w',format=format,diskless=True,persist=True)
     file.createDimension('n1', n1dim)
     file.createDimension('n2', n2dim)
@@ -44,13 +47,13 @@ def read_netcdf(ncfile):
 sys.stdout.write('writing took %s seconds\n' %\
 repr(sum(t.repeat(ntrials,1))/ntrials))
 # test reading.
-ncfile = write_netcdf('test1.nc',format=format)
+ncfile = write_netcdf('test1.nc',format=format) # type: ignore
 t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
 sys.stdout.write('reading took %s seconds\n' %
 repr(sum(t.repeat(ntrials,1))/ntrials))
 
 # test diskless=True in nc_open
-format='NETCDF3_CLASSIC'
+format: Literal["NETCDF3_CLASSIC"] = 'NETCDF3_CLASSIC' # mypy should know this but it needs help...
 trials=50
 sys.stdout.write('test caching of file in memory on open for %s\n' % format)
 sys.stdout.write('testing file format %s ...\n' % format)
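The annotated assignment at the end works around mypy's inference rule for mutable module-level variables: a bare format = 'NETCDF3_CLASSIC' is widened to plain str, which no longer satisfies a Literal-typed parameter. A small sketch of the problem (take_format is illustrative):

from typing import Literal

def take_format(format: Literal["NETCDF3_CLASSIC", "NETCDF4"]) -> None: ...

fmt = "NETCDF3_CLASSIC"  # inferred as plain str
take_format(fmt)         # mypy error: argument has incompatible type "str"

fmt2: Literal["NETCDF3_CLASSIC"] = 'NETCDF3_CLASSIC'  # annotation keeps the Literal
take_format(fmt2)        # accepted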
105 changes: 0 additions & 105 deletions examples/type_guards.py

This file was deleted.
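The module's contents are not shown here, but the call sites removed in this commit name its guards: valid_format, valid_complevel, valid_compression, valid_quantize_mode, and valid_bloscshuffle. A hypothetical reconstruction of one of them, for readers without the deleted file (not the original 105-line source):

# Hypothetical reconstruction inferred from the call sites above.
from typing import Literal
from typing_extensions import TypeGuard

Format = Literal["NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_CLASSIC",
                 "NETCDF3_64BIT_OFFSET", "NETCDF3_64BIT_DATA"]

def valid_format(format: object) -> TypeGuard[Format]:
    # True narrows the argument to the Format Literal for the type checker.
    return format in ("NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_CLASSIC",
                      "NETCDF3_64BIT_OFFSET", "NETCDF3_64BIT_DATA")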

4 changes: 2 additions & 2 deletions src/netCDF4/__init__.pyi
@@ -453,7 +453,7 @@ class _VarDatatypeProperty:
     def __get__(self, instance: Variable[str], owner: Any) -> VLType: ...
     @overload
     def __get__(
-        self, instance: Variable[Any], owner: Any
+        self, instance: Variable, owner: Any
     ) -> Any: ...  # actual return type np.dtype | CompoundType | VLType | EnumType
 
 class _VarDtypeProperty:
@@ -463,7 +463,7 @@ class _VarDtypeProperty:
     @overload
     def __get__(self, instance: Variable[str], owner: Any) -> type[str]: ...
     @overload
-    def __get__(self, instance: Variable[Any], owner: Any) -> Any: ...  # actual return type np.dtype | Type[str]
+    def __get__(self, instance: Variable, owner: Any) -> Any: ...  # actual return type np.dtype | Type[str]
 
 class Variable(Generic[VarT]):
     # Overloads of __new__ are provided for some cases where the Variable's type may be statically inferred from the datatype arg
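Dropping the explicit type argument leaves the final overload's instance parameter as a bare Variable, the conventional catch-all once the more specific Variable[str] overload fails to match. A condensed, stub-style sketch of the pattern (not the stubs verbatim; in a .pyi no implementation body is required):

from typing import Any, Generic, TypeVar, overload

VarT = TypeVar("VarT")

class Variable(Generic[VarT]): ...

class _VarDtypeProperty:
    @overload
    def __get__(self, instance: Variable[str], owner: Any) -> type[str]: ...
    @overload
    def __get__(self, instance: Variable, owner: Any) -> Any: ...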
17 changes: 9 additions & 8 deletions test/test_compression.py
@@ -1,9 +1,13 @@
+from typing import TYPE_CHECKING, Any
 from numpy.random.mtrand import uniform
 from netCDF4 import Dataset
 from netCDF4.utils import _quantize
 from numpy.testing import assert_almost_equal
 import os, tempfile, unittest
-import type_guards
+if TYPE_CHECKING:
+    from netCDF4 import CompressionLevel
+else:
+    CompressionLevel = Any
 
 ndim = 100000
 ndim2 = 100
@@ -15,8 +19,7 @@
 lsd = 3
 
 def write_netcdf(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
-                 chunksizes=None,complevel=6,fletcher32=False):
-    assert type_guards.valid_complevel(complevel) or complevel is None
+                 chunksizes=None, complevel: CompressionLevel = 6, fletcher32=False):
     file = Dataset(filename,'w')
     file.createDimension('n', ndim)
     foo = file.createVariable('data',\
@@ -32,9 +35,8 @@ def write_netcdf(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=F
     #compression=''
     #compression=0
     #compression='gzip' # should fail
-    assert type_guards.valid_compression(compression) or compression is None
-    foo2 = file.createVariable('data2',\
-        dtype,('n'),compression=compression,least_significant_digit=least_significant_digit,\
+    foo2 = file.createVariable('data2',
+        dtype,('n'),compression=compression,least_significant_digit=least_significant_digit, # type: ignore # mypy doesn't like compression
         shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
     foo[:] = data
     foo2[:] = data
@@ -45,11 +47,10 @@ def write_netcdf(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=F
     file.close()
 
 def write_netcdf2(filename,zlib,least_significant_digit,data,dtype='f8',shuffle=False,contiguous=False,\
-                  chunksizes=None,complevel=6,fletcher32=False):
+                  chunksizes=None, complevel: CompressionLevel = 6, fletcher32=False):
     file = Dataset(filename,'w')
     file.createDimension('n', ndim)
     file.createDimension('n2', ndim2)
-    assert type_guards.valid_complevel(complevel) or complevel is None
     foo = file.createVariable('data2',\
         dtype,('n','n2'),zlib=zlib,least_significant_digit=least_significant_digit,\
         shuffle=shuffle,contiguous=contiguous,complevel=complevel,fletcher32=fletcher32,chunksizes=chunksizes)
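The trailing comment on the foo2 call is line-scoped: it silences mypy only for the physical line carrying compression=compression. When the offending diagnostic is known, mypy also accepts an error-code-scoped form that leaves other errors on the line visible. A self-contained sketch (the Literal values are assumed, mirroring the 'should fail' comments above):

from typing import Literal

def create(compression: Literal["zlib", "bzip2"]) -> None: ...

compression = "gzip"  # the test deliberately tries values mypy rejects

# Blanket form: hides every mypy error reported on the line.
create(compression)  # type: ignore

# Scoped form: hides only the named error code.
create(compression)  # type: ignore[arg-type]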
12 changes: 7 additions & 5 deletions test/test_compression_blosc.py
@@ -1,10 +1,13 @@
+from typing import TYPE_CHECKING, Any, Literal
 from numpy.random.mtrand import uniform
 from netCDF4 import Dataset
-import type_guards
 from numpy.testing import assert_almost_equal
 import os, tempfile, unittest, sys
 from filter_availability import no_plugins, has_blosc_filter
-import type_guards
+if TYPE_CHECKING:
+    from netCDF4 import CompressionLevel
+else:
+    CompressionLevel = Any
 
 
 ndim = 100000
@@ -13,8 +16,7 @@
 filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
 datarr = uniform(size=(ndim,))
 
-def write_netcdf(filename,dtype='f8',blosc_shuffle=1,complevel=6):
-    assert (type_guards.valid_complevel(complevel) or complevel is None) and type_guards.valid_bloscshuffle(blosc_shuffle)
+def write_netcdf(filename, dtype='f8', blosc_shuffle: Literal[0, 1, 2] = 1, complevel: CompressionLevel = 6):
     nc = Dataset(filename,'w')
     nc.createDimension('n', ndim)
     foo = nc.createVariable('data',\
@@ -41,7 +43,7 @@ def write_netcdf(filename,dtype='f8',blosc_shuffle=1,complevel=6):
 class CompressionTestCase(unittest.TestCase):
     def setUp(self):
         self.filename = filename
-        write_netcdf(self.filename,complevel=iblosc_complevel,blosc_shuffle=iblosc_shuffle)
+        write_netcdf(self.filename,complevel=iblosc_complevel,blosc_shuffle=iblosc_shuffle) # type: ignore
 
     def tearDown(self):
         # Remove the temporary files
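The ignore in setUp is needed because the module-level iblosc_complevel and iblosc_shuffle constants are presumably inferred as plain int. An alternative sketch would annotate them at their definition so the call checks cleanly (values assumed for illustration):

from typing import Literal

CompressionLevel = Literal[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]  # assumed stub alias

iblosc_shuffle: Literal[0, 1, 2] = 2      # assumed value
iblosc_complevel: CompressionLevel = 4    # assumed value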
9 changes: 6 additions & 3 deletions test/test_compression_bzip2.py
@@ -1,17 +1,20 @@
+from typing import TYPE_CHECKING, Any
 from numpy.random.mtrand import uniform
 from netCDF4 import Dataset
 from numpy.testing import assert_almost_equal
 import os, tempfile, unittest, sys
 from filter_availability import no_plugins, has_bzip2_filter
-import type_guards
+if TYPE_CHECKING:
+    from netCDF4 import CompressionLevel
+else:
+    CompressionLevel = Any
 
 ndim = 100000
 filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
 filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
 array = uniform(size=(ndim,))
 
-def write_netcdf(filename,dtype='f8',complevel=6):
-    assert type_guards.valid_complevel(complevel) or complevel is None
+def write_netcdf(filename,dtype='f8',complevel: CompressionLevel = 6):
     nc = Dataset(filename,'w')
     nc.createDimension('n', ndim)
     foo = nc.createVariable('data',\
10 changes: 7 additions & 3 deletions test/test_compression_quant.py
@@ -1,9 +1,14 @@
+from typing import TYPE_CHECKING, Any
 from numpy.random.mtrand import uniform
 from netCDF4 import Dataset, __has_quantization_support__
 from numpy.testing import assert_almost_equal
 import numpy as np
 import os, tempfile, unittest
-import type_guards
+if TYPE_CHECKING:
+    from netCDF4 import CompressionLevel, QuantizeMode
+else:
+    CompressionLevel = Any
+    QuantizeMode = Any
 
 ndim = 100000
 nfiles = 7
@@ -14,10 +19,9 @@
 complevel = 6
 
 def write_netcdf(filename,zlib,significant_digits,data,dtype='f8',shuffle=False,\
-                 complevel=6,quantize_mode="BitGroom"):
+                 complevel: CompressionLevel = 6, quantize_mode: QuantizeMode = "BitGroom"):
     file = Dataset(filename,'w')
     file.createDimension('n', ndim)
-    assert (type_guards.valid_complevel(complevel) or complevel is None) and type_guards.valid_quantize_mode(quantize_mode)
     foo = file.createVariable('data',\
         dtype,('n'),zlib=zlib,significant_digits=significant_digits,\
         shuffle=shuffle,complevel=complevel,quantize_mode=quantize_mode)