Commit 569fc81: Reduce tolerance

marvinfriede committed Mar 28, 2024
1 parent 7ca4d71 · commit 569fc81
Showing 4 changed files with 24 additions and 24 deletions.
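Across all four files the change is the same: the per-module `device = None` is replaced by a shared `DEVICE` constant imported from the test suite's `conftest`, and the tolerance used by the gradient tests is relaxed from 1e-8 to 1e-7. The `conftest.py` itself is not part of this commit; the snippet below is only a minimal sketch, under the assumption that the device is picked once (here via a hypothetical `TORCH_DEVICE` environment variable) and shared by every test module:

# test/conftest.py (hypothetical sketch, not taken from this commit)
import os
from typing import Optional

import torch

# Choose the device once for the whole test suite. Test modules import
# DEVICE instead of each defining their own module-level `device = None`.
DEVICE: Optional[torch.device] = (
    torch.device(os.environ["TORCH_DEVICE"]) if "TORCH_DEVICE" in os.environ else None
)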
test/test_grad/test_hessian.py (7 additions, 8 deletions)
@@ -38,13 +38,12 @@
 from tad_dftd4 import dftd4
 from tad_dftd4.typing import DD, Tensor
 
+from ..conftest import DEVICE
 from .samples_hessian import samples
 
 sample_list = ["LiH", "SiH4", "PbH4-BiH3", "MB16_43_01"]
 
-tol = 1e-8
-
-device = None
+tol = 1e-7
 
 
 def test_fail() -> None:
@@ -72,10 +71,10 @@ def dummy(x: Tensor) -> Tensor:
 @pytest.mark.parametrize("dtype", [torch.double])
 @pytest.mark.parametrize("name", sample_list)
 def test_single(dtype: torch.dtype, name: str) -> None:
-    dd: DD = {"device": device, "dtype": dtype}
+    dd: DD = {"device": DEVICE, "dtype": dtype}
 
     sample = samples[name]
-    numbers = sample["numbers"].to(device)
+    numbers = sample["numbers"].to(DEVICE)
     positions = sample["positions"].to(**dd)
     charge = torch.tensor(0.0, **dd)
 
@@ -107,13 +106,13 @@ def test_single(dtype: torch.dtype, name: str) -> None:
 @pytest.mark.parametrize("name1", ["LiH"])
 @pytest.mark.parametrize("name2", sample_list)
 def skip_test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
-    dd: DD = {"device": device, "dtype": dtype}
+    dd: DD = {"device": DEVICE, "dtype": dtype}
 
     sample1, sample2 = samples[name1], samples[name2]
     numbers = pack(
         [
-            sample1["numbers"].to(device),
-            sample2["numbers"].to(device),
+            sample1["numbers"].to(DEVICE),
+            sample2["numbers"].to(DEVICE),
         ]
     )
     positions = pack(
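Throughout these tests the `dd` mapping bundles device and dtype so that every tensor is either created with, or moved onto, the same device/dtype pair via `**dd`. A minimal sketch of the pattern, with `DD` spelled out as the TypedDict it is used as here (the actual definition lives in `tad_dftd4.typing` and is not shown in this diff):

from typing import Optional, TypedDict

import torch


class DD(TypedDict):
    # Mirrors how the tests use the type: a device/dtype pair that can be
    # splatted into tensor factories and Tensor.to().
    device: Optional[torch.device]
    dtype: torch.dtype


DEVICE: Optional[torch.device] = None  # stand-in for ..conftest.DEVICE

dd: DD = {"device": DEVICE, "dtype": torch.double}
positions = torch.zeros((4, 3), **dd)  # created directly with device and dtype
charge = torch.tensor(0.0, **dd)       # same pattern as in the tests above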
test/test_grad/test_nan.py (8 additions, 7 deletions)
@@ -27,9 +27,10 @@
 from tad_dftd4 import dftd4
 from tad_dftd4.typing import DD
 
-tol = 1e-8
+from ..conftest import DEVICE
 
-device = None
+tol = 1e-7
+
 
 # sample, which previously failed with NaN's in tad-dftd3
 numbers = torch.tensor([6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1, 1, 1, 7, 8, 8, 8])
@@ -68,9 +69,9 @@
 @pytest.mark.grad
 @pytest.mark.parametrize("dtype", [torch.float, torch.double])
 def test_single(dtype: torch.dtype) -> None:
-    dd: DD = {"device": device, "dtype": dtype}
+    dd: DD = {"device": DEVICE, "dtype": dtype}
 
-    nums = numbers.to(device=device)
+    nums = numbers.to(device=DEVICE)
     pos = positions.to(**dd)
     chrg = charge.to(**dd)
     par = {k: v.to(**dd) for k, v in param.items()}
@@ -96,12 +97,12 @@ def test_single(dtype: torch.dtype) -> None:
 @pytest.mark.parametrize("dtype", [torch.float, torch.double])
 @pytest.mark.parametrize("name", ["LiH", "SiH4"])
 def test_batch(dtype: torch.dtype, name: str) -> None:
-    dd: DD = {"device": device, "dtype": dtype}
+    dd: DD = {"device": DEVICE, "dtype": dtype}
 
     nums = pack(
         (
-            numbers.to(device=device),
-            samples[name]["numbers"].to(device=device),
+            numbers.to(device=DEVICE),
+            samples[name]["numbers"].to(device=DEVICE),
         )
     )
     pos = pack(
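test_nan.py exercises a molecule that previously produced NaNs in the gradient in tad-dftd3; the assertions themselves lie outside the visible hunks. A hedged sketch of what such a NaN guard typically looks like, using placeholder LiH data and guessed damping parameters instead of the module-level `numbers`, `positions`, `charge`, and `param` defined in the file:

import torch

from tad_dftd4 import dftd4

# Placeholder system and parameters for illustration only; the real test
# uses the module-level tensors defined in test_nan.py.
numbers = torch.tensor([3, 1])  # LiH
positions = torch.tensor(
    [[0.0, 0.0, 0.0], [0.0, 0.0, 3.0]], dtype=torch.double, requires_grad=True
)
charge = torch.tensor(0.0, dtype=torch.double)
param = {
    "s6": torch.tensor(1.00, dtype=torch.double),
    "s8": torch.tensor(1.85, dtype=torch.double),
    "a1": torch.tensor(0.40, dtype=torch.double),
    "a2": torch.tensor(5.00, dtype=torch.double),
}

# Sum the atom-resolved dispersion energies and make sure the positional
# gradient is finite everywhere.
energy = dftd4(numbers, positions, charge, param).sum()
(grad,) = torch.autograd.grad(energy, positions)
assert not torch.isnan(grad).any()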
test/test_grad/test_param.py (8 additions, 8 deletions)
@@ -28,21 +28,21 @@
 from tad_dftd4 import dftd4
 from tad_dftd4.typing import DD, Callable, Tensor
 
-sample_list = ["LiH", "AmF3", "SiH4"]
+from ..conftest import DEVICE
 
-tol = 1e-8
+sample_list = ["LiH", "AmF3", "SiH4"]
 
-device = None
+tol = 1e-7
 
 
 def gradchecker(dtype: torch.dtype, name: str) -> tuple[
     Callable[[Tensor, Tensor, Tensor, Tensor], Tensor],  # autograd function
     tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor],
 ]:
-    dd: DD = {"device": device, "dtype": dtype}
+    dd: DD = {"device": DEVICE, "dtype": dtype}
 
     sample = samples[name]
-    numbers = sample["numbers"].to(device=device)
+    numbers = sample["numbers"].to(device=DEVICE)
     positions = sample["positions"].to(**dd)
     charge = torch.tensor(0.0, **dd)
 
@@ -104,13 +104,13 @@ def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[
     Callable[[Tensor, Tensor, Tensor, Tensor], Tensor],  # autograd function
     tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor],
 ]:
-    dd: DD = {"device": device, "dtype": dtype}
+    dd: DD = {"device": DEVICE, "dtype": dtype}
 
     sample1, sample2 = samples[name1], samples[name2]
     numbers = pack(
         [
-            sample1["numbers"].to(device=device),
-            sample2["numbers"].to(device=device),
+            sample1["numbers"].to(device=DEVICE),
+            sample2["numbers"].to(device=DEVICE),
         ]
     )
     positions = pack(
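`gradchecker` returns an autograd function together with the tensors that are marked differentiable (here the damping parameters), so the test body can hand both straight to PyTorch's gradient checker with the module-level tolerance. A minimal sketch of that consumption, assuming the standard `torch.autograd.gradcheck` API; the actual call sites are outside the visible hunks:

import torch
from torch.autograd import gradcheck

tol = 1e-7

def check(factory, dtype: torch.dtype = torch.double, name: str = "LiH") -> None:
    # `factory` stands for the gradchecker function defined above: it returns
    # the function to differentiate and the tensors to differentiate against.
    func, diffvars = factory(dtype, name)
    assert gradcheck(func, diffvars, atol=tol)

# e.g. check(gradchecker) inside the actual test functions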
test/test_grad/test_pos.py (1 addition, 1 deletion)
@@ -32,7 +32,7 @@
 
 sample_list = ["LiH", "SiH4", "PbH4-BiH3", "MB16_43_01"]
 
-tol = 1e-8
+tol = 1e-7
 
 
 def gradchecker(dtype: torch.dtype, name: str) -> tuple[
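The only change in test_pos.py is the relaxed tolerance. For double precision this is still a conservative bound: machine epsilon for float64 is roughly 2.2e-16, so 1e-7 sits comfortably above the error expected from the finite-difference reference while still catching genuinely wrong analytical gradients. A purely illustrative check:

import torch

# float64 machine epsilon is ~2.22e-16; an absolute tolerance of 1e-7 for a
# finite-difference gradient check is loose relative to round-off but still
# tight enough to flag incorrect analytical gradients.
eps = torch.finfo(torch.double).eps
tol = 1e-7
assert tol > eps
print(eps, tol)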
