-
Notifications
You must be signed in to change notification settings - Fork 2
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
85e726e
commit 7328c76
Showing
8 changed files
with
976 additions
and
2 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,179 @@ | ||
""" | ||
This class provides tools for normalizing features to have more | ||
desirable behavior under uniform scaling. | ||
""" | ||
from abc import ABC, abstractmethod | ||
|
||
import numpy as np | ||
|
||
from ciderpress.dft.settings import get_cider_exponent | ||
|
||
|
||
def regularize_cider_exponent_(a, dadrho, dadsigma, dadtau, amin):
    """Smoothly regularize the CIDER exponent in place so it stays positive.

    Exponents below ``amin`` are mapped to ``amin * exp(a / amin - 1)``,
    which matches the identity map in value and slope at ``a == amin`` and
    decays smoothly to zero instead of going negative. The derivative
    arrays are damped by the chain-rule factor of this map.

    Args:
        a (np.ndarray): exponent values; modified in place.
        dadrho (np.ndarray): d(a)/d(rho); damped in place where a < amin.
        dadsigma (np.ndarray): d(a)/d(sigma); damped in place where a < amin.
        dadtau (np.ndarray): d(a)/d(tau); damped in place where a < amin.
        amin (float): lower reference bound for the regularized exponent.
    """
    cond = a < amin
    # Chain-rule factor d(a_new)/d(a_old) = exp(a_old / amin - 1). It must
    # be evaluated from the ORIGINAL a, i.e. before a is overwritten below.
    # (The previous code computed it after the update, from the new a,
    # which yields an inconsistent derivative of the regularization map.)
    grad_damp = np.exp(a[cond] / amin - 1)
    a[cond] = amin * grad_damp
    dadrho[cond] *= grad_damp
    dadsigma[cond] *= grad_damp
    dadtau[cond] *= grad_damp
|
||
|
||
class FeatNormalizer(ABC):
    """Abstract base class for feature normalizers.

    A normalizer supplies a multiplicative factor C(rho, sigma, tau) by
    which a raw feature is scaled, together with the derivatives of that
    factor with respect to rho, sigma, and tau.
    """

    @abstractmethod
    def evaluate(self, rho, sigma, tau):
        """Return the normalization factor C at each grid point."""
        pass

    @abstractmethod
    def evaluate_with_grad(self, rho, sigma, tau):
        """Return (C, dC/drho, dC/dsigma, dC/dtau) at each grid point."""
        pass

    def apply_norm_fwd_(self, feat, rho, sigma, tau):
        """Scale ``feat`` in place by the normalization factor."""
        feat[:] *= self.evaluate(rho, sigma, tau)

    def apply_norm_bwd_(self, vfeat, feat, rho, sigma, tau, vrho, vsigma, vtau):
        """Backpropagate through :meth:`apply_norm_fwd_` (in-place updates).

        ``vfeat`` is rescaled by C, and the rho/sigma/tau potentials are
        accumulated with chain-rule terms from C's density dependence.

        NOTE(review): the accumulation terms below use ``vfeat`` AFTER it
        has been multiplied by C, so they effectively contain
        ``d_i * C * vfeat_orig * feat`` rather than
        ``d_i * vfeat_orig * feat`` — confirm this matches the callers'
        convention for whether ``feat`` is the normed or unnormed feature.
        """
        c, d1, d2, d3 = self.evaluate_with_grad(rho, sigma, tau)
        vfeat[:] *= c
        vrho[:] += d1 * vfeat * feat
        vsigma[:] += d2 * vfeat * feat
        vtau[:] += d3 * vfeat * feat

    @abstractmethod
    def get_usp(self):
        """Return the uniform-scaling power p such that C -> lambda**p * C
        when (rho, sigma, tau) -> (l**3 rho, l**8 sigma, l**5 tau)."""
        pass
|
||
|
||
class ConstantNormalizer(FeatNormalizer):
    """Normalizer whose factor is a fixed constant everywhere."""

    def __init__(self, const):
        # Multiplicative constant applied uniformly to the feature.
        self.const = const

    def evaluate(self, rho, sigma, tau):
        """Return an array shaped like rho, filled with the constant."""
        out = np.ones_like(rho)
        out *= self.const
        return out

    def evaluate_with_grad(self, rho, sigma, tau):
        """The factor is constant, so every derivative is zero."""
        zero = np.zeros_like(rho)
        return self.evaluate(rho, sigma, tau), zero, zero, zero

    def get_usp(self):
        # A constant is invariant under uniform scaling.
        return 0
|
||
|
||
class DensityPowerNormalizer(FeatNormalizer):
    """Normalizer of the form const * rho**power with a low-density cutoff."""

    def __init__(self, const, power, cutoff=1e-10):
        self.const = const
        self.power = power
        # Densities below this value are clamped to it.
        self.cutoff = cutoff

    def evaluate(self, rho, sigma, tau):
        """Return const * max(rho, cutoff)**power."""
        clamped = np.maximum(self.cutoff, rho)
        return self.const * clamped**self.power

    def evaluate_with_grad(self, rho, sigma, tau):
        """Return the factor and its derivatives; the rho derivative is
        zeroed wherever rho falls below the cutoff."""
        below = rho < self.cutoff
        clamped = np.maximum(self.cutoff, rho)
        val = self.const * clamped**self.power
        dval = self.power * val / clamped
        dval[below] = 0
        zero = np.zeros_like(val)
        return val, dval, zero, zero

    def get_usp(self):
        # rho scales as lambda**3, so rho**power scales as lambda**(3 * power).
        return 3 * self.power
|
||
|
||
class CiderExponentNormalizer(FeatNormalizer):
    """Normalizer equal to a power of the CIDER exponent a(rho, sigma, tau)."""

    def __init__(self, const1, const2, const3, power, cutoff=1e-10):
        self.const1 = const1  # a0 prefactor for the exponent
        self.const2 = const2  # multiplier for the gradient term
        self.const3 = const3  # multiplier for the kinetic-energy term
        self.power = power
        self.cutoff = cutoff  # low-density cutoff

    def _raw_exponent(self, rho, sigma, tau):
        """Evaluate the CIDER exponent (and its derivatives) with this
        normalizer's parameters."""
        return get_cider_exponent(
            rho,
            sigma,
            tau,
            a0=self.const1,
            grad_mul=self.const2,
            tau_mul=self.const3,
            rhocut=self.cutoff,
            nspin=1,
        )

    def evaluate(self, rho, sigma, tau):
        """Return a**power with rho clamped at the cutoff."""
        clamped = np.maximum(self.cutoff, rho)
        return self._raw_exponent(clamped, sigma, tau)[0] ** self.power

    def evaluate_with_grad(self, rho, sigma, tau):
        """Return (a**power, d/drho, d/dsigma, d/dtau); all derivatives
        are zeroed wherever rho falls below the cutoff."""
        below = rho < self.cutoff
        expnt, dn, ds, dt = self._raw_exponent(rho, sigma, tau)
        val = expnt**self.power
        # Chain rule through the outer power: d(a**p) = p * a**(p-1) * da.
        chain = self.power * expnt ** (self.power - 1)
        dn = chain * dn
        ds = chain * ds
        dt = chain * dt
        dn[below] = 0
        ds[below] = 0
        dt[below] = 0
        return val, dn, ds, dt

    def get_usp(self):
        # The exponent scales as lambda**2, so a**power scales as
        # lambda**(2 * power).
        return 2 * self.power
|
||
|
||
class GeneralNormalizer(FeatNormalizer):
    """Product of a CIDER-exponent normalizer and a density-power normalizer,
    i.e. a(rho, sigma, tau)**power1 * rho**power2."""

    def __init__(self, const1, const2, const3, power1, power2, cutoff=1e-10):
        self.norm1 = CiderExponentNormalizer(const1, const2, const3, power1, cutoff)
        self.norm2 = DensityPowerNormalizer(1.0, power2, cutoff)

    def evaluate(self, rho, sigma, tau):
        """Return the product of the two component factors."""
        val1 = self.norm1.evaluate(rho, sigma, tau)
        val2 = self.norm2.evaluate(rho, sigma, tau)
        return val1 * val2

    def evaluate_with_grad(self, rho, sigma, tau):
        """Product rule per variable: d(n1 * n2) = n1 * dn2 + n2 * dn1."""
        val1, dn1, ds1, dt1 = self.norm1.evaluate_with_grad(rho, sigma, tau)
        val2, dn2, ds2, dt2 = self.norm2.evaluate_with_grad(rho, sigma, tau)
        grads = [
            val1 * g2 + val2 * g1
            for g1, g2 in zip((dn1, ds1, dt1), (dn2, ds2, dt2))
        ]
        return (val1 * val2, grads[0], grads[1], grads[2])

    def get_usp(self):
        # Uniform-scaling powers add under multiplication of factors.
        return self.norm1.get_usp() + self.norm2.get_usp()
|
||
|
||
class FeatNormalizerList:
    """Read-only, ordered collection of feature normalizers."""

    def __init__(self, normalizers):
        """
        Args:
            normalizers (list[FeatNormalizer]): list of feature normalizers
        """
        self._normalizers = normalizers

    @property
    def nfeat(self):
        """Number of normalizers, i.e. number of features."""
        return len(self._normalizers)

    def get_usps(self):
        """Return the uniform-scaling power of each normalizer, in order."""
        usps = []
        for normalizer in self._normalizers:
            usps.append(normalizer.get_usp())
        return usps

    def __getitem__(self, index):
        return self._normalizers[index]

    def __setitem__(self, key, value):
        # The list is deliberately immutable after construction.
        raise RuntimeError("Cannot set element of normalizer list")
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,154 @@ | ||
import unittest | ||
|
||
import numpy as np | ||
from numpy.testing import assert_almost_equal | ||
|
||
from ciderpress.dft.feat_normalizer import ( | ||
CFC, | ||
ConstantNormalizer, | ||
DensityNormalizer, | ||
FeatNormalizerList, | ||
get_normalizer_from_exponent_params, | ||
) | ||
from ciderpress.dft.settings import get_alpha, get_s2 | ||
from ciderpress.dft.tests import debug_normalizers as rho_normalizers | ||
|
||
# Step size for the central finite differences used throughout this module.
DELTA = 1e-7


def eval_fd(func, inps, i):
    """Central finite-difference derivative of ``func`` w.r.t. ``inps[i]``.

    The inputs are copied first, so the caller's arrays are not modified.
    """
    args = [np.copy(v) for v in inps]
    args[i] = args[i] + 0.5 * DELTA
    upper = func(*args)
    args[i] = args[i] - DELTA
    lower = func(*args)
    return (upper - lower) / DELTA
|
||
|
||
class TestFeatNormalizer(unittest.TestCase):
    """Validate FeatNormalizerList against the reference debug normalizers.

    Checks forward evaluation, derivatives (against finite differences),
    and uniform-scaling behavior of the normalized features.
    """

    # Grid coordinate used to build the synthetic density (set in setUpClass).
    _x = None
    norm_list: FeatNormalizerList = None
    ref_norm_list: rho_normalizers.FeatNormalizerList = None

    @classmethod
    def setUpClass(cls):
        """Build a synthetic (rho, sigma, tau) data set plus random features,
        and the two normalizer lists to be compared."""
        N = 50
        cls._x = np.linspace(0, 1, N)
        # Fixed seeds so the test data are reproducible.
        np.random.seed(53)
        directions = 0.25 * np.random.normal(size=(3, N))
        np.random.seed(53)
        alpha = np.random.normal(size=N) ** 2
        rho = 1 + 0.5 * np.sin(np.pi * cls._x)
        drho = directions * rho
        # von Weizsaecker kinetic energy density |grad rho|^2 / (8 rho);
        # tau is built so that alpha >= 0 by construction.
        tauw = np.einsum("xg,xg->g", drho, drho) / (8 * rho)
        tau = tauw + alpha * CFC * rho ** (5.0 / 3)
        rho_data = np.concatenate([[rho], drho, [tau]], axis=0)
        assert rho_data.shape[0] == 5
        cls.rho = rho_data
        sigma = np.einsum("xg,xg->g", drho, drho)
        # First three slots (rho, s^2, alpha) have no normalizer.
        cls.norm_list = FeatNormalizerList(
            [
                None,
                None,
                None,
                ConstantNormalizer(3.14),
                DensityNormalizer(1.2, 0.75),
                get_normalizer_from_exponent_params(0.0, 1.5, 1.0, 0.03125),
                get_normalizer_from_exponent_params(
                    0.75, 1.5, 1.0 * 1.2, 0.03125 * 1.2
                ),
            ]
        )
        np.random.seed(72)
        feat = np.random.normal(size=(cls.norm_list.nfeat - 3, N))
        dfeat = np.random.normal(size=(cls.norm_list.nfeat, N))
        # Feature vector: (rho, s^2, alpha) followed by the random features,
        # with a leading spin-like axis of size 1.
        cls.x = np.stack(
            [
                rho,
                get_s2(rho, sigma),
                get_alpha(rho, sigma, tau),
            ]
            + [feat[i] for i in range(feat.shape[0])]
        )[None, :, :]
        cls.dx = dfeat
        # Reference implementations corresponding to slots 3..6 above.
        cls.ref_norm_list = rho_normalizers.FeatNormalizerList(
            [
                rho_normalizers.ConstantNormalizer(3.14),
                rho_normalizers.DensityPowerNormalizer(1.2, 0.75),
                rho_normalizers.CiderExponentNormalizer(1.0, 0.0, 0.03125, 1.5),
                rho_normalizers.GeneralNormalizer(
                    1.0 * 1.2, 0.0, 0.03125 * 1.2, 1.5, 0.75
                ),
            ]
        )

    def test_evaluate(self):
        """End-to-end check of values, derivatives, and scaling laws."""
        rho_data = self.rho.copy()
        rho = rho_data[0]
        sigma = np.einsum("xg,xg->g", rho_data[1:4], rho_data[1:4])
        tau = rho_data[4]
        initial_vals = []

        # Directional derivative of the normed features vs. central FD.
        feat = self.norm_list.get_normalized_feature_vector(self.x)
        dfeat = self.norm_list.get_derivative_of_normed_features(self.x[0], self.dx)
        featp = self.norm_list.get_normalized_feature_vector(self.x + DELTA * self.dx)
        featm = self.norm_list.get_normalized_feature_vector(self.x - DELTA * self.dx)
        dfeat_ref = (featp - featm) / (2 * DELTA)
        for i in range(self.norm_list.nfeat):
            assert_almost_equal(dfeat[i], dfeat_ref[0, i])
        # Gradient w.r.t. the unnormed features of E = sum(feat^2),
        # checked feature-by-feature against finite differences.
        v = 2 * feat
        vfeat = self.norm_list.get_derivative_wrt_unnormed_features(self.x, v)
        for i in range(self.norm_list.nfeat):
            x = self.x.copy()
            x[:, i] += 0.5 * DELTA
            featp = self.norm_list.get_normalized_feature_vector(x)
            ep = (featp**2).sum(axis=1)
            x[:, i] -= DELTA
            featm = self.norm_list.get_normalized_feature_vector(x)
            em = (featm**2).sum(axis=1)
            assert_almost_equal(vfeat[:, i], (ep - em) / DELTA, 5)

        # Reference normalizers: values, analytic vs. FD derivatives, and
        # agreement with the normalized feature vector (offset of 3 slots).
        for i in range(self.ref_norm_list.nfeat):
            res0 = self.ref_norm_list[i].evaluate(rho, sigma, tau)
            res, drdn, drds, drdt = self.ref_norm_list[i].evaluate_with_grad(
                rho, sigma, tau
            )
            inps = [rho, sigma, tau]
            drdn_ref = eval_fd(self.ref_norm_list[i].evaluate, inps, 0)
            drds_ref = eval_fd(self.ref_norm_list[i].evaluate, inps, 1)
            drdt_ref = eval_fd(self.ref_norm_list[i].evaluate, inps, 2)
            assert_almost_equal(res, res0)
            assert_almost_equal(feat[0, i + 3], res0 * self.x[0, 3 + i])
            assert_almost_equal(drdn, drdn_ref)
            assert_almost_equal(drds, drds_ref)
            assert_almost_equal(drdt, drdt_ref)
            initial_vals.append((res, drdn, drds, drdt))

        # Uniform scaling: rho ~ l^3, sigma ~ l^8, tau ~ l^5; the factor
        # should scale as l**usp and each derivative picks up the
        # corresponding inverse power of its variable.
        lambd = 1.7
        rho = lambd**3 * rho_data[0]
        sigma = lambd**8 * np.einsum("xg,xg->g", rho_data[1:4], rho_data[1:4])
        tau = lambd**5 * rho_data[4]
        for i in range(self.ref_norm_list.nfeat):
            res, drdn, drds, drdt = self.ref_norm_list[i].evaluate_with_grad(
                rho, sigma, tau
            )
            res_ref, drdn_ref, drds_ref, drdt_ref = initial_vals[i]
            usp = self.ref_norm_list[i].get_usp()
            assert_almost_equal(res, lambd**usp * res_ref)
            assert_almost_equal(drdn, lambd ** (usp - 3) * drdn_ref)
            assert_almost_equal(drds, lambd ** (usp - 8) * drds_ref)
            assert_almost_equal(drdt, lambd ** (usp - 5) * drdt_ref)

        # Scaling of the full normalized feature vector when only the
        # density channel (slot 0) is scaled; slot 0 itself scales as l^3.
        x = self.x.copy()
        x[:, 0] *= lambd**3
        scaled_feat = self.norm_list.get_normalized_feature_vector(x)
        usps = np.array(self.norm_list.get_usps())
        usps[0] = 3
        scaled_ref = lambd ** (usps[None, :, None]) * feat
        for i in range(scaled_ref.shape[1]):
            assert_almost_equal(scaled_feat[:, i], scaled_ref[:, i])
|
||
|
||
if __name__ == "__main__":
    # Allow running this test module directly: `python <this_file>`.
    unittest.main()
Oops, something went wrong.