diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 810582cc..a3b9c57c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -9,7 +9,7 @@ on: jobs: build: - name: python "3.10" on ubuntu-latest + name: python "3.11" on ubuntu-latest runs-on: ubuntu-latest strategy: @@ -20,7 +20,7 @@ jobs: - name: Set up python uses: actions/setup-python@v2 with: - python-version: "3.10" + python-version: "3.11" - name: Upgrade pip run: | python -m pip install --upgrade pip diff --git a/examples/03-qsgw.py b/examples/03-qsgw.py index 2a7706b2..3a18a50b 100644 --- a/examples/03-qsgw.py +++ b/examples/03-qsgw.py @@ -34,3 +34,12 @@ gw = qsGW(mf) gw.solver_options = dict(optimise_chempot=True, fock_loop=True) gw.kernel(nmom_max=3) + +# qsGW finds a self-consistent Green's function in the space defined +# by the moment expansion, and via quasiparticle self-consistency +# obtains a (different) set of single particle Koopmans states. Both +# of these can be accessed via the `gw` object: +print("QP energies:") +print(gw.qp_energy) +print("GF energies:") +print(gw.gf.energy) diff --git a/examples/11-srg_qsgw_s_parameter.py b/examples/11-srg_qsgw_s_parameter.py index 02c657e2..b5741016 100644 --- a/examples/11-srg_qsgw_s_parameter.py +++ b/examples/11-srg_qsgw_s_parameter.py @@ -40,7 +40,7 @@ # GW gw = GW(mf) -_, gf, se = gw.kernel(nmom_max) +_, gf, se, _ = gw.kernel(nmom_max) gf.remove_uncoupled(tol=0.8) if which == "ip": gw_eta = -gf.get_occupied().energy.max() * HARTREE2EV @@ -50,12 +50,12 @@ # qsGW gw = qsGW(mf) gw.eta = 0.05 -_, gf, se = gw.kernel(nmom_max) +_, gf, se, _ = gw.kernel(nmom_max) gf.remove_uncoupled(tol=0.8) if which == "ip": - qsgw_eta = -gf.get_occupied().energy.max() * HARTREE2EV + qsgw_eta = -np.max(gw.qp_energy[mf.mo_occ > 0]) * HARTREE2EV else: - qsgw_eta = gf.get_virtual().energy.min() * HARTREE2EV + qsgw_eta = np.min(gw.qp_energy[mf.mo_occ == 0]) * HARTREE2EV # SRG-qsGW s_params = sorted(list(data.keys()))[::-1] @@ -72,16 +72,16 @@ gw.diis_space = 10 gw.conv_tol = 1e-5 gw.conv_tol_moms = 1 - conv, gf, se = gw.kernel(nmom_max, moments=moments) + conv, gf, se, _ = gw.kernel(nmom_max, moments=moments) moments = ( se.get_occupied().moment(range(nmom_max+1)), se.get_virtual().moment(range(nmom_max+1)), ) gf.remove_uncoupled(tol=0.8) if which == "ip": - qsgw_srg.append(-gf.get_occupied().energy.max() * HARTREE2EV) + qsgw_srg.append(-np.max(gw.qp_energy[mf.mo_occ > 0]) * HARTREE2EV) else: - qsgw_srg.append(gf.get_virtual().energy.min() * HARTREE2EV) + qsgw_srg.append(np.min(gw.qp_energy[mf.mo_occ == 0]) * HARTREE2EV) qsgw_srg = np.array(qsgw_srg) diff --git a/examples/30-ktda.py b/examples/30-ktda.py new file mode 100644 index 00000000..8704d0bc --- /dev/null +++ b/examples/30-ktda.py @@ -0,0 +1,44 @@ +"""Example of running k-space GW calculations with dTDA screening. 
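+Each solver below (KGW, qsKGW, evKGW and scKGW) selects the dTDA
+screening by setting its `polarizability` attribute to "dtda" before
+calling `kernel`.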
+""" + +import numpy as np +from pyscf.pbc import gto, dft +from momentGW.pbc.gw import KGW +from momentGW.pbc.qsgw import qsKGW +from momentGW.pbc.evgw import evKGW +from momentGW.pbc.scgw import scKGW + +cell = gto.Cell() +cell.atom = "He 0 0 0; He 1 1 1" +cell.a = np.eye(3) * 3 +cell.basis = "6-31g" +cell.verbose = 5 +cell.build() + +kpts = cell.make_kpts([2, 2, 2]) + +mf = dft.KRKS(cell, kpts) +mf = mf.density_fit() +mf.xc = "hf" +mf.kernel() + +# KGW +gw = KGW(mf) +gw.polarizability = "dtda" +gw.kernel(nmom_max=5) + +# qsKGW +gw = qsKGW(mf) +gw.polarizability = "dtda" +gw.srg = 100 +gw.kernel(nmom_max=1) + +# evKGW +gw = evKGW(mf) +gw.polarizability = "dtda" +gw.kernel(nmom_max=1) + +# scKGW +gw = scKGW(mf) +gw.polarizability = "dtda" +gw.kernel(nmom_max=1) diff --git a/examples/31-kpt_interpolation.py b/examples/31-kpt_interpolation.py new file mode 100644 index 00000000..cee0de1a --- /dev/null +++ b/examples/31-kpt_interpolation.py @@ -0,0 +1,55 @@ +"""Example of interpolation of a GW calculation onto a new k-point mesh. +""" + +import numpy as np +import matplotlib.pyplot as plt +from pyscf.pbc import gto, dft +from momentGW.pbc.gw import KGW + +cell = gto.Cell() +cell.atom = "H 0 0 0; H 0 0 1" +cell.a = np.array([[25, 0, 0], [0, 25, 0], [0, 0, 2]]) +cell.basis = "sto6g" +cell.max_memory = 1e10 +cell.verbose = 5 +cell.build() + +kmesh1 = [1, 1, 3] +kmesh2 = [1, 1, 9] +kpts1 = cell.make_kpts(kmesh1) +kpts2 = cell.make_kpts(kmesh2) + +mf1 = dft.KRKS(cell, kpts1, xc="hf") +mf1 = mf1.density_fit(auxbasis="weigend") +mf1.exxdiv = None +mf1.conv_tol = 1e-10 +mf1.kernel() + +mf2 = dft.KRKS(cell, kpts2, xc="hf") +mf2 = mf2.density_fit(mf1.with_df.auxbasis) +mf2.exxdiv = mf1.exxdiv +mf2.conv_tol = mf1.conv_tol +mf2.kernel() + +gw1 = KGW(mf1) +gw1.polarizability = "dtda" +gw1.kernel(5) + +gw2 = gw1.interpolate(mf2, 5) + +e1 = gw1.qp_energy +e2 = gw2.qp_energy + + +def get_xy(kpts, e): + kpts = kpts.wrap_around(kpts._kpts)[:, 2] + arg = np.argsort(kpts) + return kpts[arg], np.array(e)[arg] + +plt.figure() +plt.plot(*get_xy(gw1.kpts, e1), "C0.-", label="original") +plt.plot(*get_xy(gw2.kpts, e2), "C2.-", label="interpolated") +handles, labels = plt.gca().get_legend_handles_labels() +by_label = dict(zip(labels, handles)) +plt.legend(by_label.values(), by_label.keys()) +plt.show() diff --git a/momentGW/__init__.py b/momentGW/__init__.py index bee02b40..3adbdde3 100644 --- a/momentGW/__init__.py +++ b/momentGW/__init__.py @@ -52,3 +52,7 @@ from momentGW.evgw import evGW from momentGW.scgw import scGW from momentGW.qsgw import qsGW +from momentGW.pbc.gw import KGW +from momentGW.pbc.evgw import evKGW +from momentGW.pbc.qsgw import qsKGW +from momentGW.pbc.scgw import scKGW diff --git a/momentGW/base.py b/momentGW/base.py index 794fd5b7..b7b9efe3 100644 --- a/momentGW/base.py +++ b/momentGW/base.py @@ -2,6 +2,8 @@ Base class for moment-constrained GW solvers. 
""" +import warnings + import numpy as np from pyscf import lib from pyscf.agf2 import mpi_helper @@ -82,6 +84,9 @@ def __init__(self, mf, **kwargs): self.stdout = self.mol.stdout self.max_memory = 1e10 + if kwargs.pop("vhf_df", None) is not None: + warnings.warn("Keyword argument vhf_df is deprecated.", DeprecationWarning) + for key, val in kwargs.items(): if not hasattr(self, key): raise AttributeError("%s has no attribute %s", self.name, key) @@ -97,6 +102,7 @@ def __init__(self, mf, **kwargs): self.converged = None self.se = None self.gf = None + self._qp_energy = None self._keys = set(self.__dict__.keys()).union(self._opts) @@ -118,10 +124,60 @@ def build_se_moments(self, *args, **kwargs): def solve_dyson(self, *args, **kwargs): raise NotImplementedError + def _kernel(self, *args, **kwargs): + raise NotImplementedError + + def kernel( + self, + nmom_max, + mo_energy=None, + mo_coeff=None, + moments=None, + integrals=None, + ): + if mo_coeff is None: + mo_coeff = self.mo_coeff + if mo_energy is None: + mo_energy = self.mo_energy + + cput0 = (logger.process_clock(), logger.perf_counter()) + self.dump_flags() + logger.info(self, "nmom_max = %d", nmom_max) + + self.converged, self.gf, self.se, self._qp_energy = self._kernel( + nmom_max, + mo_energy, + mo_coeff, + integrals=integrals, + ) + + gf_occ = self.gf.get_occupied() + gf_occ.remove_uncoupled(tol=1e-1) + for n in range(min(5, gf_occ.naux)): + en = -gf_occ.energy[-(n + 1)] + vn = gf_occ.coupling[:, -(n + 1)] + qpwt = np.linalg.norm(vn) ** 2 + logger.note(self, "IP energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) + + gf_vir = self.gf.get_virtual() + gf_vir.remove_uncoupled(tol=1e-1) + for n in range(min(5, gf_vir.naux)): + en = gf_vir.energy[n] + vn = gf_vir.coupling[:, n] + qpwt = np.linalg.norm(vn) ** 2 + logger.note(self, "EA energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) + + logger.timer(self, self.name, *cput0) + + return self.converged, self.gf, self.se, self.qp_energy + @staticmethod def _moment_error(t, t_prev): """Compute scaled error between moments.""" + if t_prev is None: + t_prev = np.zeros_like(t) + error = 0 for a, b in zip(t, t_prev): a = a / max(np.max(np.abs(a)), 1) @@ -141,6 +197,63 @@ def _gf_to_occ(gf): return occ + @staticmethod + def _gf_to_energy(gf): + """Return the `energy` attribute of a `gf`. Allows hooking in + `pbc` methods to retain syntax. + """ + return gf.energy + + @staticmethod + def _gf_to_coupling(gf): + """Return the `coupling` attribute of a `gf`. Allows hooking in + `pbc` methods to retain syntax. + """ + return gf.coupling + + def _gf_to_mo_energy(self, gf): + """Find the poles of a GF which best overlap with the MOs. + + Parameters + ---------- + gf : GreensFunction + Green's function object. + + Returns + ------- + mo_energy : ndarray + Updated MO energies. + """ + + check = set() + mo_energy = np.zeros_like(self.mo_energy) + + for i in range(self.nmo): + arg = np.argmax(gf.coupling[i] ** 2) + mo_energy[i] = gf.energy[arg] + check.add(arg) + + if len(check) != self.nmo: + logger.warn(self, "Inconsistent quasiparticle weights!") + + return mo_energy + + @property + def qp_energy(self): + """ + Return the quasiparticle energies. For most GW methods, this + simply consists of the poles of the `self.gf` that best + overlap with the MOs, in order. In some methods such as qsGW, + these two quantities are not the same. 
+ """ + + if self._qp_energy is not None: + return self._qp_energy + + qp_energy = self._gf_to_mo_energy(self.gf) + + return qp_energy + @property def mol(self): return self._scf.mol @@ -151,6 +264,17 @@ def with_df(self): raise ValueError("GW solvers require density fitting.") return self._scf.with_df + @property + def has_fock_loop(self): + """ + Returns a boolean indicating whether the solver requires a Fock + loop. For most GW methods, this is simply `self.fock_loop`. In + some methods such as qsGW, a Fock loop is required with or + without `self.fock_loop` for the quasiparticle self-consistency, + with this property acting as a hook to indicate this. + """ + return self.fock_loop + get_nmo = get_nmo get_nocc = get_nocc get_frozen_mask = get_frozen_mask diff --git a/momentGW/evgw.py b/momentGW/evgw.py index 35bd9e11..0f9949a4 100644 --- a/momentGW/evgw.py +++ b/momentGW/evgw.py @@ -50,6 +50,9 @@ def kernel( Green's function object se : pyscf.agf2.SelfEnergy Self-energy object + qp_energy : numpy.ndarray + Quasiparticle energies. Always None for evGW, returned for + compatibility with other evGW methods. """ logger.warn(gw, "evGW is untested!") @@ -60,8 +63,6 @@ def kernel( if integrals is None: integrals = gw.ao2mo() - nmo = gw.nmo - nocc = gw.nocc mo_energy = mo_energy.copy() mo_energy_ref = mo_energy.copy() @@ -76,7 +77,7 @@ def kernel( ) conv = False - th_prev = tp_prev = np.zeros((nmom_max + 1, nmo, nmo)) + th_prev = tp_prev = None for cycle in range(1, gw.max_cycle + 1): logger.info(gw, "%s iteration %d", gw.name, cycle) @@ -95,12 +96,12 @@ def kernel( # Extrapolate the moments try: - th, tp = diis.update_with_scaling(np.array((th, tp)), (2, 3)) + th, tp = diis.update_with_scaling(np.array((th, tp)), (-2, -1)) except: logger.debug(gw, "DIIS step failed at iteration %d", cycle) # Damp the moments - if gw.damping != 0.0: + if gw.damping != 0.0 and cycle > 1: th = gw.damping * th_prev + (1.0 - gw.damping) * th tp = gw.damping * tp_prev + (1.0 - gw.damping) * tp @@ -108,34 +109,17 @@ def kernel( gf, se = gw.solve_dyson(th, tp, se_static, integrals=integrals) # Update the MO energies - check = set() mo_energy_prev = mo_energy.copy() - for i in range(nmo): - arg = np.argmax(gf.coupling[i] ** 2) - mo_energy[i] = gf.energy[arg] - check.add(arg) - if len(check) != nmo: - logger.warn(gw, "Inconsistent quasiparticle weights!") + mo_energy = gw._gf_to_mo_energy(gf) # Check for convergence - error_homo = abs(mo_energy[nocc - 1] - mo_energy_prev[nocc - 1]) - error_lumo = abs(mo_energy[nocc] - mo_energy_prev[nocc]) - error_th = gw._moment_error(th, th_prev) - error_tp = gw._moment_error(tp, tp_prev) + conv = gw.check_convergence(mo_energy, mo_energy_prev, th, th_prev, tp, tp_prev) th_prev = th.copy() tp_prev = tp.copy() - logger.info(gw, "Change in QPs: HOMO = %.6g LUMO = %.6g", error_homo, error_lumo) - logger.info(gw, "Change in moments: occ = %.6g vir = %.6g", error_th, error_tp) - if gw.conv_logical( - ( - max(error_homo, error_lumo) < gw.conv_tol, - max(error_th, error_tp) < gw.conv_tol_moms, - ) - ): - conv = True + if conv: break - return conv, gf, se + return conv, gf, se, None class evGW(GW): @@ -195,52 +179,28 @@ class evGW(GW): def name(self): return "evG%sW%s" % ("0" if self.g0 else "", "0" if self.w0 else "") - def kernel( - self, - nmom_max, - mo_energy=None, - mo_coeff=None, - moments=None, - integrals=None, - ): - if mo_coeff is None: - mo_coeff = self.mo_coeff - if mo_energy is None: - mo_energy = self.mo_energy - - cput0 = (logger.process_clock(), logger.perf_counter()) - 
self.dump_flags() - logger.info(self, "nmom_max = %d", nmom_max) - - self.converged, self.gf, self.se = kernel( - self, - nmom_max, - mo_energy, - mo_coeff, - integrals=integrals, - ) + _kernel = kernel - gf_occ = self.gf.get_occupied() - gf_occ.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_occ.naux)): - en = -gf_occ.energy[-(n + 1)] - vn = gf_occ.coupling[:, -(n + 1)] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "IP energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - gf_vir = self.gf.get_virtual() - gf_vir.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_vir.naux)): - en = gf_vir.energy[n] - vn = gf_vir.coupling[:, n] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "EA energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - if self.converged: - logger.note(self, "%s converged", self.name) - else: - logger.note(self, "%s failed to converge", self.name) + def check_convergence(self, mo_energy, mo_energy_prev, th, th_prev, tp, tp_prev): + """Check for convergence, and print a summary of changes.""" + + if th_prev is None: + th_prev = np.zeros_like(th) + if tp_prev is None: + tp_prev = np.zeros_like(tp) - logger.timer(self, self.name, *cput0) + error_homo = abs(mo_energy[self.nocc - 1] - mo_energy_prev[self.nocc - 1]) + error_lumo = abs(mo_energy[self.nocc] - mo_energy_prev[self.nocc]) - return self.converged, self.gf, self.se + error_th = self._moment_error(th, th_prev) + error_tp = self._moment_error(tp, tp_prev) + + logger.info(self, "Change in QPs: HOMO = %.6g LUMO = %.6g", error_homo, error_lumo) + logger.info(self, "Change in moments: occ = %.6g vir = %.6g", error_th, error_tp) + + return self.conv_logical( + ( + max(error_homo, error_lumo) < self.conv_tol, + max(error_th, error_tp) < self.conv_tol_moms, + ) + ) diff --git a/momentGW/gw.py b/momentGW/gw.py index 7afb76af..4d91b190 100644 --- a/momentGW/gw.py +++ b/momentGW/gw.py @@ -53,12 +53,15 @@ def kernel( Returns ------- conv : bool - Convergence flag. Always True for AGW, returned for + Convergence flag. Always True for GW, returned for compatibility with other GW methods. gf : pyscf.agf2.GreensFunction Green's function object se : pyscf.agf2.SelfEnergy Self-energy object + qp_energy : numpy.ndarray + Quasiparticle energies. Always None for GW, returned for + compatibility with other GW methods. """ if integrals is None: @@ -85,7 +88,7 @@ def kernel( gf, se = gw.solve_dyson(th, tp, se_static, integrals=integrals) conv = True - return conv, gf, se + return conv, gf, se, None class GW(BaseGW): @@ -98,6 +101,8 @@ class GW(BaseGW): def name(self): return "G0W0" + _kernel = kernel + def build_se_static(self, integrals, mo_coeff=None, mo_energy=None): """Build the static part of the self-energy, including the Fock matrix. 
@@ -126,7 +131,7 @@ def build_se_static(self, integrals, mo_coeff=None, mo_energy=None): mo_energy = self.mo_energy if getattr(self._scf, "xc", "hf") == "hf": - se_static = np.zeros((self.nmo, self.nmo)) + se_static = np.zeros_like(self._scf.make_rdm1(mo_coeff=mo_coeff)) else: with util.SilentSCF(self._scf): vmf = self._scf.get_j() - self._scf.get_veff() @@ -134,12 +139,12 @@ def build_se_static(self, integrals, mo_coeff=None, mo_energy=None): vk = integrals.get_k(dm, basis="ao") se_static = vmf - vk * 0.5 - se_static = lib.einsum("pq,pi,qj->ij", se_static, mo_coeff, mo_coeff) + se_static = lib.einsum("...pq,...pi,...qj->...ij", se_static, mo_coeff, mo_coeff) if self.diagonal_se: - se_static = np.diag(np.diag(se_static)) + se_static = lib.einsum("...pq,pq->...pq", se_static, np.eye(se_static.shape[-1])) - se_static += np.diag(mo_energy) + se_static += lib.einsum("...p,...pq->...pq", mo_energy, np.eye(se_static.shape[-1])) return se_static @@ -180,7 +185,7 @@ def build_se_moments(self, nmom_max, integrals, **kwargs): else: raise NotImplementedError - def ao2mo(self): + def ao2mo(self, transform=True): """Get the integrals.""" integrals = Integrals( @@ -189,9 +194,10 @@ def ao2mo(self): self.mo_occ, compression=self.compression, compression_tol=self.compression_tol, - store_full=self.fock_loop, + store_full=self.has_fock_loop, ) - integrals.transform() + if transform: + integrals.transform() return integrals @@ -256,6 +262,7 @@ def solve_dyson(self, se_moments_hole, se_moments_part, se_static, integrals=Non gf.coupling = mpi_helper.bcast(gf.coupling, root=0) if self.fock_loop: + # TODO remove these try...except try: gf, se, conv = fock_loop(self, gf, se, integrals=integrals, **self.fock_opts) except IndexError: @@ -269,7 +276,7 @@ def solve_dyson(self, se_moments_hole, se_moments_part, se_static, integrals=Non ) except IndexError: cpt = gf.chempot - error = np.trace(gf.make_rdm1()) - gw.nocc * 2 + error = np.trace(gf.make_rdm1()) - self.nocc * 2 se.chempot = cpt gf.chempot = cpt @@ -300,48 +307,3 @@ def moment_error(self, se_moments_hole, se_moments_part, se): ) return eh, ep - - def kernel( - self, - nmom_max, - mo_energy=None, - mo_coeff=None, - moments=None, - integrals=None, - ): - if mo_coeff is None: - mo_coeff = self.mo_coeff - if mo_energy is None: - mo_energy = self.mo_energy - - cput0 = (logger.process_clock(), logger.perf_counter()) - self.dump_flags() - logger.info(self, "nmom_max = %d", nmom_max) - - self.converged, self.gf, self.se = kernel( - self, - nmom_max, - mo_energy, - mo_coeff, - integrals=integrals, - ) - - gf_occ = self.gf.get_occupied() - gf_occ.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_occ.naux)): - en = -gf_occ.energy[-(n + 1)] - vn = gf_occ.coupling[:, -(n + 1)] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "IP energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - gf_vir = self.gf.get_virtual() - gf_vir.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_vir.naux)): - en = gf_vir.energy[n] - vn = gf_vir.coupling[:, n] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "EA energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - logger.timer(self, self.name, *cput0) - - return self.converged, self.gf, self.se diff --git a/momentGW/ints.py b/momentGW/ints.py index 36580cbc..91c16475 100644 --- a/momentGW/ints.py +++ b/momentGW/ints.py @@ -43,8 +43,8 @@ def __init__( compression_tol=1e-10, store_full=False, ): - self.verbose = with_df.mol.verbose - self.stdout = with_df.mol.stdout + self.verbose = with_df.verbose + self.stdout = 
with_df.stdout self.with_df = with_df self.mo_coeff = mo_coeff @@ -59,18 +59,23 @@ def __init__( self._mo_occ_w = None self._rot = None + def _parse_compression(self): + if not self.compression: + return set() + compression = self.compression.replace("vo", "ov") + compression = set(x for x in compression.split(",")) + if "ia" in compression and "ov" in compression: + raise ValueError("`compression` cannot contain both `'ia'` and `'ov'` (or `'vo'`)") + return compression + def get_compression_metric(self): """ Return the compression metric. """ - # TODO cache this if needed - compression = self.compression.replace("vo", "ov") - compression = set(x for x in compression.split(",")) + compression = self._parse_compression() if not compression: return None - if "ia" in compression and "ov" in compression: - raise ValueError("`compression` cannot contain both `'ia'` and `'ov'` (or `'vo'`)") cput0 = (logger.process_clock(), logger.perf_counter()) logger.info(self, f"Computing compression metric for {self.__class__.__name__}") @@ -169,9 +174,8 @@ def transform(self, do_Lpq=None, do_Lpx=True, do_Lia=True): # If needed, rotate the full (L|pq) array if do_Lpq: logger.debug(self, f"(L|pq) size: ({self.naux_full}, {self.nmo}, {o1 - o0})") - Lpq[b0:b1] = lib.einsum( - "Lpq,pi,qj->Lij", block, self.mo_coeff, self.mo_coeff[:, o0:o1] - ) + coeffs = (self.mo_coeff, self.mo_coeff[:, o0:o1]) + Lpq[b0:b1] = lib.einsum("Lpq,pi,qj->Lij", block, *coeffs) # Compress the block block = lib.einsum("L...,LQ->Q...", block, rot[b0:b1]) @@ -179,15 +183,16 @@ def transform(self, do_Lpq=None, do_Lpx=True, do_Lia=True): # Build the compressed (L|px) array if do_Lpx: logger.debug(self, f"(L|px) size: ({self.naux}, {self.nmo}, {p1 - p0})") - Lpx += lib.einsum("Lpq,pi,qj->Lij", block, self.mo_coeff, self.mo_coeff_g[:, p0:p1]) + coeffs = (self.mo_coeff, self.mo_coeff_g[:, p0:p1]) + Lpx += lib.einsum("Lpq,pi,qj->Lij", block, *coeffs) # Build the compressed (L|ia) array if do_Lia: logger.debug(self, f"(L|ia) size: ({self.naux}, {q1 - q0})") i0, a0 = divmod(q0, self.nvir_w) i1, a1 = divmod(q1, self.nvir_w) - coeffs = [self.mo_coeff_w[:, i0 : i1 + 1], self.mo_coeff_w[:, self.nocc_w :]] - tmp = lib.einsum("Lpq,pi,qj->Lij", block, coeffs[0], coeffs[1]) + coeffs = (self.mo_coeff_w[:, i0 : i1 + 1], self.mo_coeff_w[:, self.nocc_w :]) + tmp = lib.einsum("Lpq,pi,qj->Lij", block, *coeffs) tmp = tmp.reshape(self.naux, -1) Lia += tmp[:, a0 : a0 + (q1 - q0)] @@ -215,7 +220,7 @@ def update_coeffs(self, mo_coeff_g=None, mo_coeff_w=None, mo_occ_w=None): if mo_coeff_w is not None: self._mo_coeff_w = mo_coeff_w self._mo_occ_w = mo_occ_w - if "ia" in self.compression: + if "ia" in self._parse_compression(): self.rot = self.get_compression_metric() self.transform( diff --git a/momentGW/pbc/__init__.py b/momentGW/pbc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/momentGW/pbc/base.py b/momentGW/pbc/base.py new file mode 100644 index 00000000..a2e46680 --- /dev/null +++ b/momentGW/pbc/base.py @@ -0,0 +1,203 @@ +""" +Base class for moment-constrained GW solvers with periodic boundary +conditions. +""" + +import functools + +import numpy as np +from pyscf import lib +from pyscf.lib import logger +from pyscf.pbc.mp.kmp2 import get_frozen_mask, get_nmo, get_nocc + +from momentGW.base import BaseGW +from momentGW.pbc.kpts import KPoints + + +class BaseKGW(BaseGW): + """{description} + + Parameters + ---------- + mf : pyscf.pbc.scf.KSCF + PySCF periodic mean-field class. 
+ diagonal_se : bool, optional + If `True`, use a diagonal approximation in the self-energy. + Default value is `False`. + polarizability : str, optional + Type of polarizability to use, can be one of `("drpa", + "drpa-exact"). Default value is `"drpa"`. + npoints : int, optional + Number of numerical integration points. Default value is `48`. + optimise_chempot : bool, optional + If `True`, optimise the chemical potential by shifting the + position of the poles in the self-energy relative to those in + the Green's function. Default value is `False`. + fock_loop : bool, optional + If `True`, self-consistently renormalise the density matrix + according to the updated Green's function. Default value is + `False`. + fock_opts : dict, optional + Dictionary of options compatiable with `pyscf.dfragf2.DFRAGF2` + objects that are used in the Fock loop. + compression : str, optional + Blocks of the ERIs to use as a metric for compression. Can be + one or more of `("oo", "ov", "vv", "ia")` which can be passed as + a comma-separated string. `"oo"`, `"ov"` and `"vv"` refer to + compression on the initial ERIs, whereas `"ia"` refers to + compression on the ERIs entering RPA, which may change under a + self-consistent scheme. Default value is `"ia"`. + compression_tol : float, optional + Tolerance for the compression. Default value is `1e-10`. + {extra_parameters} + """ + + # --- Default KGW options + + compression = None + + # --- Extra PBC options + + fc = False + + _opts = BaseGW._opts + [ + "fc", + ] + + def __init__(self, mf, **kwargs): + self._scf = mf + self.verbose = self.mol.verbose + self.stdout = self.mol.stdout + self.max_memory = 1e10 + + for key, val in kwargs.items(): + if not hasattr(self, key): + raise AttributeError("%s has no attribute %s", self.name, key) + setattr(self, key, val) + + # Do not modify: + self.mo_energy = np.asarray(mf.mo_energy) + self.mo_coeff = np.asarray(mf.mo_coeff) + self.mo_occ = np.asarray(mf.mo_occ) + self.frozen = None + self._nocc = None + self._nmo = None + self._kpts = KPoints(self.cell, getattr(mf, "kpts", np.zeros((1, 3)))) + self.converged = None + self.se = None + self.gf = None + self._qp_energy = None + + self._keys = set(self.__dict__.keys()).union(self._opts) + + def kernel( + self, + nmom_max, + mo_energy=None, + mo_coeff=None, + moments=None, + integrals=None, + ): + if mo_coeff is None: + mo_coeff = self.mo_coeff + if mo_energy is None: + mo_energy = self.mo_energy + + cput0 = (logger.process_clock(), logger.perf_counter()) + self.dump_flags() + logger.info(self, "nmom_max = %d", nmom_max) + + self.converged, self.gf, self.se, self._qp_energy = self._kernel( + nmom_max, + mo_energy, + mo_coeff, + integrals=integrals, + ) + + gf_occ = self.gf[0].get_occupied() + gf_occ.remove_uncoupled(tol=1e-1) + for n in range(min(5, gf_occ.naux)): + en = -gf_occ.energy[-(n + 1)] + vn = gf_occ.coupling[:, -(n + 1)] + qpwt = np.linalg.norm(vn) ** 2 + logger.note(self, "IP energy level (Γ) %d E = %.16g QP weight = %0.6g", n, en, qpwt) + + gf_vir = self.gf[0].get_virtual() + gf_vir.remove_uncoupled(tol=1e-1) + for n in range(min(5, gf_vir.naux)): + en = gf_vir.energy[n] + vn = gf_vir.coupling[:, n] + qpwt = np.linalg.norm(vn) ** 2 + logger.note(self, "EA energy level (Γ) %d E = %.16g QP weight = %0.6g", n, en, qpwt) + + logger.timer(self, self.name, *cput0) + + return self.converged, self.gf, self.se, self.qp_energy + + @staticmethod + def _gf_to_occ(gf): + return tuple(BaseGW._gf_to_occ(g) for g in gf) + + @staticmethod + def _gf_to_energy(gf): + return 
tuple(BaseGW._gf_to_energy(g) for g in gf) + + @staticmethod + def _gf_to_coupling(gf): + return tuple(BaseGW._gf_to_coupling(g) for g in gf) + + def _gf_to_mo_energy(self, gf): + """Find the poles of a GF which best overlap with the MOs. + + Parameters + ---------- + gf : tuple of GreensFunction + Green's function object. + + Returns + ------- + mo_energy : ndarray + Updated MO energies. + """ + + mo_energy = np.zeros_like(self.mo_energy) + + for k in self.kpts.loop(1): + check = set() + for i in range(self.nmo): + arg = np.argmax(gf[k].coupling[i] * gf[k].coupling[i].conj()) + mo_energy[k][i] = gf[k].energy[arg] + check.add(arg) + + if len(check) != self.nmo: + logger.warn(self, f"Inconsistent quasiparticle weights at k-point {k}!") + + return mo_energy + + @property + def cell(self): + return self._scf.cell + + mol = cell + + get_nmo = get_nmo + get_nocc = get_nocc + get_frozen_mask = get_frozen_mask + + @property + def kpts(self): + return self._kpts + + @property + def nkpts(self): + return len(self.kpts) + + @property + def nmo(self): + nmo = self.get_nmo(per_kpoint=False) + return nmo + + @property + def nocc(self): + nocc = self.get_nocc(per_kpoint=True) + return nocc diff --git a/momentGW/pbc/df.py b/momentGW/pbc/df.py new file mode 100644 index 00000000..914e17a4 --- /dev/null +++ b/momentGW/pbc/df.py @@ -0,0 +1,1262 @@ +""" +Functionally equivalent to PySCF GDF, with all storage incore and +MPI parallelism. + +Adapted from pyscf.pbc.df.df + +Ref: +J. Chem. Phys. 147, 164119 (2017) +""" + +import collections +import copy +import ctypes + +import numpy as np +import scipy.linalg +from pyscf import __config__, ao2mo, gto, lib +from pyscf.agf2 import mpi_helper +from pyscf.ao2mo.incore import _conc_mos, iden_coeffs +from pyscf.ao2mo.outcore import balance_partition +from pyscf.df import addons +from pyscf.lib import logger +from pyscf.pbc import tools +from pyscf.pbc.df import df, fft_ao2mo, ft_ao, incore +from pyscf.pbc.df.gdf_builder import ( + auxbar, + estimate_eta_for_ke_cutoff, + estimate_eta_min, + estimate_ke_cutoff_for_eta, +) +from pyscf.pbc.df.rsdf_builder import _round_off_to_odd_mesh +from pyscf.pbc.gto.cell import _estimate_rcut +from pyscf.pbc.lib.kpts_helper import KPT_DIFF_TOL, gamma_point, get_kconserv, is_zero, unique + +COMPACT = getattr(__config__, "pbc_df_ao2mo_get_eri_compact", True) + + +def _format_kpts(kpts, n=4): + if kpts is None: + return np.zeros((n, 3)) + else: + kpts = np.asarray(kpts) + if kpts.size == 3 and n != 1: + return np.vstack([kpts] * n).reshape(4, 3) + else: + return kpts.reshape(n, 3) + + +def hash_array(array, tol=KPT_DIFF_TOL): + """ + Get a hashable representation of an array up to a given tol. + + Parameters + ---------- + array : np.ndarray + Array. + tol : float, optional + Threshold for the hashable representation. Default is equal + to `pyscf.pbc.lib.kpts_helper.KPT_DIFF_TOL`. + + Returns + ------- + array_round : tuple + Hashable representation of the array. + """ + + array_round = np.rint(np.asarray(array) / tol).astype(int) + return tuple(array_round.ravel()) + + +def make_auxcell(with_df, auxbasis=None, drop_eta=None): + """ + Build the cell corresponding to the auxiliary functions. + + Note: almost identical to pyscf.pyscf.pbc.df.df.make_modrho_basis + + Parameters + ---------- + with_df : GDF + Density fitting object. + auxbasis : str, optional + Auxiliary basis, default is `with_df.auxbasis`. + drop_eta : float, optional + Threshold in exponents to discard, default is `cell.exp_to_discard`. 
+ + Returns + ------- + auxcell : pyscf.pbc.gto.Cell + Unit cell containg the auxiliary basis. + """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + + auxcell = addons.make_auxmol(with_df.cell, auxbasis) + + steep_shls = [] + ndrop = 0 + rcut = [] + _env = auxcell._env.copy() + + for ib in range(len(auxcell._bas)): + l = auxcell.bas_angular(ib) + nprim = auxcell.bas_nprim(ib) + nctr = auxcell.bas_nctr(ib) + exps = auxcell.bas_exp(ib) + ptr_coeffs = auxcell._bas[ib, gto.PTR_COEFF] + coeffs = auxcell._env[ptr_coeffs : ptr_coeffs + nprim * nctr].reshape(nctr, nprim).T + + mask = exps >= (drop_eta or -np.inf) + log.debug if np.sum(~mask) > 0 else log.debug1( + "Auxiliary function %4d (L = %2d): " "dropped %d functions.", + ib, + l, + np.sum(~mask), + ) + + if drop_eta is not None and np.sum(~mask) > 0: + for k in np.where(mask == 0)[0]: + log.debug(" > %3d : coeff = %12.6f exp = %12.6g", k, coeffs[k], exps[k]) + coeffs = coeffs[mask] + exps = exps[mask] + nprim, ndrop = len(exps), ndrop + nprim - len(exps) + + if nprim > 0: + ptr_exps = auxcell._bas[ib, gto.PTR_EXP] + auxcell._bas[ib, gto.NPRIM_OF] = nprim + _env[ptr_exps : ptr_exps + nprim] = exps + + int1 = gto.gaussian_int(l * 2 + 2, exps) + s = np.einsum("pi,p->i", coeffs, int1) + + coeffs *= np.sqrt(0.25 / np.pi) + coeffs /= s[None] + _env[ptr_coeffs : ptr_coeffs + nprim * nctr] = coeffs.T.ravel() + + steep_shls.append(ib) + + r = _estimate_rcut(exps, l, np.abs(coeffs).max(axis=1), with_df.cell.precision) + rcut.append(r.max()) + + auxcell._env = _env + auxcell._bas = np.asarray(auxcell._bas[steep_shls], order="C") + auxcell.rcut = max(rcut) + + log.info("Dropped %d primitive functions.", ndrop) + log.info("Auxiliary basis: shells = %d cGTOs = %d", auxcell.nbas, auxcell.nao_nr()) + log.info("rcut = %.6g", auxcell.rcut) + + return auxcell + + +def make_chgcell(with_df, smooth_eta, rcut=15.0): + """ + Build the cell corresponding to the smooth Gaussian functions. + + Note: almost identical to pyscf.pyscf.pbc.df.df.make_modchg_basis + + Parameters + ---------- + with_df : GDF + Density fitting object. + smooth_eta : float + Eta optimised for carrying the charge. + rcut : float, optional + Default cutoff distance for carrying the charge, default 15. + + Returns + ------- + chgcell : pyscf.pbc.gto.Cell + Unit cell containing the smooth charge carrying functions. 
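+        The compensating shells reuse the atoms and angular momenta
+        present in `auxcell`, each carrying the single smooth exponent
+        `smooth_eta`.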
+ """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + + chgcell = copy.copy(with_df.auxcell) + chg_bas = [] + chg_env = [smooth_eta] + ptr_eta = with_df.auxcell._env.size + ptr = ptr_eta + 1 + l_max = with_df.auxcell._bas[:, gto.ANG_OF].max() + norms = [ + 0.5 / np.sqrt(np.pi) / gto.gaussian_int(l * 2 + 2, smooth_eta) for l in range(l_max + 1) + ] + + for ia in range(with_df.auxcell.natm): + for l in set(with_df.auxcell._bas[with_df.auxcell._bas[:, gto.ATOM_OF] == ia, gto.ANG_OF]): + chg_bas.append([ia, l, 1, 1, 0, ptr_eta, ptr, 0]) + chg_env.append(norms[l]) + ptr += 1 + + chgcell._atm = with_df.auxcell._atm + chgcell._bas = np.asarray(chg_bas, dtype=np.int32).reshape(-1, gto.BAS_SLOTS) + chgcell._env = np.hstack((with_df.auxcell._env, chg_env)) + + # _estimate_rcut is too tight for the model charge + chgcell.rcut = np.sqrt(np.log(4 * np.pi * rcut**2 / with_df.auxcell.precision) / smooth_eta) + + log.info("Compensating basis: shells = %d cGTOs = %d", chgcell.nbas, chgcell.nao_nr()) + log.info("rcut = %.6g", chgcell.rcut) + + return chgcell + + +def fuse_auxcell(with_df): + """ + Fuse the cells responsible for the auxiliary functions and smooth + Gaussian functions used to carry the charge. Returns the fused cell + along with a function which incorporates contributions from the + charge carrying functions into the auxiliary functions according to + their angular momenta and spherical harmonics. + + Note: almost identical to pyscf.pyscf.pbc.df.df.fuse_auxcell + + Parameters + ---------- + with_df : GDF + Density fitting object. + + Returns + ------- + fused_cell : pyscf.pbc.gto.Cell + Fused cell consisting of auxcell and chgcell. + fuse : callable + Function which incorporates contributions from chgcell into auxcell. + """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + + auxcell = with_df.auxcell + chgcell = with_df.chgcell + + fused_cell = copy.copy(auxcell) + fused_cell._atm, fused_cell._bas, fused_cell._env = gto.conc_env( + auxcell._atm, + auxcell._bas, + auxcell._env, + chgcell._atm, + chgcell._bas, + chgcell._env, + ) + fused_cell.rcut = max(auxcell.rcut, chgcell.rcut) + + log.info("Fused basis: shells = %d cGTOs = %d", fused_cell.nbas, fused_cell.nao_nr()) + log.info("rcut = %.6g", fused_cell.rcut) + + aux_loc = auxcell.ao_loc_nr() + naux = aux_loc[-1] + modchg_offset = -np.ones((chgcell.natm, 8), dtype=int) + smooth_loc = chgcell.ao_loc_nr() + + for i in range(chgcell.nbas): + ia = chgcell.bas_atom(i) + l = chgcell.bas_angular(i) + modchg_offset[ia, l] = smooth_loc[i] + + if auxcell.cart: + # See pyscf.pyscf.pbc.df.df.fuse_auxcell + + c2s_fn = gto.moleintor.libcgto.CINTc2s_ket_sph + aux_loc_sph = auxcell.ao_loc_nr(cart=False) + naux_sph = aux_loc_sph[-1] + + log.debug("Building fusion function via Cartesian primitives.") + + def fuse(Lpq): + Lpq, Lpq_chg = Lpq[:naux], Lpq[naux:] + if Lpq.ndim == 1: + npq = 1 + Lpq_sph = np.empty((naux_sph), dtype=Lpq.dtype) + else: + npq = Lpq.shape[1] + Lpq_sph = np.empty((naux_sph, npq), dtype=Lpq.dtype) + + if Lpq.dtype == np.complex128: + npq *= 2 + + for i in range(auxcell.nbas): + ia = auxcell.bas_atom(i) + l = auxcell.bas_angular(i) + p0 = modchg_offset[ia, l] + + if p0 >= 0: + nd = (l + 1) * (l + 2) // 2 + c0, c1 = aux_loc[i], aux_loc[i + 1] + s0, s1 = aux_loc_sph[i], aux_loc_sph[i + 1] + + for i0, i1 in lib.prange(c0, c1, nd): + Lpq[i0:i1] -= Lpq_chg[p0 : p0 + nd] + + if l < 2: + Lpq_sph[s0:s1] = Lpq[c0:c1] + else: + Lpq_cart = np.asarray(Lpq[c0:c1], order="C") + c2s_fn( + 
Lpq_sph[s0:s1].ctypes.data_as(ctypes.c_void_p), + ctypes.c_int(npq * auxcell.bas_nctr(i)), + Lpq_cart.ctypes.data_as(ctypes.c_void_p), + ctypes.c_int(l), + ) + + return Lpq_sph + + else: + log.debug("Building fusion function via spherical primitives.") + + def fuse(Lpq): + Lpq, Lpq_chg = Lpq[:naux], Lpq[naux:] + + for i in range(auxcell.nbas): + ia = auxcell.bas_atom(i) + l = auxcell.bas_angular(i) + p0 = modchg_offset[ia, l] + + if p0 >= 0: + nd = l * 2 + 1 + + for i0, i1 in lib.prange(aux_loc[i], aux_loc[i + 1], nd): + Lpq[i0:i1] -= Lpq_chg[p0 : p0 + nd] + + return Lpq + + return fused_cell, fuse + + +def find_conserving_qpts(cell, qpts, qpt, tol=KPT_DIFF_TOL): + """ + Search a list of q-points for those that conserve momentum as + Search which (qpts+qpt) satisfies momentum conservation. + + Parameters + ---------- + cell : pyscf.pbc.gto.Cell + Cell object. + qpts : np.ndarray + List of q-points. + qpt : np.ndarray + Other q-point. + tol : float, optional + Threshold for equality. Default is equal to + `pyscf.pbc.lib.kpts_helper.KPT_DIFF_TOL`. + + Returns + ------- + ids : np.ndarray + Indices of qpts that conserve momentum. + """ + + a = cell.lattice_vectors() / (2 * np.pi) + + dif = np.dot(a, (qpts + qpt).T) + dif_int = np.rint(dif) + + mask = np.sum(np.abs(dif - dif_int), axis=0) < tol + ids = np.where(mask)[0] + + return ids + + +def _get_2c2e(with_df, uniq_qpts): + """ + Get the bare two-center two-electron interaction, first term + of Eq. 32. + """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + cput0 = (logger.process_clock(), logger.perf_counter()) + + int2c2e = with_df.fused_cell.pbc_intor("int2c2e", hermi=1, kpts=uniq_qpts) + + log.timer("2c2e", *cput0) + + return int2c2e + + +def _get_3c2e(with_df, kpt_pairs): + """ + Get the bare three-center two-electron interaction, first term + of Eq. 31. + """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + cput0 = (logger.process_clock(), logger.perf_counter()) + + cell = with_df.cell + fused_cell = with_df.fused_cell + + nkij = len(kpt_pairs) + nao = cell.nao_nr() + ngrids = fused_cell.nao_nr() + aux_loc = fused_cell.ao_loc_nr(fused_cell.cart) + + int3c2e = np.zeros((nkij, ngrids, nao * nao), dtype=np.complex128) + + for p0, p1 in mpi_helper.prange(0, fused_cell.nbas, fused_cell.nbas): + cput1 = (logger.process_clock(), logger.perf_counter()) + + shls_slice = (0, cell.nbas, 0, cell.nbas, p0, p1) + q0, q1 = aux_loc[p0], aux_loc[p1] + + int3c2e_part = incore.aux_e2( + cell, fused_cell, "int3c2e", aosym="s2", kptij_lst=kpt_pairs, shls_slice=shls_slice + ) + int3c2e_part = lib.transpose(int3c2e_part, axes=(0, 2, 1)) + + if int3c2e_part.shape[-1] != nao * nao: + assert int3c2e_part.shape[-1] == nao * (nao + 1) // 2 + int3c2e_part = int3c2e_part.reshape(-1, nao * (nao + 1) // 2) + int3c2e_part = lib.unpack_tril(int3c2e_part, lib.HERMITIAN, axis=-1) + + int3c2e_part = int3c2e_part.reshape((nkij, q1 - q0, nao * nao)) + int3c2e[:, q0:q1] = int3c2e_part + + log.timer_debug1("3c2e [%d -> %d] part" % (p0, p1), *cput1) + + mpi_helper.allreduce_safe_inplace(int3c2e) + mpi_helper.barrier() + + log.timer("3c2e", *cput0) + + return int3c2e + + +def _get_j2c(with_df, int2c2e, uniq_qpts): + """ + Build j2c using the 2c2e interaction, int2c2e, Eq. 32. 
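+    Equation numbers follow the GDF reference cited in the module
+    docstring (J. Chem. Phys. 147, 164119 (2017)).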
+ """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + cput0 = (logger.process_clock(), logger.perf_counter()) + + naux = with_df.auxcell.nao_nr() + Gv, Gvbase, kws = with_df.cell.get_Gv_weights(with_df.mesh) + gxyz = lib.cartesian_prod([np.arange(len(x)) for x in Gvbase]) + b = with_df.cell.reciprocal_vectors() + + j2c = int2c2e + + for q, qpt in enumerate(uniq_qpts): + cput1 = (logger.process_clock(), logger.perf_counter()) + + G_chg = ft_ao.ft_ao(with_df.fused_cell, Gv, b=b, gxyz=gxyz, Gvbase=Gvbase, kpt=qpt).T + G_aux = G_chg[naux:] * with_df.weighted_coulG(qpt) + + # Eq. 32 final three terms: + j2c_comp = np.dot(G_aux.conj(), G_chg.T) + if is_zero(qpt): + j2c_comp = j2c_comp.real + j2c[q][naux:] -= j2c_comp + j2c[q][:naux, naux:] = j2c[q][naux:, :naux].T.conj() + + j2c[q] = with_df.fuse(with_df.fuse(j2c[q]).T).T + + del G_chg, G_aux + + log.timer_debug1("j2c [qpt %d] part" % q, *cput1) + + log.timer("j2c", *cput0) + + return j2c + + +def _cholesky_decomposed_metric(with_df, j2c): + """ + Get a function which applies the Cholesky decomposed j2c for a + single q-point. + + NOTE: matches Max's changes to pyscf DF rather than vanilla DF. + """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + cput0 = (logger.process_clock(), logger.perf_counter()) + + j2c_chol = None + + if not with_df.linear_dep_always: + try: + j2c = scipy.linalg.cholesky(j2c, lower=True) + + def j2c_chol(x): + return scipy.linalg.solve_triangular(j2c, x, lower=True, overwrite_b=True) + + except scipy.linalg.LinAlgError: + log.warning("Cholesky decomposition failed for j2c") + + if j2c_chol is None and with_df.linear_dep_method == "regularize": + try: + eps = 1e-14 * np.eye(j2c.shape[-1]) + j2c = scipy.linalg.cholesky(j2c + eps, lower=True) + + def j2c_chol(x): + return scipy.linalg.solve_triangular(j2c, x, lower=True, overwrite_b=True) + + except scipy.linalg.LinAlgError: + log.warning("Regularised Cholesky decomposition failed for j2c") + + if j2c_chol is None: + w, v = scipy.linalg.eigh(j2c) + cond = w.max() / w.min() + mask = w > with_df.linear_dep_threshold + + log.info("DF metric linear dependency detected") + log.info("Condition number = %.4g", cond) + log.info("Dropped %d auxiliary functions.", np.sum(mask)) + + j2c = v[:, mask].conj().T + j2c /= np.sqrt(w[mask])[:, None] + + def j2c_chol(x): + return np.dot(j2c, x) + + return j2c_chol + + +def _get_j3c(with_df, j2c, int3c2e, uniq_qpts, uniq_inverse_dict, kpt_pairs, out=None): + """ + Use j2c and int3c2e to construct j3c. 
+ """ + + log = logger.Logger(with_df.stdout, with_df.verbose) + cput0 = (logger.process_clock(), logger.perf_counter()) + + cell = with_df.cell + auxcell = with_df.auxcell + + nao = cell.nao_nr() + naux = auxcell.nao_nr() + Gv, Gvbase, kws = cell.get_Gv_weights(with_df.mesh) + gxyz = lib.cartesian_prod([np.arange(len(x)) for x in Gvbase]) + ngrids = gxyz.shape[0] + b = cell.reciprocal_vectors() + j2c_chol = lambda v: v + kpts = with_df.kpts + nkpts = len(kpts) + + if out is None: + out = np.zeros((nkpts, nkpts, naux, nao, nao), dtype=np.complex128) + + for q in mpi_helper.nrange(len(uniq_qpts)): + cput1 = (logger.process_clock(), logger.perf_counter()) + log.debug("Constructing j3c [qpt %d].", q) + + qpt = uniq_qpts[q] + adapted_pair_idx = uniq_inverse_dict[q] + adapted_kpts = kpt_pairs[:, 1][adapted_pair_idx] + + log.debug("qpt = %s", qpt) + log.debug("adapted_pair_idx = %s", adapted_pair_idx) + + # Prepare cholesky decomposition of j2c + if j2c is not None: + pair_idx = find_conserving_qpts(cell, uniq_qpts, -qpt)[0] + j2c_chol = _cholesky_decomposed_metric(with_df, j2c[pair_idx]) + log.debug("pair_idx = %s", pair_idx) + + # Eq. 33 + shls_slice = (auxcell.nbas, with_df.fused_cell.nbas) + G_chg = ft_ao.ft_ao( + with_df.fused_cell, Gv, shls_slice=shls_slice, b=b, gxyz=gxyz, Gvbase=Gvbase, kpt=qpt + ) + G_chg *= with_df.weighted_coulG(qpt).ravel()[:, None] + log.debug1("Norm of FT for fused cell: %.12g", np.linalg.norm(G_chg)) + + # Eq. 26 + if is_zero(qpt): + log.debug("Including net charge of AO products") + vbar = with_df.fuse(auxbar(with_df.fused_cell)) + ovlp = cell.pbc_intor("int1e_ovlp", hermi=0, kpts=adapted_kpts) + ovlp = [np.ravel(s) for s in ovlp] + + # Eq. 24 + bstart, bend, ncol = balance_partition(cell.ao_loc_nr() * nao, nao * nao)[0] + shls_slice = (bstart, bend, 0, cell.nbas) + G_ao = ft_ao.ft_aopair_kpts( + cell, + Gv, + shls_slice=shls_slice, + aosym="s1", + b=b, + gxyz=gxyz, + Gvbase=Gvbase, + q=qpt, + kptjs=adapted_kpts, + ) + G_ao = G_ao.reshape(-1, ngrids, ncol) + log.debug1("Norm of FT for AO cell: %.12g", np.linalg.norm(G_ao)) + + for kji, ji in enumerate(adapted_pair_idx): + # Eq. 31 first term: + v = int3c2e[ji] + + # Eq. 31 second term + if is_zero(qpt): + for i in np.where(vbar != 0)[0]: + v[i] -= vbar[i] * ovlp[kji] + + # Eq. 31 third term + v[naux:] -= np.dot(G_chg.T.conj(), G_ao[kji]) + + # Fused charge-carrying part with auxiliary part + v = with_df.fuse(v) + + # Cholesky decompose Eq. 29 + v = j2c_chol(v) + v = v.reshape(-1, nao, nao) + + # Sum into all symmetry-related k-points + for ki in with_df.kpt_hash[hash_array(kpt_pairs[ji][0])]: + for kj in with_df.kpt_hash[hash_array(kpt_pairs[ji][1])]: + out[ki, kj, : v.shape[0]] += v + log.debug("Filled j3c for kpt [%d, %d]", ki, kj) + if ki != kj: + out[kj, ki, : v.shape[0]] += lib.transpose(v, axes=(0, 2, 1)).conj() + log.debug("Filled j3c for kpt [%d, %d]", kj, ki) + + log.timer_debug1("j3c [qpt %d]" % q, *cput1) + + mpi_helper.allreduce_safe_inplace(out) + mpi_helper.barrier() + + log.timer("j3c", *cput0) + + return out + + +def _make_j3c(with_df, kpt_pairs): + """ + Build the j3c array. 
+ + cell: the unit cell for the calculation + auxcell: the unit cell for the auxiliary functions + chgcell: the unit cell for the smooth Gaussians + fused_cell: auxcell and chgcell combined + """ + + cput0 = (logger.process_clock(), logger.perf_counter()) + log = with_df.log + + if with_df.cell.dimension < 3: + raise ValueError("GDF does not support low-dimension cells") + + kptis = kpt_pairs[:, 0] + kptjs = kpt_pairs[:, 1] + qpts = kptjs - kptis + uniq_qpts, uniq_index, uniq_inverse = unique(qpts) + uniq_inverse_dict = {k: np.where(uniq_inverse == k)[0] for k in range(len(uniq_qpts))} + + log.info("Number of unique qpts (kj - ki): %d", len(uniq_qpts)) + log.debug1("uniq_qpts:\n%s", uniq_qpts) + + # Get the 2c2e interaction: + int2c2e = _get_2c2e(with_df, uniq_qpts) + + # Get the 3c2e interaction: + int3c2e = _get_3c2e(with_df, kpt_pairs) + + # Get j2c: + j2c = _get_j2c(with_df, int2c2e, uniq_qpts) + + # Get j3c: + j3c = _get_j3c(with_df, j2c, int3c2e, uniq_qpts, uniq_inverse_dict, kpt_pairs) + + log.timer("j3c", *cput0) + + return j3c + + +def _fao2mo(eri, cp, cq): + """AO2MO for 3c integrals""" + npq, cpq, spq = _conc_mos(cp, cq, compact=False)[1:] + naux = eri.shape[0] + cpq = np.asarray(cpq, dtype=np.complex128) + out = ao2mo._ao2mo.r_e2(eri, cpq, spq, [], None) + return out.reshape(naux, cp.shape[1], cq.shape[1]) + + +class GDF(df.GDF): + """Incore Gaussian density fitting. + + Parameters + ---------- + cell : pyscf.pbc.gto.Cell + Unit cell containing the system information and basis. + kpts : numpy.ndarray (nk, 3), optional + Array containing the sampled k-points, default value is the gamma + point only. + log : logger.Logger, optional + Logger to stream output to, default value is logging.getLogger(__name__). + """ + + def __init__(self, cell, kpts=np.zeros((1, 3))): + if not cell.dimension == 3: + raise ValueError("%s does not support low dimension systems" % self.__class__) + + self.verbose = cell.verbose + self.stdout = cell.stdout + self.log = logger.Logger(self.stdout, self.verbose) + self.log.info("Initializing %s", self.__class__.__name__) + self.log.info("=============%s", len(str(self.__class__.__name__)) * "=") + + self.cell = cell + self.kpts = kpts + self._auxbasis = None + + self.eta, self.mesh = self.build_mesh() + self.exp_to_discard = cell.exp_to_discard + self.rcut_smooth = 15.0 + self.linear_dep_threshold = 1e-9 + self.linear_dep_method = "regularize" + self.linear_dep_always = False + + # Cached properties: + self._get_nuc = None + self._get_pp = None + self._ovlp = None + self._madelung = None + + # The follow attributes are not input options. + self.auxcell = None + self.chgcell = None + self.fused_cell = None + self.kconserv = None + self.kpts_band = None + self.kpt_hash = None + self._j_only = False + self._cderi = None + self._rsh_df = {} + self._keys = set(self.__dict__.keys()) + + def build_mesh(self): + """ + Search for optimised eta and mesh. 
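+        Returns the optimised `(eta, mesh)` pair.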
+ """ + + cell = self.cell + + ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), cell.mesh) + ke_cutoff = ke_cutoff[: cell.dimension].min() + + eta_cell = estimate_eta_for_ke_cutoff(cell, ke_cutoff, cell.precision) + eta_guess = estimate_eta_min(cell, cell.precision) + + self.log.debug("ke_cutoff = %.6g", ke_cutoff) + self.log.debug("eta_cell = %.6g", eta_cell) + self.log.debug("eta_guess = %.6g", eta_guess) + + if eta_cell < eta_guess: + eta, mesh = eta_cell, cell.mesh + else: + eta = eta_guess + ke_cutoff = estimate_ke_cutoff_for_eta(cell, eta, cell.precision) + mesh = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_cutoff) + + mesh = _round_off_to_odd_mesh(mesh) + + self.log.info("Built mesh: shape = %s eta = %.6g", mesh, eta) + + return eta, mesh + + def reset(self, cell=None): + """ + Reset the object. + """ + if cell is not None: + self.cell = cell + self.auxcell = None + self.chgcell = None + self.fused_cell = None + self.kconserv = None + self.kpt_hash = None + self._cderi = None + self._rsh_df = {} + self._get_nuc = None + self._get_pp = None + self._ovlp = None + self._madelung = None + return self + + def dump_flags(self): + """ + Print flags. + """ + self.log.info("GDF parameters:") + for key in [ + "auxbasis", + "eta", + "exp_to_discard", + "rcut_smooth", + "linear_dep_threshold", + "linear_dep_method", + "linear_dep_always", + ]: + self.log.info(" > %-24s %r", key + ":", getattr(self, key)) + return self + + def build(self, j_only=None, with_j3c=True): + """ + Build the _cderi array and associated values. + + Parameters + ---------- + with_j3c : bool, optional + If False, do not build the _cderi array and only build the + associated values, default False. + """ + + j_only = j_only or self._j_only + if j_only: + self.log.warn("j_only=True has not effect on overhead in %s", self.__class__) + if self.kpts_band is not None: + raise ValueError("%s does not support kwarg kpts_band" % self.__class__) + + self.check_sanity() + self.dump_flags() + + self.auxcell = make_auxcell(self, auxbasis=self.auxbasis, drop_eta=self.exp_to_discard) + self.chgcell = make_chgcell(self, self.eta, rcut=self.rcut_smooth) + self.fused_cell, self.fuse = fuse_auxcell(self) + self.kconserv = get_kconserv(self.cell, self.kpts) + + kpts = np.asarray(self.kpts)[unique(self.kpts)[1]] + kpt_pairs = [(ki, kpts[j]) for i, ki in enumerate(kpts) for j in range(i + 1)] + kpt_pairs = np.asarray(kpt_pairs) + + self.kpt_hash = collections.defaultdict(list) + for k, kpt in enumerate(self.kpts): + val = hash_array(kpt) + self.kpt_hash[val].append(k) + + self.kptij_hash = collections.defaultdict(list) + for k, kpt in enumerate(kpt_pairs): + val = hash_array(kpt) + self.kptij_hash[val].append(k) + + if with_j3c: + self._cderi = self._make_j3c(kpt_pairs) + + return self + + _make_j3c = _make_j3c + + def get_naoaux(self): + """Get the number of auxiliaries.""" + + if self._cderi is None: + self.build() + return self._cderi.shape[2] + + def sr_loop(self, kpti_kptj=np.zeros((2, 3)), max_memory=None, compact=True, blksize=None): + """ + Short-range part. + + Parameters + ---------- + kpti_kptj : numpy.ndarray (2, 3), optional + k-points to loop over integral contributions at, default is + the Gamma point. + compact : bool, optional + If True, return the lower-triangular part of the symmetric + integrals where appropriate, default value is True. + blksize : int, optional + Block size for looping over integrals, default is naux. + + Yields + ------ + LpqR : numpy.ndarray (blk, nao2) + Real part of the integrals. 
Shape depends on `compact`, + `blksize` and `self.get_naoaux`. + LpqI : numpy.ndarray (blk, nao2) + Imaginary part of the integrals, shape is the same as LpqR. + sign : int + Sign of integrals, `vayesta.misc.GDF` does not support any + instances where this is not 1, but it is included for + compatibility. + """ + + if self._cderi is None: + self.build() + kpti, kptj = kpti_kptj + ki = self.kpt_hash[hash_array(kpti)][0] + kj = self.kpt_hash[hash_array(kptj)][0] + naux = self.get_naoaux() + Lpq = self._cderi + if blksize is None: + blksize = naux + + for q0, q1 in lib.prange(0, naux, blksize): + LpqR = Lpq[ki, kj, q0:q1].real + LpqI = Lpq[ki, kj, q0:q1].imag + if compact: # and is_zero(kpti-kptj): + LpqR = lib.pack_tril(LpqR, axis=-1) + LpqI = lib.pack_tril(LpqI, axis=-1) + LpqR = np.asarray(LpqR.reshape(min(q1 - q0, naux), -1), order="C") + LpqI = np.asarray(LpqI.reshape(min(q1 - q0, naux), -1), order="C") + yield LpqR, LpqI, 1 + LpqR = LpqI = None + + def get_jk( + self, + dm, + hermi=1, + kpts=None, + kpts_band=None, + with_j=True, + with_k=True, + omega=None, + exxdiv=None, + ): + """ + Build the J (direct) and K (exchange) contributions to the Fock + matrix due to a given density matrix. + + Parameters + ---------- + dm : numpy.ndarray (nk, nao, nao) or (nset, nk, nao, nao) + Density matrices at each k-point. + with_j : bool, optional + If True, build the J matrix, default value is True. + with_k : bool, optional + If True, build the K matrix, default value is True. + exxdiv : str, optional + Type of exchange divergence treatment to use, default value + is None. + """ + + from vayesta import libs # FIXME dependency? + + if kpts_band is not None: + raise ValueError( + "%s.get_jk does not support keyword argument kpts_band", self.__class__ + ) + if omega is not None: + raise ValueError( + "%s.get_jk does not support keyword argument omega=%s", self.__class__, omega + ) + if not (kpts is None or kpts is self.kpts): + raise ValueError("%s.get_jk only supports kpts=None or kpts=GDF.kpts" % self.__class__) + + nkpts = len(self.kpts) + nao = self.cell.nao_nr() + naux = self.get_naoaux() + naux_slice = ( + mpi_helper.rank * naux // mpi_helper.size, + (mpi_helper.rank + 1) * naux // mpi_helper.size, + ) + + dms = dm.reshape(-1, nkpts, nao, nao) + ndm = dms.shape[0] + + cderi = np.asarray(self._cderi, order="C") + dms = np.asarray(dms, order="C", dtype=np.complex128) + libcore = libs.load_library("core") + vj = vk = None + + if with_j: + vj = np.zeros((ndm, nkpts, nao, nao), dtype=np.complex128) + if with_k: + vk = np.zeros((ndm, nkpts, nao, nao), dtype=np.complex128) + + for i in range(ndm): + libcore.j3c_jk( + ctypes.c_int64(nkpts), + ctypes.c_int64(nao), + ctypes.c_int64(naux), + (ctypes.c_int64 * 2)(*naux_slice), + cderi.ctypes.data_as(ctypes.c_void_p), + dms[i].ctypes.data_as(ctypes.c_void_p), + ctypes.c_bool(with_j), + ctypes.c_bool(with_k), + vj[i].ctypes.data_as(ctypes.c_void_p) + if vj is not None + else ctypes.POINTER(ctypes.c_void_p)(), + vk[i].ctypes.data_as(ctypes.c_void_p) + if vk is not None + else ctypes.POINTER(ctypes.c_void_p)(), + ) + + mpi_helper.barrier() + mpi_helper.allreduce_safe_inplace(vj) + mpi_helper.allreduce_safe_inplace(vk) + + if with_k and exxdiv == "ewald": + s = self.get_ovlp() + madelung = self.madelung + for i in range(ndm): + for k in range(nkpts): + vk[i, k] += madelung * np.linalg.multi_dot((s[k], dms[i, k], s[k])) + + vj = vj.reshape(dm.shape) + vk = vk.reshape(dm.shape) + + return vj, vk + + def get_eri(self, kpts, compact=COMPACT): + """ + Get the 
four-center AO electronic repulsion integrals at a + given k-point. + + Parameters + ---------- + kpts : numpy.ndarray + k-points to build the integrals at. + compact : bool, optional + If True, compress the auxiliaries according to symmetries + where appropriate. + + Returns + ------- + numpy.ndarray (nao2, nao2) + Output ERIs, shape depends on `compact`. + """ + + if self._cderi is None: + self.build() + + nao = self.cell.nao_nr() + naux = self.get_naoaux() + kptijkl = _format_kpts(kpts, n=4) + if not fft_ao2mo._iskconserv(self.cell, kptijkl): + self.log.warning( + self.cell, "Momentum conservation not found in " "the given k-points %s", kptijkl + ) + return np.zeros((nao, nao, nao, nao)) + ki, kj, kk, kl = (self.kpt_hash[hash_array(kpt)][0] for kpt in kptijkl) + + Lpq = self._cderi[ki, kj] + Lrs = self._cderi[kk, kl] + if gamma_point(kptijkl): + Lpq = Lpq.real + Lrs = Lrs.real + if compact: + Lpq = lib.pack_tril(Lpq) + Lrs = lib.pack_tril(Lrs) + Lpq = Lpq.reshape(naux, -1) + Lrs = Lrs.reshape(naux, -1) + + eri = np.dot(Lpq.T, Lrs) + + return eri + + get_ao_eri = get_eri + + def ao2mo(self, mo_coeffs, kpts, compact=COMPACT): + """ + Get the four-center MO electronic repulsion integrals at a + given k-point. + + Parameters + ---------- + mo_coeffs : numpy.ndarray + MO coefficients to rotate into. + kpts : numpy.ndarray + k-points to build the integrals at. + compact : bool, optional + If True, compress the auxiliaries according to symmetries + where appropriate. + + Returns + ------- + numpy.ndarray (nmo2, nmo2) + Output ERIs, shape depends on `compact`. + """ + + if self._cderi is None: + self.build() + + nao = self.cell.nao_nr() + naux = self.get_naoaux() + kptijkl = _format_kpts(kpts, n=4) + if not fft_ao2mo._iskconserv(self.cell, kptijkl): + self.log.warning( + self.cell, "Momentum conservation not found in " "the given k-points %s", kptijkl + ) + return np.zeros((nao, nao, nao, nao)) + ki, kj, kk, kl = (self.kpt_hash[hash_array(kpt)][0] for kpt in kptijkl) + + if isinstance(mo_coeffs, np.ndarray) and mo_coeffs.ndim == 2: + mo_coeffs = (mo_coeffs,) * 4 + all_real = not any(np.iscomplexobj(mo) for mo in mo_coeffs) + + Lij = _fao2mo(self._cderi[ki, kj], mo_coeffs[0], mo_coeffs[1]) + Lkl = _fao2mo(self._cderi[kk, kl], mo_coeffs[2], mo_coeffs[3]) + if gamma_point(kptijkl) and all_real: + Lij = Lij.real + Lkl = Lkl.real + if compact and iden_coeffs(mo_coeffs[0], mo_coeffs[1]): + Lij = lib.pack_tril(Lij) + if compact and iden_coeffs(mo_coeffs[2], mo_coeffs[3]): + Lkl = lib.pack_tril(Lkl) + Lij = Lij.reshape(naux, -1) + Lkl = Lkl.reshape(naux, -1) + + eri = np.dot(Lij.T, Lkl) + + return eri + + get_mo_eri = ao2mo + + def get_3c_eri(self, kpts, compact=COMPACT): + """ + Get the three-center AO electronic repulsion integrals at a + given k-point. + + Parameters + ---------- + kpts : numpy.ndarray + k-points to build the integrals at. + compact : bool, optional + If True, compress the auxiliaries according to symmetries + where appropriate. + + Returns + ------- + numpy.ndarray (naux, nao2) + Output ERIs, shape depends on `compact`. 
+ """ + + if self._cderi is None: + self.build() + + naux = self.get_naoaux() + kptij = _format_kpts(kpts, n=2) + ki, kj = (self.kpt_hash[hash_array(kpt)][0] for kpt in kptij) + + Lpq = self._cderi[ki, kj] + if gamma_point(kptij): + Lpq = Lpq.real + if compact: + Lpq = lib.pack_tril(Lpq) + Lpq = Lpq.reshape(naux, -1) + + return Lpq + + get_ao_3c_eri = get_3c_eri + + def ao2mo_3c(self, mo_coeffs, kpts, compact=COMPACT): + """ + Get the three-center MO electronic repulsion integrals at a + given k-point. + + Parameters + ---------- + mo_coeffs : numpy.ndarray + MO coefficients to rotate into. + kpts : numpy.ndarray + k-points to build the integrals at. + compact : bool, optional + If True, compress the auxiliaries according to symmetries + where appropriate. + + Returns + ------- + numpy.ndarray (naux, nmo2) + Output ERIs, shape depends on `compact`. + """ + + if self._cderi is None: + self.build() + + naux = self.get_naoaux() + kptij = _format_kpts(kpts, n=2) + ki, kj = (self.kpt_hash[hash_array(kpt)][0] for kpt in kptij) + + if isinstance(mo_coeffs, np.ndarray) and mo_coeffs.ndim == 2: + mo_coeffs = (mo_coeffs,) * 2 + all_real = not any(np.iscomplexobj(mo) for mo in mo_coeffs) + + Lij = _fao2mo(self._cderi[ki, kj], *mo_coeffs) + if gamma_point(kptij) and all_real: + Lij = Lij.real + if compact and iden_coeffs(*mo_coeffs): + Lij = lib.pack_tril(Lij) + Lij = Lij.reshape(naux, -1) + + return Lij + + get_mo_3c_eri = ao2mo_3c + + def save(self, file): + """ + Dump the integrals to disk. + + Parameters + ---------- + file : str + Output file to dump integrals to. + """ + + if self._cderi is None: + raise ValueError("_cderi must have been built in order to save to disk.") + + with open(file, "wb") as f: + np.save(f, self._cderi) + + return self + + def load(self, file): + """ + Load the integrals from disk. Must have called self.build() first. + + Parameters + ---------- + file : str + Output file to load integrals from. + """ + + if self.auxcell is None: + raise ValueError( + "Must call GDF.build() before loading integrals from disk - use " + "keyword with_j3c=False to avoid re-building the integrals." + ) + + with open(file, "rb") as f: + _cderi = np.load(f) + + self._cderi = _cderi + + return self + + @property + def max_memory(self): + return self.cell.max_memory + + @property + def blockdim(self): + return self.get_naoaux() + + @property + def exxdiv(self): + # To mimic KSCF in get_coulG + return None + + # Cached properties: + + def get_nuc(self, kpts=None): + if not (kpts is None or kpts is self.kpts or np.allclose(kpts, self.kpts)): + return super().get_nuc(kpts) + if self._get_nuc is None: + self._get_nuc = super().get_nuc(kpts) + return self._get_nuc + + def get_pp(self, kpts=None): + if not (kpts is None or kpts is self.kpts or np.allclose(kpts, self.kpts)): + return super().get_pp(kpts) + if self._get_pp is None: + self._get_pp = super().get_pp(kpts) + return self._get_pp + + def get_ovlp(self): + if self._ovlp is None: + self._ovlp = self.cell.pbc_intor("int1e_ovlp", hermi=1, kpts=self.kpts) + return self._ovlp + + @property + def madelung(self): + if self._madelung is None: + self._madelung = tools.pbc.madelung(self.cell, self.kpts) + return self._madelung + + +DF = GDF + +del COMPACT diff --git a/momentGW/pbc/evgw.py b/momentGW/pbc/evgw.py new file mode 100644 index 00000000..f7714553 --- /dev/null +++ b/momentGW/pbc/evgw.py @@ -0,0 +1,56 @@ +""" +Spin-restricted eigenvalue self-consistent GW via self-energy moment +constraints for periodic systems. 
+""" + +import unittest + +import numpy as np +import pytest +from pyscf.agf2 import mpi_helper +from pyscf.lib import logger +from pyscf.pbc import dft, gto +from pyscf.pbc.tools import k2gamma + +from momentGW import util +from momentGW.evgw import evGW +from momentGW.pbc.gw import KGW + + +class evKGW(KGW, evGW): + __doc__ = evGW.__doc__.replace("molecules", "periodic systems", 1) + + _opts = util.list_union(KGW._opts, evGW._opts) + + @property + def name(self): + return "evKG%sW%s" % ("0" if self.g0 else "", "0" if self.w0 else "") + + def check_convergence(self, mo_energy, mo_energy_prev, th, th_prev, tp, tp_prev): + """Check for convergence, and print a summary of changes.""" + + if th_prev is None: + th_prev = np.zeros_like(th) + if tp_prev is None: + tp_prev = np.zeros_like(tp) + + error_homo = max( + abs(mo[n - 1] - mo_prev[n - 1]) + for mo, mo_prev, n in zip(mo_energy, mo_energy_prev, self.nocc) + ) + error_lumo = max( + abs(mo[n] - mo_prev[n]) for mo, mo_prev, n in zip(mo_energy, mo_energy_prev, self.nocc) + ) + + error_th = max(abs(self._moment_error(t, t_prev)) for t, t_prev in zip(th, th_prev)) + error_tp = max(abs(self._moment_error(t, t_prev)) for t, t_prev in zip(tp, tp_prev)) + + logger.info(self, "Change in QPs: HOMO = %.6g LUMO = %.6g", error_homo, error_lumo) + logger.info(self, "Change in moments: occ = %.6g vir = %.6g", error_th, error_tp) + + return self.conv_logical( + ( + max(error_homo, error_lumo) < self.conv_tol, + max(error_th, error_tp) < self.conv_tol_moms, + ) + ) diff --git a/momentGW/pbc/fock.py b/momentGW/pbc/fock.py new file mode 100644 index 00000000..ebd90fbc --- /dev/null +++ b/momentGW/pbc/fock.py @@ -0,0 +1,262 @@ +""" +Fock matrix and static self-energy parts with periodic boundary +conditions. +""" + +import numpy as np +import scipy.optimize +from pyscf import lib +from pyscf.agf2 import mpi_helper +from pyscf.lib import logger + +from momentGW import util + + +class ChemicalPotentialError(ValueError): + pass + + +def _gradient(x, se, fock, nelec, occupancy=2, buf=None): + """Gradient of the number of electrons w.r.t shift in auxiliary + energies. + """ + # TODO buf + + ws, vs = zip(*[s.eig(f, chempot=x) for s, f in zip(se, fock)]) + chempot, error = search_chempot(ws, vs, se[0].nphys, nelec) + + nmo = se[0].nphys + + ddm = 0.0 + for i in mpi_helper.nrange(len(ws)): + w, v = ws[i], vs[i] + nmo = se[i].nphys + nocc = np.sum(w < chempot) + h1 = -np.dot(v[nmo:, nocc:].T.conj(), v[nmo:, :nocc]) + zai = -h1 / lib.direct_sum("i-a->ai", w[:nocc], w[nocc:]) + ddm += lib.einsum("ai,pa,pi->", zai, v[:nmo, nocc:], v[:nmo, :nocc].conj()).real * 4 + + ddm = mpi_helper.allreduce(ddm) + grad = occupancy * error * ddm + + return error**2, grad + + +def search_chempot_constrained(w, v, nphys, nelec, occupancy=2): + """ + Search for a chemical potential, constraining the k-point + dependent occupancy to ensure no crossover of states. If this + is not possible, a ValueError will be raised. 
+ """ + + nmo = max(len(x) for x in w) + nkpts = len(w) + sum0 = sum1 = 0.0 + + for i in range(nmo): + n = 0 + for k in range(nkpts): + n += np.dot(v[k][:nphys, i].conj().T, v[k][:nphys, i]).real + n *= occupancy + sum0, sum1 = sum1, sum1 + n + + if i > 0: + if sum0 <= nelec and nelec <= sum1: + break + + if abs(sum0 - nelec) < abs(sum1 - nelec): + homo = i - 1 + error = nelec - sum0 + else: + homo = i + error = nelec - sum1 + + lumo = homo + 1 + + if lumo == nmo: + chempot = np.max(w) + 1e-6 + else: + e_homo = np.max([x[homo] for x in w]) + e_lumo = np.min([x[lumo] for x in w]) + + if e_homo > e_lumo: + raise ChemicalPotentialError( + "Could not find a chemical potential under " + "the constrain of equal k-point occupancy." + ) + + chempot = 0.5 * (e_homo + e_lumo) + + return chempot, error + + +def search_chempot_unconstrained(w, v, nphys, nelec, occupancy=2): + """ + Search for a chemical potential, without constraining the + k-point dependent occupancy. + """ + + kidx = np.concatenate([[i] * x.size for i, x in enumerate(w)]) + w = np.concatenate(w) + v = np.hstack([vk[:nphys] for vk in v]) + + mask = np.argsort(w) + kidx = kidx[mask] + w = w[mask] + v = v[:, mask] + + nmo = v.shape[-1] + sum0 = sum1 = 0.0 + + for i in range(nmo): + k = kidx[i] + n = occupancy * np.dot(v[:nphys, i].conj().T, v[:nphys, i]).real + sum0, sum1 = sum1, sum1 + n + + if i > 0: + if sum0 <= nelec and nelec <= sum1: + break + + if abs(sum0 - nelec) < abs(sum1 - nelec): + homo = i - 1 + error = nelec - sum0 + else: + homo = i + error = nelec - sum1 + + lumo = homo + 1 + + if lumo == len(w): + chempot = w[homo] + 1e-6 + else: + chempot = 0.5 * (w[homo] + w[lumo]) + + return chempot, error + + +def search_chempot(w, v, nphys, nelec, occupancy=2): + """ + Search for a chemical potential, first trying with k-point + restraints and if that doesn't succeed then without. + """ + + try: + chempot, error = search_chempot_constrained(w, v, nphys, nelec, occupancy=occupancy) + except ChemicalPotentialError: + chempot, error = search_chempot_unconstrained(w, v, nphys, nelec, occupancy=occupancy) + + return chempot, error + + +def minimize_chempot(se, fock, nelec, occupancy=2, x0=0.0, tol=1e-6, maxiter=200): + """ + Optimise the shift in auxiliary energies to satisfy the electron + number, ensuring that the same shift is applied at all k-points. + """ + + tol = tol**2 # we minimize the squared error + dtype = np.result_type(*[s.coupling.dtype for s in se], *[f.dtype for f in fock]) + nkpts = len(se) + nphys = max([s.nphys for s in se]) + naux = max([s.naux for s in se]) + buf = np.zeros(((nphys + naux) ** 2,), dtype=dtype) + fargs = (se, fock, nelec, occupancy, buf) + + options = dict(maxiter=maxiter, ftol=tol, xtol=tol, gtol=tol) + kwargs = dict(x0=x0, method="TNC", jac=True, options=options) + fun = _gradient + + opt = scipy.optimize.minimize(fun, args=fargs, **kwargs) + + for s in se: + s.energy -= opt.x + + ws, vs = zip(*[s.eig(f) for s, f in zip(se, fock)]) + chempot = search_chempot(ws, vs, se[0].nphys, nelec, occupancy=occupancy)[0] + + for s in se: + s.chempot = chempot + + return se, opt + + +def fock_loop( + gw, + gf, + se, + integrals=None, + fock_diis_space=10, + fock_diis_min_space=1, + conv_tol_nelec=1e-6, + conv_tol_rdm1=1e-8, + max_cycle_inner=100, + max_cycle_outer=20, +): + """Self-consistent loop for the density matrix via the HF self- + consistent field. 
+ """ + + if integrals is None: + integrals = gw.ao2mo() + + h1e = lib.einsum("kpq,kpi,kqj->kij", gw._scf.get_hcore(), np.conj(gw.mo_coeff), gw.mo_coeff) + nmo = gw.nmo + nocc = gw.nocc + naux = [s.naux for s in se] + nqmo = [nmo + n for n in naux] + nelec = [n * 2 for n in nocc] + kpts = gw.kpts + + diis = util.DIIS() + diis.space = fock_diis_space + diis.min_space = fock_diis_min_space + gf_to_dm = lambda gf: np.array([g.get_occupied().moment(0) for g in gf]) * 2.0 + rdm1 = gf_to_dm(gf) + fock = integrals.get_fock(rdm1, h1e) + + buf = np.zeros((max(nqmo), max(nqmo)), dtype=complex) + converged = False + opts = dict(tol=conv_tol_nelec, maxiter=max_cycle_inner) + rdm1_prev = 0 + + for niter1 in range(1, max_cycle_outer + 1): + se, opt = minimize_chempot(se, fock, sum(nelec), x0=se[0].chempot, **opts) + + for niter2 in range(1, max_cycle_inner + 1): + w, v = zip(*[s.eig(f, chempot=0.0, out=buf) for s, f in zip(se, fock)]) + chempot, nerr = search_chempot(w, v, nmo, sum(nelec)) + + for k in kpts.loop(1): + se[k].chempot = chempot + w, v = se[k].eig(fock[k], out=buf) + gf[k] = gf[k].__class__(w, v[:nmo], chempot=se[k].chempot) + + rdm1 = gf_to_dm(gf) + fock = integrals.get_fock(rdm1, h1e) + fock = diis.update(fock, xerr=None) + + if niter2 > 1: + derr = np.max(np.absolute(rdm1 - rdm1_prev)) + if derr < conv_tol_rdm1: + break + + rdm1_prev = rdm1.copy() + + logger.debug1( + gw, "fock loop %d cycles = %d dN = %.3g |ddm| = %.3g", niter1, niter2, nerr, derr + ) + + if derr < conv_tol_rdm1 and abs(nerr) < conv_tol_nelec: + converged = True + break + + logger.info( + gw, + "fock converged = %s chempot (Γ) = %.9g dN = %.3g |ddm| = %.3g", + converged, + se[0].chempot, + nerr, + derr, + ) + + return gf, se, converged diff --git a/momentGW/pbc/gw.py b/momentGW/pbc/gw.py new file mode 100644 index 00000000..e2c15fe3 --- /dev/null +++ b/momentGW/pbc/gw.py @@ -0,0 +1,220 @@ +""" +Spin-restricted one-shot GW via self-energy moment constraints for +periodic systems. +""" + +import numpy as np +from dyson import MBLSE, MixedMBLSE, NullLogger +from pyscf import lib +from pyscf.agf2 import GreensFunction, SelfEnergy +from pyscf.lib import logger +from pyscf.pbc import scf + +from momentGW import util +from momentGW.gw import GW +from momentGW.pbc.base import BaseKGW +from momentGW.pbc.fock import fock_loop, minimize_chempot, search_chempot +from momentGW.pbc.ints import KIntegrals +from momentGW.pbc.tda import TDA + + +class KGW(BaseKGW, GW): + __doc__ = BaseKGW.__doc__.format( + description="Spin-restricted one-shot GW via self-energy moment constraints for " + + "periodic systems.", + extra_parameters="", + ) + + _opts = util.list_union(BaseKGW._opts, GW._opts) + + @property + def name(self): + return "KG0W0" + + def ao2mo(self, transform=True): + """Get the integrals.""" + + integrals = KIntegrals( + self.with_df, + self.kpts, + self.mo_coeff, + self.mo_occ, + compression=self.compression, + compression_tol=self.compression_tol, + store_full=self.has_fock_loop, + ) + if transform: + integrals.transform() + + return integrals + + def build_se_moments(self, nmom_max, integrals, **kwargs): + """Build the moments of the self-energy. + + Parameters + ---------- + nmom_max : int + Maximum moment number to calculate. + integrals : KIntegrals + Density-fitted integrals. + + See functions in `momentGW.rpa` for `kwargs` options. + + Returns + ------- + se_moments_hole : numpy.ndarray + Moments of the hole self-energy at each k-point. If + `self.diagonal_se`, non-diagonal elements are set to zero. 
+ se_moments_part : numpy.ndarray + Moments of the particle self-energy at each k-point. If + `self.diagonal_se`, non-diagonal elements are set to zero. + """ + + if self.polarizability == "dtda": + tda = TDA(self, nmom_max, integrals, **kwargs) + return tda.kernel() + else: + raise NotImplementedError + + def solve_dyson(self, se_moments_hole, se_moments_part, se_static, integrals=None): + """Solve the Dyson equation due to a self-energy resulting + from a list of hole and particle moments, along with a static + contribution. + + Also finds a chemical potential best satisfying the physical + number of electrons. If `self.optimise_chempot`, this will + shift the self-energy poles relative to the Green's function, + which is a partial self-consistency that better conserves the + particle number. + + If `self.fock_loop`, this function will also require that the + outputted Green's function is self-consistent with respect to + the corresponding density and Fock matrix. + + Parameters + ---------- + se_moments_hole : numpy.ndarray + Moments of the hole self-energy at each k-point. + se_moments_part : numpy.ndarray + Moments of the particle self-energy at each k-point. + se_static : numpy.ndarray + Static part of the self-energy at each k-point. + integrals : KIntegrals, optional + Density-fitted integrals. Required if `self.fock_loop` + is `True`. Default value is `None`. + + Returns + ------- + gf : list of pyscf.agf2.GreensFunction + Green's function at each k-point. + se : list of pyscf.agf2.SelfEnergy + Self-energy at each k-point. + """ + + nlog = NullLogger() + + se = [] + gf = [] + for k in self.kpts.loop(1): + solver_occ = MBLSE(se_static[k], np.array(se_moments_hole[k]), log=nlog) + solver_occ.kernel() + + solver_vir = MBLSE(se_static[k], np.array(se_moments_part[k]), log=nlog) + solver_vir.kernel() + + solver = MixedMBLSE(solver_occ, solver_vir) + e_aux, v_aux = solver.get_auxiliaries() + se.append(SelfEnergy(e_aux, v_aux)) + + logger.debug( + self, + "Error in moments [kpt %d]: occ = %.6g vir = %.6g", + k, + *self.moment_error(se_moments_hole[k], se_moments_part[k], se[k]), + ) + + gf.append(se[k].get_greens_function(se_static[k])) + + if self.optimise_chempot: + se, opt = minimize_chempot(se, se_static, sum(self.nocc) * 2) + + if self.fock_loop: + gf, se, conv = fock_loop(self, gf, se, integrals=integrals, **self.fock_opts) + + w = [g.energy for g in gf] + v = [g.coupling for g in gf] + cpt, error = search_chempot(w, v, self.nmo, sum(self.nocc) * 2) + + for k in self.kpts.loop(1): + se[k].chempot = cpt + gf[k].chempot = cpt + logger.info(self, "Error in number of electrons [kpt %d]: %.5g", k, error) + + return gf, se + + def make_rdm1(self, gf=None): + """Get the first-order reduced density matrix at each k-point.""" + + if gf is None: + gf = self.gf + if gf is None: + gf = [GreensFunction(self.mo_energy, np.eye(self.nmo))] + + return np.array([g.make_rdm1() for g in gf]) + + def interpolate(self, mf, nmom_max): + """ + Interpolate the object to a new k-point grid, represented by a + new mean-field object. + + Parameters + ---------- + mf : pyscf.pbc.scf.KSCF + Mean-field object on new k-point mesh. + nmom_max : int + Maximum moment number to calculate. + + Returns + ------- + other : __class__ + Interpolated object. 
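# A toy sketch of the final step of the per-k-point Dyson solve performed
# above: `se.get_greens_function(se_static)` amounts to diagonalising the
# static self-energy (Fock-like) matrix extended by the auxiliary space.
# The dimensions and matrices here are illustrative.
import numpy as np

nmo, naux = 3, 5
fock = np.diag([-0.6, -0.2, 0.4])                 # static part at one k-point
e_aux = np.linspace(-2.0, 2.0, naux)              # auxiliary (pole) energies
v_aux = np.random.random((nmo, naux)) * 0.1       # couplings to the physical space

ext = np.block([[fock, v_aux], [v_aux.T, np.diag(e_aux)]])
energies, rot = np.linalg.eigh(ext)
couplings = rot[:nmo]                             # quasiparticle weights in the physical space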
+ """ + + if len(mf.kpts) % len(self.kpts) != 0: + raise ValueError("Size of interpolation grid must be a multiple of the old grid.") + + other = self.__class__(mf) + other.__dict__.update({key: getattr(self, key) for key in self._opts}) + sc = lib.einsum("kpq,kqi->kpi", mf.get_ovlp(), mf.mo_coeff) + + def interp(m): + # Interpolate part of the moments via the AO basis + m = lib.einsum("knij,kpi,kqj->knpq", m, self.mo_coeff, self.mo_coeff.conj()) + m = np.stack( + [self.kpts.interpolate(other.kpts, m[:, n]) for n in range(nmom_max + 1)], + axis=1, + ) + m = lib.einsum("knpq,kpi,kqj->knij", m, sc.conj(), sc) + return m + + # Get the moments of the self-energy on the small k-point grid + th = np.array([se.get_occupied().moment(range(nmom_max + 1)) for se in self.se]) + tp = np.array([se.get_virtual().moment(range(nmom_max + 1)) for se in self.se]) + + # Interpolate the moments + th = interp(th) + tp = interp(tp) + + # Get the static self-energy on the fine k-point grid + integrals = other.ao2mo(transform=False) + se_static = other.build_se_static(integrals) + + # Solve the Dyson equation on the fine k-point grid + gf, se = other.solve_dyson(th, tp, se_static, integrals=integrals) + + # Set attributes + # TODO handle _qp_energy if not None + other.gf = gf + other.se = se + + return other diff --git a/momentGW/pbc/ints.py b/momentGW/pbc/ints.py new file mode 100644 index 00000000..3e5845a6 --- /dev/null +++ b/momentGW/pbc/ints.py @@ -0,0 +1,486 @@ +""" +Integral helpers with periodic boundary conditions. +""" + +from collections import defaultdict + +import numpy as np +from pyscf import lib +from pyscf.agf2 import mpi_helper +from pyscf.lib import logger +from pyscf.pbc import tools + +from momentGW.ints import Integrals + + +class KIntegrals(Integrals): + """ + Container for the integrals required for KGW methods. + """ + + def __init__( + self, + with_df, + kpts, + mo_coeff, + mo_occ, + compression="ia", + compression_tol=1e-10, + store_full=False, + ): + Integrals.__init__( + self, + with_df, + mo_coeff, + mo_occ, + compression=compression, + compression_tol=compression_tol, + store_full=store_full, + ) + + self.kpts = kpts + + self._madelung = None + + def get_compression_metric(self): + """ + Return the compression metric. 
+ """ + + # TODO MPI + + compression = self._parse_compression() + if not compression: + return None + + cput0 = (logger.process_clock(), logger.perf_counter()) + logger.info(self, f"Computing compression metric for {self.__class__.__name__}") + + prod = np.zeros((len(self.kpts), self.naux_full, self.naux_full), dtype=complex) + + # Loop over required blocks + for key in sorted(compression): + logger.debug(self, f"Transforming {key} block") + ci, cj = [ + { + "o": [c[:, o > 0] for c, o in zip(self.mo_coeff, self.mo_occ)], + "v": [c[:, o == 0] for c, o in zip(self.mo_coeff, self.mo_occ)], + "i": [c[:, o > 0] for c, o in zip(self.mo_coeff_w, self.mo_occ_w)], + "a": [c[:, o == 0] for c, o in zip(self.mo_coeff_w, self.mo_occ_w)], + }[k] + for k in key + ] + ni = [c.shape[-1] for c in ci] + nj = [c.shape[-1] for c in cj] + + for q, ki in self.kpts.loop(2): + kj = self.kpts.member(self.kpts.wrap_around(self.kpts[ki] - self.kpts[q])) + + Lxy = np.zeros((self.naux_full, ni[ki] * nj[kj]), dtype=complex) + b1 = 0 + for block in self.with_df.sr_loop((ki, kj), compact=False): + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + logger.debug(self, f" Block [{ki}, {kj}, {b0}:{b1}]") + + tmp = lib.einsum("Lpq,pi,qj->Lij", block, ci[ki].conj(), cj[kj]) + tmp = tmp.reshape(b1 - b0, -1) + Lxy[b0:b1] = tmp + + prod[q] += np.dot(Lxy, Lxy.T.conj()) / len(self.kpts) + + rot = np.empty((len(self.kpts),), dtype=object) + if mpi_helper.rank == 0: + for q in self.kpts.loop(1): + e, v = np.linalg.eigh(prod[q]) + mask = np.abs(e) > self.compression_tol + rot[q] = v[:, mask] + else: + for q in self.kpts.loop(1): + rot[q] = np.zeros((0,), dtype=complex) + del prod + + for q in self.kpts.loop(1): + rot[q] = mpi_helper.bcast(rot[q], root=0) + + if rot[q].shape[-1] == self.naux_full: + logger.info(self, f"No compression found at q-point {q}") + rot[q] = None + else: + logger.info( + self, + f"Compressed auxiliary space from {self.naux_full} to {rot[q].shape[-1]} and q-point {q}", + ) + logger.timer(self, "compression metric", *cput0) + + return rot + + def transform(self, do_Lpq=None, do_Lpx=True, do_Lia=True): + """ + Initialise the integrals, building: + - Lpq: the full (aux, MO, MO) array if `store_full` + - Lpx: the compressed (aux, MO, MO) array + - Lia: the compressed (aux, occ, vir) array + """ + + # Get the compression metric + if self._rot is None: + self._rot = self.get_compression_metric() + rot = self._rot + if rot is None: + eye = np.eye(self.naux_full) + rot = defaultdict(lambda: eye) + for q in self.kpts.loop(1): + if rot[q] is None: + rot[q] = np.eye(self.naux_full) + + do_Lpq = self.store_full if do_Lpq is None else do_Lpq + if not any([do_Lpq, do_Lpx, do_Lia]): + return + + cput0 = (logger.process_clock(), logger.perf_counter()) + logger.info(self, f"Transforming {self.__class__.__name__}") + + Lpq = {} + Lpx = {} + Lia = {} + Lai = {} + + for q in self.kpts.loop(1): + for ki in self.kpts.loop(1, mpi=True): + kj = self.kpts.member(self.kpts.wrap_around(self.kpts[q] + self.kpts[ki])) + + # Get the slices on the current process and initialise the arrays + Lpq_k = ( + np.zeros((self.naux_full, self.nmo, self.nmo), dtype=complex) + if do_Lpq + else None + ) + Lpx_k = ( + np.zeros((self.naux[q], self.nmo, self.nmo_g[kj]), dtype=complex) + if do_Lpx + else None + ) + Lia_k = ( + np.zeros((self.naux[q], self.nocc_w[ki] * self.nvir_w[kj]), dtype=complex) + 
if do_Lia + else None + ) + Lai_k = ( + np.zeros((self.naux[q], self.nocc_w[kj] * self.nvir_w[ki]), dtype=complex) + if do_Lia + else None + ) + + # Build the integrals blockwise + b1 = 0 + for block in self.with_df.sr_loop((ki, kj), compact=False): # TODO lock I/O + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + logger.debug(self, f" Block [{ki}, {kj}, {b0}:{b1}]") + + # If needed, rotate the full (L|pq) array + if do_Lpq: + logger.debug( + self, f"(L|pq) size: ({self.naux_full}, {self.nmo}, {self.nmo})" + ) + coeffs = (self.mo_coeff[ki], self.mo_coeff[kj]) + Lpq_k[b0:b1] = lib.einsum( + "Lpq,pi,qj->Lij", block, coeffs[0].conj(), coeffs[1] + ) + + # Compress the block + block_comp = lib.einsum("L...,LQ->Q...", block, rot[q][b0:b1].conj()) + + # Build the compressed (L|px) array + if do_Lpx: + logger.debug( + self, f"(L|px) size: ({self.naux[q]}, {self.nmo}, {self.nmo_g[ki]})" + ) + coeffs = (self.mo_coeff[ki], self.mo_coeff_g[kj]) + Lpx_k += lib.einsum( + "Lpq,pi,qj->Lij", block_comp, coeffs[0].conj(), coeffs[1] + ) + + # Build the compressed (L|ia) array + if do_Lia: + logger.debug( + self, + f"(L|ia) size: ({self.naux[q]}, {self.nocc_w[ki] * self.nvir_w[kj]})", + ) + coeffs = ( + self.mo_coeff_w[ki][:, : self.nocc_w[ki]], + self.mo_coeff_w[kj][:, self.nocc_w[kj] :], + ) + tmp = lib.einsum("Lpq,pi,qj->Lij", block_comp, coeffs[0].conj(), coeffs[1]) + tmp = tmp.reshape(self.naux[q], -1) + Lia_k += tmp + + if do_Lpq: + Lpq[ki, kj] = Lpq_k + if do_Lpx: + Lpx[ki, kj] = Lpx_k + if do_Lia: + Lia[ki, kj] = Lia_k + else: + continue + + # Inverse q for ki <-> kj + q = self.kpts.member(self.kpts.wrap_around(-self.kpts[q])) + + # Build the integrals blockwise + b1 = 0 + for block in self.with_df.sr_loop((kj, ki), compact=False): # TODO lock I/O + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + logger.debug(self, f" Block [{ki}, {kj}, {b0}:{b1}]") + + # Compress the block + block_comp = lib.einsum("L...,LQ->Q...", block, rot[q][b0:b1].conj()) + + # Build the compressed (L|ai) array + logger.debug( + self, f"(L|ai) size: ({self.naux[q]}, {self.nvir_w[kj] * self.nocc_w[ki]})" + ) + coeffs = ( + self.mo_coeff_w[kj][:, self.nocc_w[kj] :], + self.mo_coeff_w[ki][:, : self.nocc_w[ki]], + ) + tmp = lib.einsum("Lpq,pi,qj->Lij", block_comp, coeffs[0].conj(), coeffs[1]) + tmp = tmp.swapaxes(1, 2) + tmp = tmp.reshape(self.naux[q], -1) + Lai_k += tmp + + Lai[ki, kj] = Lai_k + + if do_Lpq: + self._blocks["Lpq"] = Lpq + if do_Lpx: + self._blocks["Lpx"] = Lpx + if do_Lia: + self._blocks["Lia"] = Lia + self._blocks["Lai"] = Lai + + logger.timer(self, "transform", *cput0) + + def get_j(self, dm, basis="mo"): + """Build the J matrix.""" + + assert basis in ("ao", "mo") + + vj = np.zeros_like(dm, dtype=complex) + + if self.store_full and basis == "mo": + buf = 0.0 + for kk in self.kpts.loop(1, mpi=True): + buf += lib.einsum("Lpq,pq->L", self.Lpq[kk, kk], dm[kk].conj()) + + buf = mpi_helper.allreduce(buf) + + for ki in self.kpts.loop(1, mpi=True): + vj[ki] += lib.einsum("Lpq,L->pq", self.Lpq[ki, ki], buf) + + vj = mpi_helper.allreduce(vj) + + else: + if basis == "mo": + dm = lib.einsum("kij,kpi,kqj->kpq", dm, self.mo_coeff, np.conj(self.mo_coeff)) + + buf = np.zeros((self.naux_full,), dtype=complex) + 
+ for kk in self.kpts.loop(1, mpi=True): + b1 = 0 + for block in self.with_df.sr_loop((kk, kk), compact=False): # TODO lock I/O + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + buf[b0:b1] += lib.einsum("Lpq,pq->L", block, dm[kk].conj()) + + buf = mpi_helper.allreduce(buf) + + for ki in self.kpts.loop(1, mpi=True): + b1 = 0 + for block in self.with_df.sr_loop((ki, ki), compact=False): + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + vj[ki] += lib.einsum("Lpq,L->pq", block, buf[b0:b1]) + + vj = mpi_helper.allreduce(vj) + + if basis == "mo": + vj = lib.einsum("kpq,kpi,kqj->kij", vj, np.conj(self.mo_coeff), self.mo_coeff) + + vj /= len(self.kpts) + + return vj + + def get_k(self, dm, basis="mo", ewald=False): + """Build the K matrix.""" + + assert basis in ("ao", "mo") + + vk = np.zeros_like(dm, dtype=complex) + + if self.store_full and basis == "mo": + # TODO is there a better way to distribute this? + for p0, p1 in lib.prange(0, self.naux_full, 240): + buf = np.zeros( + (len(self.kpts), len(self.kpts), p1 - p0, self.nmo, self.nmo), dtype=complex + ) + for ki in self.kpts.loop(1, mpi=True): + for kk in self.kpts.loop(1): + buf[kk, ki] = lib.einsum("Lpq,qr->Lrp", self.Lpq[ki, kk][p0:p1], dm[kk]) + + buf = mpi_helper.allreduce(buf) + + for ki in self.kpts.loop(1): + for kk in self.kpts.loop(1, mpi=True): + vk[ki] += lib.einsum("Lrp,Lrs->ps", buf[kk, ki], self.Lpq[kk, ki][p0:p1]) + + vk = mpi_helper.allreduce(vk) + + else: + if basis == "mo": + dm = lib.einsum("kij,kpi,kqj->kpq", dm, self.mo_coeff, np.conj(self.mo_coeff)) + + for kk in self.kpts.loop(1): + buf = np.zeros((len(self.kpts), self.naux_full, self.nmo, self.nmo), dtype=complex) + for ki in self.kpts.loop(1, mpi=True): + b1 = 0 + for block in self.with_df.sr_loop((ki, kk), compact=False): + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + buf[ki, b0:b1] = lib.einsum("Lpq,qr->Lrp", block, dm[kk]) + + buf = mpi_helper.allreduce(buf) + + for ki in self.kpts.loop(1, mpi=True): + b1 = 0 + for block in self.with_df.sr_loop((kk, ki), compact=False): + if block[2] == -1: + raise NotImplementedError("Low dimensional integrals") + block = block[0] + block[1] * 1.0j + block = block.reshape(self.naux_full, self.nmo, self.nmo) + b0, b1 = b1, b1 + block.shape[0] + vk[ki] += lib.einsum("Lrp,Lrs->ps", buf[ki, b0:b1], block) + + vk = mpi_helper.allreduce(vk) + + if basis == "mo": + vk = lib.einsum("kpq,kpi,kqj->kij", vk, np.conj(self.mo_coeff), self.mo_coeff) + + vk /= len(self.kpts) + + if ewald: + vk += self.get_ewald(dm, basis=basis) + + return vk + + def get_ewald(self, dm, basis="mo"): + """Build the Ewald exchange divergence matrix.""" + + assert basis in ("ao", "mo") + + if basis == "mo": + ovlp = defaultdict(lambda: np.eye(self.nmo)) + else: + ovlp = self.with_df.cell.pbc_intor("int1e_ovlp", hermi=1, kpts=self.kpts._kpts) + + ew = lib.einsum("kpq,kpi,kqj->kij", dm, ovlp.conj(), ovlp) + + return ew + + @property + def madelung(self): + """ + Return the Madelung constant for the lattice. 
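# A gamma-point sketch of the density-fitted J and K contractions that
# `get_j`/`get_k` above distribute over k-points, MPI ranks and auxiliary
# blocks; arrays are random stand-ins.
import numpy as np

naux, nao = 40, 10
Lpq = np.random.random((naux, nao, nao))
dm = np.random.random((nao, nao))
dm = dm + dm.T

vj = np.einsum("Lpq,rs,Lrs->pq", Lpq, dm, Lpq)   # J_pq = sum_rs (pq|rs) D_rs
vk = np.einsum("Lpr,rs,Lsq->pq", Lpq, dm, Lpq)   # K_pq = sum_rs (pr|sq) D_rs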
+ """ + if self._madelung is None: + self._madeling = tools.pbc.madelung(self.with_df.cell, self.kpts._kpts) + return self._madelung + + @property + def Lai(self): + """ + Return the compressed (aux, W vir, W occ) array. + """ + return self._blocks["Lai"] + + @property + def nmo(self): + """ + Return the number of MOs. + """ + assert len({c.shape[-1] for c in self.mo_coeff}) == 1 + return self.mo_coeff[0].shape[-1] + + @property + def nocc(self): + """ + Return the number of occupied MOs. + """ + return [np.sum(o > 0) for o in self.mo_occ] + + @property + def nvir(self): + """ + Return the number of virtual MOs. + """ + return [np.sum(o == 0) for o in self.mo_occ] + + @property + def nmo_g(self): + """ + Return the number of MOs for the Green's function. + """ + return [c.shape[-1] for c in self.mo_coeff_g] + + @property + def nmo_w(self): + """ + Return the number of MOs for the screened Coulomb interaction. + """ + return [c.shape[-1] for c in self.mo_coeff_w] + + @property + def nocc_w(self): + """ + Return the number of occupied MOs for the screened Coulomb + interaction. + """ + return [np.sum(o > 0) for o in self.mo_occ_w] + + @property + def nvir_w(self): + """ + Return the number of virtual MOs for the screened Coulomb + interaction. + """ + return [np.sum(o == 0) for o in self.mo_occ_w] + + @property + def naux(self): + """ + Return the number of auxiliary basis functions, after the + compression. + """ + if self._rot is None: + return [self.naux_full] * len(self.kpts) + return [c.shape[-1] if c is not None else self.naux_full for c in self._rot] diff --git a/momentGW/pbc/kpts.py b/momentGW/pbc/kpts.py new file mode 100644 index 00000000..586919b9 --- /dev/null +++ b/momentGW/pbc/kpts.py @@ -0,0 +1,259 @@ +""" +k-points helper utilities. +""" + +import itertools + +import numpy as np +import scipy.linalg +from dyson import Lehmann +from pyscf import lib +from pyscf.agf2 import GreensFunction, SelfEnergy, mpi_helper +from pyscf.pbc.lib import kpts_helper + +# TODO make sure this is rigorous + + +def allow_single_kpt(output_is_kpts=False): + """ + Decorator to allow `kpts` arguments to be passed as a single + k-point. + """ + + def decorator(func): + def wrapper(self, kpts, *args, **kwargs): + shape = kpts.shape + kpts = kpts.reshape(-1, 3) + res = func(self, kpts, *args, **kwargs) + if output_is_kpts: + return res.reshape(shape) + else: + return res + + return wrapper + + return decorator + + +class KPoints: + def __init__(self, cell, kpts, tol=1e-8, wrap_around=True): + self.cell = cell + self.tol = tol + + if wrap_around: + kpts = self.wrap_around(kpts) + self._kpts = kpts + + self._kconserv = kpts_helper.get_kconserv(cell, kpts) + self._kpts_hash = {self.hash_kpts(kpt): k for k, kpt in enumerate(self._kpts)} + + @allow_single_kpt(output_is_kpts=True) + def get_scaled_kpts(self, kpts): + """ + Convert absolute k-points to scaled k-points for the current + cell. + """ + return self.cell.get_scaled_kpts(kpts) + + @allow_single_kpt(output_is_kpts=True) + def get_abs_kpts(self, kpts): + """ + Convert scaled k-points to absolute k-points for the current + cell. + """ + return self.cell.get_abs_kpts(kpts) + + @allow_single_kpt(output_is_kpts=True) + def wrap_around(self, kpts, window=(-0.5, 0.5)): + """ + Handle the wrapping of k-points into the first Brillouin zone. 
+ """ + + kpts = self.get_scaled_kpts(kpts) % 1.0 + kpts = lib.cleanse(kpts, axis=0, tol=self.tol) + kpts = kpts.round(decimals=self.tol_decimals) % 1.0 + + kpts[kpts < window[0]] += 1.0 + kpts[kpts >= window[1]] -= 1.0 + + kpts = self.get_abs_kpts(kpts) + + return kpts + + @allow_single_kpt(output_is_kpts=False) + def hash_kpts(self, kpts): + """ + Convert k-points to a unique, hashable representation. + """ + return tuple(np.rint(kpts / (self.tol)).ravel().astype(int)) + + @property + def tol_decimals(self): + """Convert the tolerance into a number of decimal places.""" + return int(-np.log10(self.tol + 1e-16)) + 2 + + def conserve(self, ki, kj, kk): + """ + Get the index of the k-point that conserves momentum. + """ + return self._kconserv[ki, kj, kk] + + def loop(self, depth, mpi=False): + """ + Iterate over all combinations of k-points up to a given depth. + """ + + if depth == 1: + seq = range(len(self)) + else: + seq = itertools.product(range(len(self)), repeat=depth) + + if mpi: + size = len(self) * depth + split = lambda x: x * size // mpi_helper.size + + p0 = split(mpi_helper.rank) + p1 = size if mpi_helper.rank == (mpi_helper.size - 1) else split(mpi_helper.rank + 1) + + seq = itertools.islice(seq, p0, p1) + + yield from seq + + def loop_size(self, depth=1): + """ + Return the size of `loop`. Without MPI, this is equivalent to + `len(self)**depth`. + """ + + size = len(self) * depth + split = lambda x: x * size // mpi_helper.size + + p0 = split(mpi_helper.rank) + p1 = size if mpi_helper.rank == (mpi_helper.size - 1) else split(mpi_helper.rank + 1) + + return p1 - p0 + + @allow_single_kpt(output_is_kpts=False) + def is_zero(self, kpts): + """ + Check if the k-point is zero. + """ + return np.max(np.abs(kpts)) < self.tol + + @property + def kmesh(self): + """Guess the k-mesh.""" + kpts = self.get_scaled_kpts(self._kpts).round(self.tol_decimals) + kmesh = [len(np.unique(kpts[:, i])) for i in range(3)] + return kmesh + + def translation_vectors(self): + """ + Translation vectors to construct supercell of which the gamma + point is identical to the k-point mesh of the primitive cell. + """ + + kmesh = self.kmesh + + r_rel = [np.arange(kmesh[i]) for i in range(3)] + r_vec_rel = lib.cartesian_prod(r_rel) + r_vec_abs = np.dot(r_vec_rel, self.cell.lattice_vectors()) + + return r_vec_abs + + def interpolate(self, other, fk): + """ + Interpolate a function `f` from the current grid of k-points to + those of `other`. Input must be in a localised basis, i.e. AOs. + + Parameters + ---------- + other : KPoints + The k-points to interpolate to. + fk : numpy.ndarray + The function to interpolate, expressed on the current + k-point grid. Must be a matrix-valued array expressed in + k-space, *in a localised basis*. + """ + + if len(other) % len(self): + raise ValueError( + "Size of destination k-point mesh must be divisible by the size of the source " + "k-point mesh for interpolation." 
+ ) + nimg = len(other) // len(self) + nao = fk.shape[-1] + + r_vec_abs = self.translation_vectors() + kR = np.exp(1.0j * np.dot(self._kpts, r_vec_abs.T)) / np.sqrt(len(r_vec_abs)) + + r_vec_abs = other.translation_vectors() + kL = np.exp(1.0j * np.dot(other._kpts, r_vec_abs.T)) / np.sqrt(len(r_vec_abs)) + + # k -> bvk + fg = lib.einsum("kR,kij,kS->RiSj", kR, fk, kR.conj()) + if np.max(np.abs(fg.imag)) > 1e-6: + raise ValueError("Interpolated function has non-zero imaginary part.") + fg = fg.real + fg = fg.reshape(len(self) * nao, len(self) * nao) + + # tile in bvk + fg = scipy.linalg.block_diag(*[fg for i in range(nimg)]) + + # bvk -> k + fg = fg.reshape(len(other), nao, len(other), nao) + fl = lib.einsum("kR,RiSj,kS->kij", kL.conj(), fg, kL) + + return fl + + def member(self, kpt): + """ + Find the index of the k-point in the k-point list. + """ + if kpt not in self: + raise ValueError(f"{kpt} is not in list") + return self._kpts_hash[self.hash_kpts(kpt)] + + index = member + + def __contains__(self, kpt): + """ + Check if the k-point is in the k-point list. + """ + return self.hash_kpts(kpt) in self._kpts_hash + + def __getitem__(self, index): + """ + Get the k-point at the given index. + """ + return self._kpts[index] + + def __len__(self): + """ + Get the number of k-points. + """ + return len(self._kpts) + + def __iter__(self): + """ + Iterate over the k-points. + """ + return iter(self._kpts) + + def __repr__(self): + """ + Get a string representation of the k-points. + """ + return repr(self._kpts) + + def __str__(self): + """ + Get a string representation of the k-points. + """ + return str(self._kpts) + + def __array__(self): + """ + Get the k-points as a numpy array. + """ + return np.asarray(self._kpts) diff --git a/momentGW/pbc/qsgw.py b/momentGW/pbc/qsgw.py new file mode 100644 index 00000000..76a357c3 --- /dev/null +++ b/momentGW/pbc/qsgw.py @@ -0,0 +1,114 @@ +""" +Spin-restricted quasiparticle self-consistent GW via self-energy moment +constraints for periodic systems. +""" + +import numpy as np +from pyscf import lib +from pyscf.agf2 import GreensFunction, mpi_helper +from pyscf.agf2.dfragf2 import get_jk +from pyscf.ao2mo import _ao2mo +from pyscf.lib import logger + +from momentGW import util +from momentGW.pbc.evgw import evKGW +from momentGW.pbc.gw import KGW +from momentGW.qsgw import qsGW + + +class qsKGW(KGW, qsGW): + __doc__ = qsGW.__doc__.replace("molecules", "periodic systems", 1) + + # --- Default qsKGW options + + solver = KGW + + _opts = util.list_union(KGW._opts, qsGW._opts) + + @property + def name(self): + return "qsKGW" + + @staticmethod + def project_basis(matrix, ovlp, mo1, mo2): + """ + Project a matrix from one basis to another. + + Parameters + ---------- + matrix : numpy.ndarray or tuple of (GreensFunction or SelfEnergy) + Matrix to project at each k-point. Can also be a tuple of + `GreensFunction` or `SelfEnergy` objects, in which case the + `couplings` attributes are projected. + ovlp : numpy.ndarray + Overlap matrix in the shared (AO) basis at each k-point. + mo1 : numpy.ndarray + First basis, rotates from the shared (AO) basis into the + basis of `matrix` at each k-point. + mo2 : numpy.ndarray + Second basis, rotates from the shared (AO) basis into the + desired basis of the output at each k-point. + + Returns + ------- + projected_matrix : numpy.ndarray or tuple of (GreensFunction or SelfEnergy) + Matrix projected into the desired basis at each k-point. 
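# A single-k-point sketch of the projection described above: with
# P = C1^H S C2, a matrix M expressed in the basis C1 is carried to the
# basis C2 as P^H M P. The dimensions and matrices are illustrative.
import numpy as np

nao = 6
s = np.eye(nao)                                     # AO overlap
c1 = np.linalg.qr(np.random.random((nao, nao)))[0]  # first MO basis
c2 = np.linalg.qr(np.random.random((nao, nao)))[0]  # second MO basis
m = np.random.random((nao, nao))

proj = c1.conj().T @ s @ c2
m_proj = proj.conj().T @ m @ proj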
+ """ + + proj = lib.einsum("k...pq,k...pi,k...qj->k...ij", ovlp, np.conj(mo1), mo2) + + if isinstance(matrix, np.ndarray): + projected_matrix = lib.einsum( + "k...pq,k...pi,k...qj->k...ij", matrix, np.conj(proj), proj + ) + else: + projected_matrix = [] + for k, m in enumerate(matrix): + coupling = lib.einsum("pk,pi->ik", m.coupling, np.conj(proj[k])) + projected_m = m.copy() + projected_m.coupling = coupling + projected_matrix.append(projected_m) + + return projected_matrix + + @staticmethod + def self_energy_to_moments(se, nmom_max): + """ + Return the hole and particle moments for a self-energy. + + Parameters + ---------- + se : tuple of SelfEnergy + Self-energy to compute the moments of at each k-point. + + Returns + ------- + th : numpy.ndarray + Hole moments at each k-point. + tp : numpy.ndarray + Particle moments at each k-point. + """ + th = np.array([s.get_occupied().moment(range(nmom_max + 1)) for s in se]) + tp = np.array([s.get_virtual().moment(range(nmom_max + 1)) for s in se]) + return th, tp + + def build_static_potential(self, mo_energy, se): + """ + Build the static potential approximation to the self-energy. + + Parameters + ---------- + mo_energy : numpy.ndarray + Molecular orbital energies at each k-point. + se : tuple of SelfEnergy + Self-energy to approximate at each k-point. + + Returns + ------- + se_qp : numpy.ndarray + Static potential approximation to the self-energy at each + k-point. + """ + return np.array([qsGW.build_static_potential(self, mo, s) for mo, s in zip(mo_energy, se)]) + + check_convergence = evKGW.check_convergence diff --git a/momentGW/pbc/rpa.py b/momentGW/pbc/rpa.py new file mode 100644 index 00000000..ab78bca5 --- /dev/null +++ b/momentGW/pbc/rpa.py @@ -0,0 +1,128 @@ +""" +Construct RPA moments with periodic boundary conditions. +""" + +import numpy as np +from pyscf import lib +from pyscf.pbc.lib.kpts_helper import get_kconserv + +from momentGW.pbc import df + + +def build_se_moments_drpa_exact( + gw, + nmom_max, + Lpq, + Lia, + mo_energy=None, +): + """ + Compute the self-energy moments using exact dRPA. Scales as + the sixth power of the number of orbitals. + + Parameters + ---------- + gw : BaseKGW + GW object. + nmom_max : int + Maximum moment number to calculate. + Lpq : numpy.ndarray + Density-fitted ERI tensor. `p` is in the basis of MOs, `q` is + in the basis of the Green's function. + Lia : numpy.ndarray + Density-fitted ERI tensor for the occupied-virtual slice. `i` + and `a` are in the basis of the screened Coulomb interaction. + mo_energy : numpy.ndarray, optional + Molecular orbital energies. Default value is that of + `gw._scf.mo_energy`. + + Returns + ------- + se_moments_hole : numpy.ndarray + Moments of the hole self-energy at each k-point. If + `self.diagonal_se`, non-diagonal elements are set to zero. + se_moments_part : numpy.ndarray + Moments of the particle self-energy at each k-point. If + `self.diagonal_se`, non-diagonal elements are set to zero. 
+ """ + + if mo_energy is None: + mo_energy = gw._scf.mo_energy + + nkpts = gw.nkpts + nmo = gw.nmo + nocc = np.array(gw.nocc) + nov = nocc * (nmo - nocc) + + hole_moms = np.zeros((nkpts, nmom_max + 1, nmo, nmo), dtype=np.complex128) + part_moms = np.zeros((nkpts, nmom_max + 1, nmo, nmo), dtype=np.complex128) + + scaled_kpts = gw.mol.get_scaled_kpts(gw.kpts) + scaled_kpts -= scaled_kpts[0] + kpt_dict = {df.hash_array(kpt): k for k, kpt in enumerate(scaled_kpts)} + + def wrap_around(kpt): + # Handle wrap around for a single scaled k-point + kpt[kpt >= (1.0 - df.KPT_DIFF_TOL)] -= 1.0 + return kpt + + # For now + assert len(set(nocc)) == 1 + + blocks = {} + for q in range(nkpts): + for ki in range(nkpts): + for kj in range(nkpts): + transfer = scaled_kpts[q] + scaled_kpts[ki] - scaled_kpts[kj] + conserved = np.linalg.norm(np.round(transfer) - transfer) < 1e-12 + if conserved: + ki_p_q = kpt_dict[df.hash_array(wrap_around(scaled_kpts[ki] + scaled_kpts[q]))] + kj_p_q = kpt_dict[df.hash_array(wrap_around(scaled_kpts[kj] + scaled_kpts[q]))] + + ei = mo_energy[ki][: nocc[ki]] + ea = mo_energy[ki_p_q][nocc[ki_p_q] :] + + Via = Lpq[ki, ki_p_q, :, : nocc[ki], nocc[ki_p_q] :] + Vjb = Lpq[kj, kj_p_q, :, : nocc[kj], nocc[kj_p_q] :] + Vbj = Lpq[kj_p_q, kj, :, nocc[kj_p_q] :, : nocc[kj]] + iajb = lib.einsum("Lia,Ljb->iajb", Via, Vjb) + iabj = lib.einsum("Lia,Lbj->iajb", Via, Vbj) + + blocks["a", q, ki, kj] = np.diag( + (ea[:, None] - ei[None]).ravel().astype(iajb.dtype) + ) + blocks["a", q, ki, kj] += iabj.reshape(blocks["a", q, ki, kj].shape) + blocks["b", q, ki, kj] = iajb.reshape(blocks["a", q, ki, kj].shape) + + z = np.zeros((nov[0], nov[0]), dtype=np.complex128) + for q in range(nkpts): + a = np.block( + [[blocks.get(("a", q, ki, kj), z) for kj in range(nkpts)] for ki in range(nkpts)] + ) + b = np.block( + [[blocks.get(("b", q, ki, kj), z) for kj in range(nkpts)] for ki in range(nkpts)] + ) + mat = np.block([[a, b], [-b.conj(), -a.conj()]]) + omega, xy = np.linalg.eigh(mat) + x, y = xy[:, : np.sum(nov)], xy[:, np.sum(nov) :] + + +if __name__ == "__main__": + from pyscf.pbc import gto, scf + + from momentGW.pbc.gw import KGW + + cell = gto.Cell() + cell.atom = "He 1 1 1; He 3 2 3" + cell.a = np.eye(3) * 3 + cell.basis = "6-31g" + cell.verbose = 0 + cell.build() + + mf = scf.KRHF(cell) + mf.kpts = cell.make_kpts([3, 2, 1]) + mf.with_df = df.GDF(cell, mf.kpts) + mf.kernel() + + gw = KGW(mf) + build_se_moments_drpa_exact(gw, 2, *gw.ao2mo(mf.mo_coeff)) diff --git a/momentGW/pbc/scgw.py b/momentGW/pbc/scgw.py new file mode 100644 index 00000000..0b93917c --- /dev/null +++ b/momentGW/pbc/scgw.py @@ -0,0 +1,52 @@ +""" +Spin-restricted self-consistent GW via self-energy moment constraitns +for periodic systems. +""" + +import numpy as np +from pyscf import lib +from pyscf.agf2 import GreensFunction, mpi_helper +from pyscf.ao2mo import _ao2mo +from pyscf.lib import logger + +from momentGW import util +from momentGW.pbc.evgw import evKGW +from momentGW.pbc.gw import KGW +from momentGW.scgw import scGW + + +class scKGW(KGW, scGW): + __doc__ = scGW.__doc__.replace("molecules", "periodic systems", 1) + + _opts = util.list_union(KGW._opts, scGW._opts) + + @property + def name(self): + return "scKG%sW%s" % ("0" if self.g0 else "", "0" if self.w0 else "") + + def init_gf(self, mo_energy=None): + """Initialise the mean-field Green's function. + + Parameters + ---------- + mo_energy : numpy.ndarray, optional + Molecular orbital energies at each k-point. Default value is + `self.mo_energy`. 
+ + Returns + ------- + gf : tuple of GreensFunction + Mean-field Green's function at each k-point. + """ + + if mo_energy is None: + mo_energy = self.mo_energy + + gf = [] + for k in self.kpts.loop(1): + chempot = 0.5 * (mo_energy[k][self.nocc[k] - 1] + mo_energy[k][self.nocc[k]]) + gf.append(GreensFunction(mo_energy[k], np.eye(self.nmo), chempot=chempot)) + + return gf + + check_convergence = evKGW.check_convergence diff --git a/momentGW/pbc/tda.py b/momentGW/pbc/tda.py new file mode 100644 index 00000000..a044827d --- /dev/null +++ b/momentGW/pbc/tda.py @@ -0,0 +1,301 @@ +""" +Construct TDA moments with periodic boundary conditions. +""" + +import numpy as np +import scipy.special +from pyscf import lib +from pyscf.agf2 import mpi_helper +from pyscf.pbc.gw.krgw_ac import get_qij + +from momentGW.tda import TDA as MolTDA + + +class TDA(MolTDA): + """ + Compute the self-energy moments using dTDA and numerical integration + + with periodic boundary conditions. + Parameters + ---------- + gw : BaseKGW + GW object. + nmom_max : int + Maximum moment number to calculate. + Lpx : numpy.ndarray + Density-fitted ERI tensor, where the first two indices + enumerate the k-points, the third index is the auxiliary + basis function index, and the fourth and fifth indices are + the MO and Green's function orbital indices, respectively. + integrals : KIntegrals + Density-fitted integrals. + mo_energy : numpy.ndarray or tuple of numpy.ndarray, optional + Molecular orbital energies at each k-point. If a tuple is passed, + the first element corresponds to the Green's function basis and + the second to the screened Coulomb interaction. Default value is + that of `gw.mo_energy`. + mo_occ : numpy.ndarray or tuple of numpy.ndarray, optional + Molecular orbital occupancies at each k-point. If a tuple is + passed, the first element corresponds to the Green's function basis + and the second to the screened Coulomb interaction. Default value + is that of `gw.mo_occ`. 
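# A gamma-point sketch of the dTDA density-density moment recursion that
# `build_dd_moments` below distributes over k-points:
#   eta^(0) = L_ia,   eta^(n) = eta^(n-1) * d  +  2 (eta^(n-1) L^T) L,
# where d are the orbital-energy differences e_a - e_i. Sizes and arrays
# are illustrative.
import numpy as np

naux, nocc, nvir = 20, 3, 5
d = np.random.random(nocc * nvir) + 1.0       # e_a - e_i, flattened over (i, a)
Lia = np.random.random((naux, nocc * nvir))   # density-fitted (aux, ov) block

moments = [Lia]
for n in range(1, 4):
    mom = moments[-1] * d[None] + 2.0 * np.dot(np.dot(moments[-1], Lia.T), Lia)
    moments.append(mom)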
+ """ + + def __init__( + self, + gw, + nmom_max, + integrals, + mo_energy=None, + mo_occ=None, + ): + self.gw = gw + self.nmom_max = nmom_max + self.integrals = integrals + + # Get the MO energies for G and W + if mo_energy is None: + self.mo_energy_g = self.mo_energy_w = gw.mo_energy + elif isinstance(mo_energy, tuple): + self.mo_energy_g, self.mo_energy_w = mo_energy + else: + self.mo_energy_g = self.mo_energy_w = mo_energy + + # Get the MO occupancies for G and W + if mo_occ is None: + self.mo_occ_g = self.mo_occ_w = gw.mo_occ + elif isinstance(mo_occ, tuple): + self.mo_occ_g, self.mo_occ_w = mo_occ + else: + self.mo_occ_g = self.mo_occ_w = mo_occ + + # Options and thresholds + self.report_quadrature_error = True + if self.gw.compression and "ia" in self.gw.compression.split(","): + self.compression_tol = gw.compression_tol + else: + self.compression_tol = None + + def build_dd_moments(self): + """Build the moments of the density-density response.""" + + cput0 = (lib.logger.process_clock(), lib.logger.perf_counter()) + lib.logger.info(self.gw, "Building density-density moments") + lib.logger.debug(self.gw, "Memory usage: %.2f GB", self._memory_usage()) + + kpts = self.kpts + moments = np.zeros((self.nkpts, self.nkpts, self.nmom_max + 1), dtype=object) + + # Get the zeroth order moment + for q in kpts.loop(1): + for kj in kpts.loop(1, mpi=True): + kb = kpts.member(kpts.wrap_around(kpts[q] + kpts[kj])) + moments[q, kb, 0] += self.integrals.Lia[kj, kb] / self.nkpts + cput1 = lib.logger.timer(self.gw, "zeroth moment", *cput0) + + # Get the higher order moments + for i in range(1, self.nmom_max + 1): + for q in kpts.loop(1): + for kj in kpts.loop(1, mpi=True): + kb = kpts.member(kpts.wrap_around(kpts[q] + kpts[kj])) + + d = lib.direct_sum( + "a-i->ia", + self.mo_energy_w[kb][self.mo_occ_w[kb] == 0], + self.mo_energy_w[kj][self.mo_occ_w[kj] > 0], + ) + moments[q, kb, i] += moments[q, kb, i - 1] * d.ravel()[None] + + tmp = np.zeros((self.naux[q], self.naux[q]), dtype=complex) + for ki in kpts.loop(1, mpi=True): + ka = kpts.member(kpts.wrap_around(kpts[q] + kpts[ki])) + + tmp += np.dot(moments[q, ka, i - 1], self.integrals.Lia[ki, ka].T.conj()) + + tmp = mpi_helper.allreduce(tmp) + tmp *= 2.0 + tmp /= self.nkpts + + for kj in kpts.loop(1, mpi=True): + kb = kpts.member(kpts.wrap_around(kpts[q] + kpts[kj])) + + moments[q, kb, i] += np.dot(tmp, self.integrals.Lai[kj, kb].conj()) + + cput1 = lib.logger.timer(self.gw, "moment %d" % i, *cput1) + + return moments + + def convolve(self, eta): + """Handle the convolution of the moments of G and W.""" + + kpts = self.kpts + + # Setup dependent on diagonal SE + if self.gw.diagonal_se: + pqchar = pchar = qchar = "p" + fproc = lambda x: np.diag(x) + else: + pqchar, pchar, qchar = "pq", "p", "q" + fproc = lambda x: x + + moments_occ = np.zeros((self.nkpts, self.nmom_max + 1, self.nmo, self.nmo), dtype=complex) + moments_vir = np.zeros((self.nkpts, self.nmom_max + 1, self.nmo, self.nmo), dtype=complex) + moms = np.arange(self.nmom_max + 1) + for n in moms: + fp = scipy.special.binom(n, moms) + fh = fp * (-1) ** moms + for q in kpts.loop(1): + for kp in kpts.loop(1, mpi=True): + kx = kpts.member(kpts.wrap_around(kpts[kp] - kpts[q])) + subscript = f"t,kt,kt{pqchar}->{pqchar}" + + eo = np.power.outer(self.mo_energy_g[kx][self.mo_occ_g[kx] > 0], n - moms) + to = lib.einsum(subscript, fh, eo, eta[kp, q][self.mo_occ_g[kx] > 0]) + moments_occ[kp, n] += fproc(to) + + ev = np.power.outer(self.mo_energy_g[kx][self.mo_occ_g[kx] == 0], n - moms) + tv = 
lib.einsum(subscript, fp, ev, eta[kp, q][self.mo_occ_g[kx] == 0]) + moments_vir[kp, n] += fproc(tv) + + # Numerical integration can lead to small non-hermiticity + for n in range(self.nmom_max + 1): + for k in kpts.loop(1, mpi=True): + moments_occ[k, n] = 0.5 * (moments_occ[k, n] + moments_occ[k, n].T.conj()) + moments_vir[k, n] = 0.5 * (moments_vir[k, n] + moments_vir[k, n].T.conj()) + + moments_occ = mpi_helper.allreduce(moments_occ) + moments_vir = mpi_helper.allreduce(moments_vir) + + return moments_occ, moments_vir + + def build_se_moments(self, moments_dd): + """Build the moments of the self-energy via convolution.""" + + cput0 = (lib.logger.process_clock(), lib.logger.perf_counter()) + lib.logger.info(self.gw, "Building self-energy moments") + lib.logger.debug(self.gw, "Memory usage: %.2f GB", self._memory_usage()) + + kpts = self.kpts + + # Setup dependent on diagonal SE + if self.gw.diagonal_se: + pqchar = pchar = qchar = "p" + eta_shape = lambda k: (self.mo_energy_g[k].size, self.nmom_max + 1, self.nmo) + else: + pqchar, pchar, qchar = "pq", "p", "q" + eta_shape = lambda k: (self.mo_energy_g[k].size, self.nmom_max + 1, self.nmo, self.nmo) + eta = np.zeros((self.nkpts, self.nkpts), dtype=object) + + # Get the moments in (aux|aux) and rotate to (mo|mo) + for n in range(self.nmom_max + 1): + for q in kpts.loop(1): + eta_aux = 0 + for kj in kpts.loop(1, mpi=True): + kb = kpts.member(kpts.wrap_around(kpts[q] + kpts[kj])) + eta_aux += np.dot(moments_dd[q, kb, n], self.integrals.Lia[kj, kb].T.conj()) + + eta_aux = mpi_helper.allreduce(eta_aux) + eta_aux *= 2.0 + eta_aux /= self.nkpts + + for kp in kpts.loop(1, mpi=True): + kx = kpts.member(kpts.wrap_around(kpts[kp] - kpts[q])) + + if not isinstance(eta[kp, q], np.ndarray): + eta[kp, q] = np.zeros(eta_shape(kx), dtype=eta_aux.dtype) + + for x in range(self.mo_energy_g[kx].size): + Lp = self.integrals.Lpx[kp, kx][:, :, x] + subscript = f"P{pchar},Q{qchar},PQ->{pqchar}" + eta[kp, q][x, n] += lib.einsum(subscript, Lp, Lp.conj(), eta_aux) + + cput1 = lib.logger.timer(self.gw, "rotating DD moments", *cput0) + + # Construct the self-energy moments + moments_occ, moments_vir = self.convolve(eta) + cput1 = lib.logger.timer(self.gw, "constructing SE moments", *cput1) + + return moments_occ, moments_vir + + def build_dd_moments_exact(self): + raise NotImplementedError + + def build_dd_moments_fc(self): + """ + Build the moments of the "head" (G=0, G'=0) and "wing" + (G=P, G'=0) density-density response. 
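# A sketch of the binomial convolution carried out in `convolve` above,
# written for a single diagonal element and a single Green's function
# pole; `eps` and `eta` hold illustrative numbers.
import numpy as np
import scipy.special

nmom = 3
eps = -0.5                              # Green's function pole energy
eta = np.random.random(nmom + 1)        # dd moments projected onto this pole
moms = np.arange(nmom + 1)

se_mom_hole = [
    np.sum(scipy.special.binom(n, moms) * (-1.0) ** moms * eps ** (n - moms) * eta)
    for n in moms
]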
+ """ + + kpts = self.kpts + integrals = self.integrals + + # q-point for q->0 finite size correction + qpt = np.array([1e-3, 0.0, 0.0]) + qpt = self.kpts.get_abs_kpts(qpt) + + # 1/sqrt(Ω) * ⟨Ψ_{ik}|e^{iqr}|Ψ_{ak-q}⟩ + qij = get_qij(self, qpt, self.mo_coeff) + + # Build the DD moments for the "head" (G=0, G'=0) correction + moments_head = np.zeros((self.nkpts, self.nmom_max + 1), dtype=complex) + for k in kpts.loop(1): + d = lib.direct_sum( + "a-i->ia", + self.mo_energy_w[k][self.mo_occ_w[k] == 0], + self.mo_energy_w[k][self.mo_occ_w[k] > 0], + ) + dn = np.ones_like(d) + for n in range(self.nmom_max + 1): + moments_head[k, n] = lib.einsum("ia,ia,ia->", qij[k], qij[k].conj(), dn) + dn *= d + + # Build the DD moments for the "wing" (G=P, G'=0) correction + moments_tail = np.zeros((self.nkpts, self.nmom_max + 1), dtype=object) + for k in kpts.loop(1): + d = lib.direct_sum( + "a-i->ia", + self.mo_energy_w[k][self.mo_occ_w[k] == 0], + self.mo_energy_w[k][self.mo_occ_w[k] > 0], + ) + dn = np.ones_like(d) + for n in range(self.nmom_max + 1): + moments_wing[k, n] = lib.einsum("Lia,ia,ia->L", integrals.Lia, qij[k].conj(), dn) + dn *= d + + moments_head *= -4.0 * np.pi + moments_wing *= -4.0 * np.pi + + return moments_head, moments_wing + + def build_se_moments_fc(self, moments_head, moments_wing): + """ + Build the moments of the self-energy corresponding to the + "wing" (G=P, G'=0) and "head" (G=0, G'=0) density-density + response via convolution. + """ + + kpts = self.kpts + integrals = self.integrals + + @property + def naux(self): + """Number of auxiliaries.""" + return self.integrals.naux + + @property + def nov(self): + """Number of ov states in W.""" + return np.multiply.outer( + [np.sum(occ > 0) for occ in self.mo_occ_w], + [np.sum(occ == 0) for occ in self.mo_occ_w], + ) + + @property + def kpts(self): + """k-points.""" + return self.gw.kpts + + @property + def nkpts(self): + """Number of k-points.""" + return self.gw.nkpts diff --git a/momentGW/qsgw.py b/momentGW/qsgw.py index 113d2fbc..1867dbce 100644 --- a/momentGW/qsgw.py +++ b/momentGW/qsgw.py @@ -12,6 +12,7 @@ from momentGW import util from momentGW.base import BaseGW +from momentGW.evgw import evGW from momentGW.gw import GW from momentGW.ints import Integrals @@ -53,6 +54,8 @@ def kernel( Green's function object se : pyscf.agf2.SelfEnergy Self-energy object + qp_energy : numpy.ndarray + Quasiparticle energies. 
""" logger.warn(gw, "qsGW is untested!") @@ -63,61 +66,49 @@ def kernel( if integrals is None: integrals = gw.ao2mo() - nmo = gw.nmo - nocc = gw.nocc - naux = gw.with_df.get_naoaux() mo_energy = mo_energy.copy() mo_energy_ref = mo_energy.copy() mo_coeff = mo_coeff.copy() mo_coeff_ref = mo_coeff.copy() - dm = np.eye(gw.nmo) * 2 - dm[nocc:, nocc:] = 0 - h1e = np.linalg.multi_dot((mo_coeff.T, gw._scf.get_hcore(), mo_coeff)) + # Get the overlap + ovlp = gw._scf.get_ovlp() + sc = lib.einsum("...pq,...qi->...pi", ovlp, mo_coeff) - diis = util.DIIS() - diis.space = gw.diis_space + # Get the density matrix + dm = gw._scf.make_rdm1(mo_coeff, gw.mo_occ) + dm = lib.einsum("...pq,...pi,...qj->...ij", dm, np.conj(sc), sc) - ovlp = gw._scf.get_ovlp() + # Get the core Hamiltonian + h1e = gw._scf.get_hcore() + h1e = lib.einsum("...pq,...pi,...qj->...ij", h1e, np.conj(mo_coeff), mo_coeff) - def project_basis(m, c1, c2): - # Project m from MO basis c1 to MO basis c2 - p = np.linalg.multi_dot((c1.T, ovlp, c2)) - m = lib.einsum("...pq,pi,qj->...ij", m, p, p) - return m + diis = util.DIIS() + diis.space = gw.diis_space # Get the self-energy - subgw = gw.solver(gw._scf, **(gw.solver_options if gw.solver_options else {})) + solver_options = {} if not gw.solver_options else gw.solver_options.copy() + for key in gw.solver._opts: + solver_options[key] = solver_options.get(key, getattr(gw, key)) + subgw = gw.solver(gw._scf, **solver_options) subgw.verbose = 0 subgw.mo_energy = mo_energy subgw.mo_coeff = mo_coeff - subconv, gf, se = subgw.kernel(nmom_max=nmom_max, integrals=integrals) + subconv, gf, se, _ = subgw.kernel(nmom_max=nmom_max, integrals=integrals) # Get the moments - th = se.get_occupied().moment(range(nmom_max + 1)) - tp = se.get_virtual().moment(range(nmom_max + 1)) + th, tp = gw.self_energy_to_moments(se, nmom_max) conv = False for cycle in range(1, gw.max_cycle + 1): logger.info(gw, "%s iteration %d", gw.name, cycle) # Build the static potential - if gw.srg == 0.0: - denom = lib.direct_sum( - "p-q-q->pq", mo_energy, se.energy, np.sign(se.energy) * 1.0j * gw.eta - ) - se_qp = lib.einsum("pk,qk,pk->pq", se.coupling, se.coupling, 1 / denom).real - else: - denom = lib.direct_sum("p-q->pq", mo_energy, se.energy) - d2p = lib.direct_sum("pk,qk->pqk", denom**2, denom**2) - reg = 1 - np.exp(-d2p * gw.srg) - reg *= lib.direct_sum("pk,qk->pqk", denom, denom) - reg /= d2p - se_qp = lib.einsum("pk,qk,pqk->pq", se.coupling, se.coupling, reg).real - - se_qp = 0.5 * (se_qp + se_qp.T) - se_qp = project_basis(se_qp, mo_coeff, mo_coeff_ref) + se_qp_prev = se_qp if cycle > 1 else None + se_qp = gw.build_static_potential(mo_energy, se) se_qp = diis.update(se_qp) + if gw.damping != 0.0 and cycle > 1: + se_qp = (1.0 - gw.damping) * se_qp + gw.damping * se_qp_prev # Update the MO energies and orbitals - essentially a Fock # loop using the folded static self-energy. 
@@ -133,10 +124,10 @@ def project_basis(m, c1, c2): mo_energy, u = np.linalg.eigh(fock_eff) u = mpi_helper.bcast(u, root=0) - mo_coeff = np.dot(mo_coeff_ref, u) + mo_coeff = lib.einsum("...pq,...qi->...pi", mo_coeff_ref, u) dm_prev = dm - dm = np.dot(u[:, :nocc], u[:, :nocc].T) * 2 + dm = gw._scf.make_rdm1(u, gw.mo_occ) error = np.max(np.abs(dm - dm_prev)) if error < gw.conv_tol_qp: conv_qp = True @@ -150,37 +141,22 @@ def project_basis(m, c1, c2): # Update the self-energy subgw.mo_energy = mo_energy subgw.mo_coeff = mo_coeff - _, gf, se = subgw.kernel(nmom_max=nmom_max) + subconv, gf, se, _ = subgw.kernel(nmom_max=nmom_max) + gf = gw.project_basis(gf, ovlp, mo_coeff, mo_coeff_ref) + se = gw.project_basis(se, ovlp, mo_coeff, mo_coeff_ref) # Update the moments th_prev, tp_prev = th, tp - th = se.get_occupied().moment(range(nmom_max + 1)) - th = project_basis(th, mo_coeff, mo_coeff_ref) - tp = se.get_virtual().moment(range(nmom_max + 1)) - tp = project_basis(tp, mo_coeff, mo_coeff_ref) + th, tp = gw.self_energy_to_moments(se, nmom_max) # Check for convergence - error_homo = abs(mo_energy[nocc - 1] - mo_energy_prev[nocc - 1]) - error_lumo = abs(mo_energy[nocc] - mo_energy_prev[nocc]) - error_th = gw._moment_error(th, th_prev) - error_tp = gw._moment_error(tp, tp_prev) + conv = gw.check_convergence(mo_energy, mo_energy_prev, th, th_prev, tp, tp_prev) th_prev = th.copy() tp_prev = tp.copy() - logger.info(gw, "Change in QPs: HOMO = %.6g LUMO = %.6g", error_homo, error_lumo) - logger.info(gw, "Change in moments: occ = %.6g vir = %.6g", error_th, error_tp) - if gw.conv_logical( - ( - max(error_homo, error_lumo) < gw.conv_tol, - max(error_th, error_tp) < gw.conv_tol_moms, - conv_qp, - ) - ): - conv = True + if conv: break - gf = GreensFunction(mo_energy, np.dot(mo_coeff_ref.T, ovlp, mo_coeff), chempot=gf.chempot) - - return conv, gf, se + return conv, gf, se, mo_energy class qsGW(GW): @@ -212,6 +188,8 @@ class qsGW(GW): diis_space_qp : int, optional Size of the DIIS extrapolation space in the quasiparticle loop. Default value is 8. + damping : float, optional + Damping parameter. Default value is 0.0. eta : float, optional Small value to regularise the self-energy. Default value is `1e-1`. 
@@ -239,6 +217,7 @@ class qsGW(GW): conv_logical = all diis_space = 8 diis_space_qp = 8 + damping = 0.0 eta = 1e-1 srg = 0.0 solver = GW @@ -253,6 +232,7 @@ class qsGW(GW): "conv_logical", "diis_space", "diis_space_qp", + "damping", "eta", "srg", "solver", @@ -263,52 +243,108 @@ class qsGW(GW): def name(self): return "qsGW" - def kernel( - self, - nmom_max, - mo_energy=None, - mo_coeff=None, - moments=None, - integrals=None, - ): - if mo_coeff is None: - mo_coeff = self._scf.mo_coeff - if mo_energy is None: - mo_energy = self._scf.mo_energy - - cput0 = (logger.process_clock(), logger.perf_counter()) - self.dump_flags() - logger.info(self, "nmom_max = %d", nmom_max) - - self.converged, self.gf, self.se = kernel( - self, - nmom_max, - mo_energy, - mo_coeff, - integrals=integrals, - ) - - gf_occ = self.gf.get_occupied() - gf_occ.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_occ.naux)): - en = -gf_occ.energy[-(n + 1)] - vn = gf_occ.coupling[:, -(n + 1)] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "IP energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - gf_vir = self.gf.get_virtual() - gf_vir.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_vir.naux)): - en = gf_vir.energy[n] - vn = gf_vir.coupling[:, n] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "EA energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - if self.converged: - logger.note(self, "%s converged", self.name) + _kernel = kernel + + @staticmethod + def project_basis(matrix, ovlp, mo1, mo2): + """ + Project a matrix from one basis to another. + + Parameters + ---------- + matrix : numpy.ndarray or GreensFunction or SelfEnergy + Matrix to project. Can also be a `GreensFunction` or + `SelfEnergy` object, in which case the `couplings` + attribute is projected. + ovlp : numpy.ndarray + Overlap matrix in the shared (AO) basis. + mo1 : numpy.ndarray + First basis, rotates from the shared (AO) basis into the + basis of `matrix`. + mo2 : numpy.ndarray + Second basis, rotates from the shared (AO) basis into the + desired basis of the output. + + Returns + ------- + projected_matrix : numpy.ndarray or GreensFunction or SelfEnergy + Matrix projected into the desired basis. + """ + + proj = lib.einsum("...pq,...pi,...qj->...ij", ovlp, mo1, mo2) + + if isinstance(matrix, np.ndarray): + projected_matrix = lib.einsum("...pq,...pi,...qj->...ij", matrix, proj, proj) + else: + coupling = lib.einsum("...pk,...pi->...ik", matrix.coupling, proj) + projected_matrix = matrix.copy() + projected_matrix.coupling = coupling + + return projected_matrix + + @staticmethod + def self_energy_to_moments(se, nmom_max): + """ + Return the hole and particle moments for a self-energy. + + Parameters + ---------- + se : SelfEnergy + Self-energy to compute the moments of. + + Returns + ------- + th : numpy.ndarray + Hole moments. + tp : numpy.ndarray + Particle moments. + """ + th = se.get_occupied().moment(range(nmom_max + 1)) + tp = se.get_virtual().moment(range(nmom_max + 1)) + return th, tp + + def build_static_potential(self, mo_energy, se): + """ + Build the static potential approximation to the self-energy. + + Parameters + ---------- + mo_energy : numpy.ndarray + Molecular orbital energies. + se : SelfEnergy + Self-energy to approximate. + + Returns + ------- + se_qp : numpy.ndarray + Static potential approximation to the self-energy. 
+ """ + + if self.srg == 0.0: + eta = np.sign(se.energy) * self.eta * 1.0j + denom = lib.direct_sum("p-q-q->pq", mo_energy, se.energy, eta) + se_i = lib.einsum("pk,qk,pk->pq", se.coupling, np.conj(se.coupling), 1 / denom) + se_j = lib.einsum("pk,qk,qk->pq", se.coupling, np.conj(se.coupling), 1 / denom) else: - logger.note(self, "%s failed to converge", self.name) + denom = lib.direct_sum("p-q->pq", mo_energy, se.energy) + d2p = lib.direct_sum("pk,qk->pqk", denom**2, denom**2) + reg = 1 - np.exp(-d2p * self.srg) + reg *= lib.direct_sum("pk,qk->pqk", denom, denom) + reg /= d2p + se_i = lib.einsum("pk,qk,pqk->pq", se.coupling, np.conj(se.coupling), reg) + se_j = se_i.T.conj() + + se_ij = 0.5 * (se_i + se_j) + + if not np.iscomplexobj(se.coupling): + se_ij = se_ij.real + else: + se_ij[np.diag_indices_from(se_ij)] = se_ij[np.diag_indices_from(se_ij)].real + + return se_ij - logger.timer(self, self.name, *cput0) + check_convergence = evGW.check_convergence - return self.converged, self.gf, self.se + @property + def has_fock_loop(self): + return True diff --git a/momentGW/scgw.py b/momentGW/scgw.py index 78405f13..760961b2 100644 --- a/momentGW/scgw.py +++ b/momentGW/scgw.py @@ -52,6 +52,9 @@ def kernel( Green's function object se : pyscf.agf2.SelfEnergy Self-energy object + qp_energy : numpy.ndarray + Quasiparticle energies. Always None for scGW, returned for + compatibility with other scGW methods. """ logger.warn(gw, "scGW is untested!") @@ -59,16 +62,10 @@ def kernel( if gw.polarizability == "drpa-exact": raise NotImplementedError("%s for polarizability=%s" % (gw.name, gw.polarizability)) - nmo = gw.nmo - nocc = gw.nocc - naux = gw.with_df.get_naoaux() - if integrals is None: integrals = gw.ao2mo() - chempot = 0.5 * (mo_energy[nocc - 1] + mo_energy[nocc]) - gf = GreensFunction(mo_energy, np.eye(mo_energy.size), chempot=chempot) - gf_ref = gf.copy() + gf_ref = gf = gw.init_gf(mo_energy) diis = util.DIIS() diis.space = gw.diis_space @@ -81,21 +78,25 @@ def kernel( ) conv = False - th_prev = tp_prev = np.zeros((nmom_max + 1, nmo, nmo)) + th_prev = tp_prev = None for cycle in range(1, gw.max_cycle + 1): logger.info(gw, "%s iteration %d", gw.name, cycle) if cycle > 1: # Rotate ERIs into (MO, QMO) and (QMO occ, QMO vir) - # TODO reimplement out keyword - mo_coeff_g = None if gw.g0 else np.dot(mo_coeff, gf.coupling) - mo_coeff_w = None if gw.w0 else np.dot(mo_coeff, gf.coupling) - mo_occ_w = ( - None - if gw.w0 - else np.array([2] * gf.get_occupied().naux + [0] * gf.get_virtual().naux) + integrals.update_coeffs( + mo_coeff_g=( + None + if gw.g0 + else lib.einsum("...pq,...qi->...pi", mo_coeff, gw._gf_to_coupling(gf)) + ), + mo_coeff_w=( + None + if gw.w0 + else lib.einsum("...pq,...qi->...pi", mo_coeff, gw._gf_to_coupling(gf)) + ), + mo_occ_w=None if gw.w0 else gw._gf_to_occ(gf), ) - integrals.update_coeffs(mo_coeff_g=mo_coeff_g, mo_coeff_w=mo_coeff_w, mo_occ_w=mo_occ_w) # Update the moments of the SE if moments is not None and cycle == 1: @@ -105,8 +106,8 @@ def kernel( nmom_max, integrals, mo_energy=( - gf.energy if not gw.g0 else gf_ref.energy, - gf.energy if not gw.w0 else gf_ref.energy, + gw._gf_to_energy(gf if not gw.g0 else gf_ref), + gw._gf_to_energy(gf if not gw.w0 else gf_ref), ), mo_occ=( gw._gf_to_occ(gf if not gw.g0 else gf_ref), @@ -116,44 +117,30 @@ def kernel( # Extrapolate the moments try: - th, tp = diis.update_with_scaling(np.array((th, tp)), (2, 3)) + th, tp = diis.update_with_scaling(np.array((th, tp)), (-2, -1)) except Exception as e: - logger.debug(gw, "DIIS step failed at 
iteration %d: %s", cycle, repr(e)) + logger.debug(gw, "DIIS step failed at iteration %d", cycle) # Damp the moments - if gw.damping != 0.0: + if gw.damping != 0.0 and cycle > 1: th = gw.damping * th_prev + (1.0 - gw.damping) * th tp = gw.damping * tp_prev + (1.0 - gw.damping) * tp # Solve the Dyson equation - gf_prev = gf.copy() gf, se = gw.solve_dyson(th, tp, se_static, integrals=integrals) + # Update the MO energies + mo_energy_prev = mo_energy.copy() + mo_energy = gw._gf_to_mo_energy(gf) + # Check for convergence - error_homo = abs( - gf.energy[np.argmax(gf.coupling[nocc - 1] ** 2)] - - gf_prev.energy[np.argmax(gf_prev.coupling[nocc - 1] ** 2)] - ) - error_lumo = abs( - gf.energy[np.argmax(gf.coupling[nocc] ** 2)] - - gf_prev.energy[np.argmax(gf_prev.coupling[nocc] ** 2)] - ) - error_th = gw._moment_error(th, th_prev) - error_tp = gw._moment_error(tp, tp_prev) + conv = gw.check_convergence(mo_energy, mo_energy_prev, th, th_prev, tp, tp_prev) th_prev = th.copy() tp_prev = tp.copy() - logger.info(gw, "Change in QPs: HOMO = %.6g LUMO = %.6g", error_homo, error_lumo) - logger.info(gw, "Change in moments: occ = %.6g vir = %.6g", error_th, error_tp) - if gw.conv_logical( - ( - max(error_homo, error_lumo) < gw.conv_tol, - max(error_th, error_tp) < gw.conv_tol_moms, - ) - ): - conv = True + if conv: break - return conv, gf, se + return conv, gf, se, None class scGW(evGW): @@ -184,52 +171,27 @@ class scGW(evGW): def name(self): return "scG%sW%s" % ("0" if self.g0 else "", "0" if self.w0 else "") - def kernel( - self, - nmom_max, - mo_energy=None, - mo_coeff=None, - moments=None, - integrals=None, - ): - if mo_coeff is None: - mo_coeff = self.mo_coeff + _kernel = kernel + + def init_gf(self, mo_energy=None): + """Initialise the mean-field Green's function. + + Parameters + ---------- + mo_energy : numpy.ndarray, optional + Molecular orbital energies. Default value is + `self.mo_energy`. + + Returns + ------- + gf : GreensFunction + Mean-field Green's function. + """ + if mo_energy is None: mo_energy = self.mo_energy - cput0 = (logger.process_clock(), logger.perf_counter()) - self.dump_flags() - logger.info(self, "nmom_max = %d", nmom_max) - - self.converged, self.gf, self.se = kernel( - self, - nmom_max, - mo_energy, - mo_coeff, - integrals=integrals, - ) - - gf_occ = self.gf.get_occupied() - gf_occ.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_occ.naux)): - en = -gf_occ.energy[-(n + 1)] - vn = gf_occ.coupling[:, -(n + 1)] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "IP energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - gf_vir = self.gf.get_virtual() - gf_vir.remove_uncoupled(tol=1e-1) - for n in range(min(5, gf_vir.naux)): - en = gf_vir.energy[n] - vn = gf_vir.coupling[:, n] - qpwt = np.linalg.norm(vn) ** 2 - logger.note(self, "EA energy level %d E = %.16g QP weight = %0.6g", n, en, qpwt) - - if self.converged: - logger.note(self, "%s converged", self.name) - else: - logger.note(self, "%s failed to converge", self.name) - - logger.timer(self, self.name, *cput0) + chempot = 0.5 * (mo_energy[self.nocc - 1] + mo_energy[self.nocc]) + gf = GreensFunction(mo_energy, np.eye(self.nmo), chempot=chempot) - return self.converged, self.gf, self.se + return gf diff --git a/momentGW/tda.py b/momentGW/tda.py index de87ae6e..b4aefcdc 100644 --- a/momentGW/tda.py +++ b/momentGW/tda.py @@ -24,12 +24,12 @@ class TDA: Molecular orbital energies. If a tuple is passed, the first element corresponds to the Green's function basis and the second to the screened Coulomb interaction. 
Default value is that of - `gw._scf.mo_energy`. + `gw.mo_energy`. mo_occ : numpy.ndarray or tuple of numpy.ndarray, optional Molecular orbital occupancies. If a tuple is passed, the first element corresponds to the Green's function basis and the second to the screened Coulomb interaction. Default value is that of - `gw._scf.mo_occ`. + `gw.mo_occ`. """ def __init__( @@ -46,7 +46,7 @@ def __init__( # Get the MO energies for G and W if mo_energy is None: - self.mo_energy_g = self.mo_energy_w = gw._scf.mo_energy + self.mo_energy_g = self.mo_energy_w = gw.mo_energy elif isinstance(mo_energy, tuple): self.mo_energy_g, self.mo_energy_w = mo_energy else: @@ -54,7 +54,7 @@ def __init__( # Get the MO occupancies for G and W if mo_occ is None: - self.mo_occ_g = self.mo_occ_w = gw._scf.mo_occ + self.mo_occ_g = self.mo_occ_w = gw.mo_occ elif isinstance(mo_occ, tuple): self.mo_occ_g, self.mo_occ_w = mo_occ else: @@ -62,7 +62,7 @@ def __init__( # Options and thresholds self.report_quadrature_error = True - if "ia" in getattr(self.gw, "compression", "").split(","): + if self.gw.compression and "ia" in self.gw.compression.split(","): self.compression_tol = gw.compression_tol else: self.compression_tol = None @@ -79,19 +79,6 @@ def kernel(self, exact=False): self.__class__.__name__, self.nmom_max, ) - if mpi_helper.size > 1: - lib.logger.info( - self.gw, - "Slice of W space on proc %d: [%d, %d]", - mpi_helper.rank, - *self.mpi_slice(self.nov), - ) - lib.logger.info( - self.gw, - "Slice of G space on proc %d: [%d, %d]", - mpi_helper.rank, - *self.mpi_slice(self.mo_energy_g.size), - ) if exact: moments_dd = self.build_dd_moments_exact() @@ -124,15 +111,8 @@ def build_dd_moments(self): moments[0] = self.integrals.Lia cput1 = lib.logger.timer(self.gw, "zeroth moment", *cput0) - # Get the first order moment - moments[1] = self.integrals.Lia * d[None] - tmp = np.dot(self.integrals.Lia, self.integrals.Lia.T) - tmp = mpi_helper.allreduce(tmp) - moments[1] += np.dot(tmp, self.integrals.Lia) * 2.0 - cput1 = lib.logger.timer(self.gw, "first moment", *cput1) - # Get the higher order moments - for i in range(2, self.nmom_max + 1): + for i in range(1, self.nmom_max + 1): moments[i] = moments[i - 1] * d[None] tmp = np.dot(moments[i - 1], self.integrals.Lia.T) tmp = mpi_helper.allreduce(tmp) @@ -145,54 +125,72 @@ def build_dd_moments(self): def build_dd_moments_exact(self): raise NotImplementedError - def build_se_moments(self, moments_dd): - """Build the moments of the self-energy via convolution.""" - - cput0 = (lib.logger.process_clock(), lib.logger.perf_counter()) - lib.logger.info(self.gw, "Building self-energy moments") - lib.logger.debug(self.gw, "Memory usage: %.2f GB", self._memory_usage()) - - p0, p1 = self.mpi_slice(self.nov) - q0, q1 = self.mpi_slice(self.mo_energy_g.size) + def convolve(self, eta): + """Handle the convolution of the moments of G and W.""" # Setup dependent on diagonal SE + q0, q1 = self.mpi_slice(self.mo_energy_g.size) if self.gw.diagonal_se: pq = p = q = "p" - eta = np.zeros((q1 - q0, self.nmom_max + 1, self.nmo)) fproc = lambda x: np.diag(x) else: pq, p, q = "pq", "p", "q" - eta = np.zeros((q1 - q0, self.nmom_max + 1, self.nmo, self.nmo)) fproc = lambda x: x - # Get the moments in (aux|aux) and rotate to (mo|mo) - for n in range(self.nmom_max + 1): - eta_aux = np.dot(moments_dd[n], self.integrals.Lia.T) # aux^2 o v - eta_aux = mpi_helper.allreduce(eta_aux) - for x in range(q1 - q0): - Lp = self.integrals.Lpx[:, :, x] - eta[x, n] = lib.einsum(f"P{p},Q{q},PQ->{pq}", Lp, Lp, eta_aux) * 2.0 - 
cput1 = lib.logger.timer(self.gw, "rotating DD moments", *cput0) - - # Construct the self-energy moments moments_occ = np.zeros((self.nmom_max + 1, self.nmo, self.nmo)) moments_vir = np.zeros((self.nmom_max + 1, self.nmo, self.nmo)) moms = np.arange(self.nmom_max + 1) + for n in moms: fp = scipy.special.binom(n, moms) fh = fp * (-1) ** moms + if np.any(self.mo_occ_g[q0:q1] > 0): eo = np.power.outer(self.mo_energy_g[q0:q1][self.mo_occ_g[q0:q1] > 0], n - moms) to = lib.einsum(f"t,kt,kt{pq}->{pq}", fh, eo, eta[self.mo_occ_g[q0:q1] > 0]) moments_occ[n] += fproc(to) + if np.any(self.mo_occ_g[q0:q1] == 0): ev = np.power.outer(self.mo_energy_g[q0:q1][self.mo_occ_g[q0:q1] == 0], n - moms) tv = lib.einsum(f"t,ct,ct{pq}->{pq}", fp, ev, eta[self.mo_occ_g[q0:q1] == 0]) moments_vir[n] += fproc(tv) + moments_occ = mpi_helper.allreduce(moments_occ) moments_vir = mpi_helper.allreduce(moments_vir) + + # Numerical integration can lead to small non-hermiticity moments_occ = 0.5 * (moments_occ + moments_occ.swapaxes(1, 2)) moments_vir = 0.5 * (moments_vir + moments_vir.swapaxes(1, 2)) + + return moments_occ, moments_vir + + def build_se_moments(self, moments_dd): + """Build the moments of the self-energy via convolution.""" + + cput0 = (lib.logger.process_clock(), lib.logger.perf_counter()) + lib.logger.info(self.gw, "Building self-energy moments") + lib.logger.debug(self.gw, "Memory usage: %.2f GB", self._memory_usage()) + + # Setup dependent on diagonal SE + q0, q1 = self.mpi_slice(self.mo_energy_g.size) + if self.gw.diagonal_se: + eta = np.zeros((q1 - q0, self.nmom_max + 1, self.nmo)) + pq = p = q = "p" + else: + eta = np.zeros((q1 - q0, self.nmom_max + 1, self.nmo, self.nmo)) + pq, p, q = "pq", "p", "q" + + # Get the moments in (aux|aux) and rotate to (mo|mo) + for n in range(self.nmom_max + 1): + eta_aux = np.dot(moments_dd[n], self.integrals.Lia.T) # aux^2 o v + eta_aux = mpi_helper.allreduce(eta_aux) + for x in range(q1 - q0): + Lp = self.integrals.Lpx[:, :, x] + eta[x, n] = lib.einsum(f"P{p},Q{q},PQ->{pq}", Lp, Lp, eta_aux) * 2.0 + cput1 = lib.logger.timer(self.gw, "rotating DD moments", *cput0) + + # Construct the self-energy moments + moments_occ, moments_vir = self.convolve(eta) cput1 = lib.logger.timer(self.gw, "constructing SE moments", *cput1) return moments_occ, moments_vir diff --git a/momentGW/thc.py b/momentGW/thc.py new file mode 100644 index 00000000..37183b4b --- /dev/null +++ b/momentGW/thc.py @@ -0,0 +1,258 @@ +import numpy as np +from h5py import File +from pyscf import lib +from pyscf.agf2 import mpi_helper +from scipy.special import binom + +from momentGW import tda + + +class Integrals: + """ + Container for the integrals required for GW methods. + """ + + def __init__( + self, + with_df, + mo_coeff, + mo_occ, + thc_opts, + ): + self.with_df = with_df + self.mo_coeff = mo_coeff + self.mo_occ = mo_occ + self.filepath = thc_opts["file_path"] + + self._blocks = {} + + def transform(self): + """ + Imports a H5PY file containing a dictionary. Inside the dict, a + 'collocation_matrix' and a 'coulomb_matrix' must be contained + with shapes (aux, MO) and (aux,aux) respectively. 
+ """ + + if self.filepath is None: + raise ValueError("filepath cannot be None for THC implementation") + + thc_eri = File(self.filepath, "r") + coll = np.array(thc_eri["collocation_matrix"]).T[0].T + cou = np.array(thc_eri["coulomb_matrix"][0]).T[0].T + Xip = coll[: self.nocc, :] + Xap = coll[self.nocc :, :] + + self._blocks["coll"] = coll + self._blocks["cou"] = cou + self._blocks["Xip"] = Xip + self._blocks["Xap"] = Xap + + @property + def Coll(self): + """ + Return the (aux, MO) array. + """ + return self._blocks["coll"] + + @property + def Cou(self): + """ + Return the (aux, aux) array. + """ + return self._blocks["cou"] + + @property + def Xip(self): + """ + Return the (aux, W occ) array. + """ + return self._blocks["Xip"] + + @property + def Xap(self): + """ + Return the (aux, W vir) array. + """ + return self._blocks["Xap"] + + @property + def nmo(self): + """ + Return the number of MOs. + """ + return self.mo_coeff.shape[-1] + + @property + def nocc(self): + """ + Return the number of occupied MOs. + """ + return np.sum(self.mo_occ > 0) + + @property + def naux(self): + """ + Return the number of auxiliary basis functions, after the + compression. + """ + return self.Cou.shape[0] + + +class TDA(tda.TDA): + def __init__( + self, + gw, + nmom_max, + integrals, + mo_energy=None, + mo_occ=None, + ): + self.gw = gw + self.integrals = integrals + self.nmom_max = nmom_max + + # Get the MO energies for G and W + if mo_energy is None: + self.mo_energy_g = self.mo_energy_w = gw._scf.mo_energy + elif isinstance(mo_energy, tuple): + self.mo_energy_g, self.mo_energy_w = mo_energy + else: + self.mo_energy_g = self.mo_energy_w = mo_energy + + # Get the MO occupancies for G and W + if mo_occ is None: + self.mo_occ_g = self.mo_occ_w = gw._scf.mo_occ + elif isinstance(mo_occ, tuple): + self.mo_occ_g, self.mo_occ_w = mo_occ + else: + self.mo_occ_g = self.mo_occ_w = mo_occ + + # Options and thresholds + self.report_quadrature_error = True + if "ia" in getattr(self.gw, "compression", "").split(","): + self.compression_tol = gw.compression_tol + else: + self.compression_tol = None + + def build_Z_prime(self): + """ + Form the X_iP X_aP X_iQ X_aQ = Z_X contraction at N^3 cost. + """ + + Y_i_PQ = np.einsum("iP,iQ->PQ", self.XiP, self.XiP) + Y_a_PQ = np.einsum("aP,aQ->PQ", self.XaP, self.XaP) + Z_X_PQ = np.einsum("PQ,PQ->PQ", Y_i_PQ, Y_a_PQ) + + return Z_X_PQ + + def build_dd_moments(self): + """ + Calcualte the moments recusively, in a form similiar to that of + a density-density response, at N^3 cost using only THC elements. 
+ """ + + cput0 = (lib.logger.process_clock(), lib.logger.perf_counter()) + lib.logger.info(self.gw, "Building density-density moments") + lib.logger.debug(self.gw, "Memory usage: %.2f GB", self._memory_usage()) + + zeta = np.zeros((self.total_nmom, self.XiP.shape[1], self.XiP.shape[1])) + ZD_left = np.zeros((self.total_nmom, self.naux, self.naux)) + ZD_only = np.zeros((self.total_nmom, self.naux, self.naux)) + + self.Z_prime = self.build_Z_prime() + self.ZZ = np.einsum("PQ,QR->PR", self.Z, self.Z_prime) + + zeta[0] = self.Z_prime + + cput1 = lib.logger.timer(self.gw, "Zeta zero", *cput0) + + YaP = np.einsum("aP,aQ->PQ", self.XaP, self.XaP) + YiP = np.einsum("aP,aQ->PQ", self.XiP, self.XiP) + + Z_left = np.eye((self.naux)) + + for i in range(1, self.total_nmom): + ZD_left[0] = Z_left + ZD_left = np.roll(ZD_left, 1, axis=0) + + Z_left = np.einsum("PQ,QR->PR", self.ZZ, Z_left) * 2 + + Yei_max = np.einsum("i,iP,iQ->PQ", (-1) ** (i) * self.ei ** (i), self.XiP, self.XiP) + Yea_max = np.einsum("a,aP,aQ->PQ", self.ea ** (i), self.XaP, self.XaP) + ZD_only[i] = np.einsum("PQ,PQ->PQ", Yea_max, YiP) + np.einsum("PQ,PQ->PQ", Yei_max, YaP) + ZD_temp = np.zeros((self.naux, self.naux)) + for j in range(1, i): + Yei = np.einsum("i,iP,iQ->PQ", (-1) ** (j) * self.ei ** (j), self.XiP, self.XiP) + Yea = np.einsum("a,aP,aQ->PQ", binom(i, j) * self.ea ** (i - j), self.XaP, self.XaP) + ZD_only[i] += np.einsum("PQ,PQ->PQ", Yea, Yei) + if j == i - 1: + Z_left += np.einsum("PQ,QR->PR", self.Z, ZD_only[j]) * 2 + else: + Z_left += ( + np.einsum("PQ,QR,RS->PS", self.Z, ZD_only[i - 1 - j], ZD_left[i - j]) * 2 + ) + ZD_temp += np.einsum("PQ,QR->PR", ZD_only[j], ZD_left[j]) + zeta[i] = ZD_only[i] + ZD_temp + np.einsum("PQ,QR->PR", self.Z_prime, Z_left) + cput1 = lib.logger.timer(self.gw, "Zeta %d" % i, *cput1) + + return zeta + + def build_se_moments(self, zeta): + """ + Build the moments of the self-energy via convolution. 
+ """ + + cput0 = (lib.logger.process_clock(), lib.logger.perf_counter()) + lib.logger.info(self.gw, "Building self-energy moments") + lib.logger.debug(self.gw, "Memory usage: %.2f GB", self._memory_usage()) + + # Setup dependent on diagonal SE + q0, q1 = self.mpi_slice(self.mo_energy_g.size) + if self.gw.diagonal_se: + eta = np.zeros((q1 - q0, self.nmom_max + 1, self.nmo)) + pq = p = q = "p" + else: + eta = np.zeros((q1 - q0, self.nmom_max + 1, self.nmo, self.nmo)) + pq, p, q = "pq", "p", "q" + + # Get the moments in (aux|aux) and rotate to (mo|mo) + for n in range(self.nmom_max + 1): + zeta_prime = np.linalg.multi_dot((self.Z, zeta[n], self.Z)) + for x in range(q1 - q0): + Lp = lib.einsum("pP,P->Pp", self.integrals.Coll, self.integrals.Coll[x]) + eta[x, n] = np.einsum(f"P{p},Q{q},PQ->{pq}", Lp, Lp, zeta_prime) * 2.0 + cput1 = lib.logger.timer(self.gw, "rotating DD moments", *cput0) + + # Construct the self-energy moments + moments_occ, moments_vir = self.convolve(eta) + cput1 = lib.logger.timer(self.gw, "constructing SE moments", *cput1) + + return moments_occ, moments_vir + + @property + def total_nmom(self): + return self.nmom_max + 1 + + @property + def total_nmom(self): + return self.nmom_max + 1 + + @property + def XiP(self): + return self.integrals.Xip + + @property + def XaP(self): + return self.integrals.XaP + + @property + def Z(self): + return self.integrals.Cou + + @property + def ea(self): + return self.mo_energy_w[self.mo_occ_w == 0] + + @property + def ea(self): + return self.mo_energy_w[self.mo_occ_w > 0] diff --git a/momentGW/util.py b/momentGW/util.py index efc7d7fe..78f92b69 100644 --- a/momentGW/util.py +++ b/momentGW/util.py @@ -23,14 +23,44 @@ def update_with_scaling(self, x, axis, xerr=None): scale = np.max(np.abs(x), axis=axis, keepdims=True) + # Scale x = x / scale if xerr: xerr = xerr / scale + + # Execute DIIS x = self.update(x, xerr=xerr) + + # Rescale x = x * scale return x + def update_with_complex_unravel(self, x, xerr=None): + """Execute DIIS where the error vectors are unravelled to + concatenate the real and imaginary parts. + """ + + if not np.iscomplexobj(x): + return self.update(x, xerr=xerr) + + shape = x.shape + size = x.size + + # Concatenate + x = np.concatenate([np.real(x).ravel(), np.imag(x).ravel()]) + if xerr is not None: + xerr = np.concatenate([np.real(xerr).ravel(), np.imag(xerr).ravel()]) + + # Execute DIIS + x = self.update(x, xerr=xerr) + + # Unravel + x = x[:size] + 1j * x[size:] + x = x.reshape(shape) + + return x + def extrapolate(self, nd=None): if nd is None: nd = self.get_num_vec() @@ -57,13 +87,10 @@ def extrapolate(self, nd=None): if np.all(abs(c) < 1e-14): raise np.linalg.linalg.LinAlgError("DIIS vectors are fully linearly dependent.") - xnew = None + xnew = 0.0 for i, ci in enumerate(c[1:]): - xi = self.get_vec(i) - if xnew is None: - xnew = np.zeros(xi.size, c.dtype) - for p0, p1 in lib.prange(0, xi.size, lib.diis.BLOCK_SIZE): - xnew[p0:p1] += xi[p0:p1] * ci + xnew += self.get_vec(i) * ci + return xnew @@ -93,3 +120,20 @@ def __exit__(self, exc_type, exc_value, traceback): self.mf.verbose = self._mf_verbose if getattr(self.mf, "with_df", None): self.mf.with_df.verbose = self._df_verbose + + +def list_union(*args): + """ + Find the union of a list of lists, with the elements sorted + by their first occurrence. 
+ """ + + cache = set() + out = [] + for arg in args: + for x in arg: + if x not in cache: + cache.add(x) + out.append(x) + + return out diff --git a/tests/test_evgw.py b/tests/test_evgw.py index 69068fe8..b0d54c82 100644 --- a/tests/test_evgw.py +++ b/tests/test_evgw.py @@ -41,7 +41,7 @@ def test_nelec(self): gw = evGW(self.mf) gw.diagonal_se = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, @@ -49,7 +49,7 @@ def test_nelec(self): ) gw.optimise_chempot = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, @@ -57,7 +57,7 @@ def test_nelec(self): ) gw.fock_loop = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, diff --git a/tests/test_evkgw.py b/tests/test_evkgw.py new file mode 100644 index 00000000..db5f3be3 --- /dev/null +++ b/tests/test_evkgw.py @@ -0,0 +1,153 @@ +""" +Tests for `pbc/evgw.py` +""" + +import unittest + +import numpy as np +import pytest +from pyscf.pbc import gto, dft +from pyscf.pbc.tools import k2gamma +from pyscf.agf2 import mpi_helper + +from momentGW import evGW +from momentGW import evKGW + + +class Test_evKGW(unittest.TestCase): + @classmethod + def setUpClass(cls): + cell = gto.Cell() + cell.atom = "He 0 0 0; He 1 1 1" + cell.basis = "6-31g" + cell.a = np.eye(3) * 3 + cell.verbose = 0 + cell.build() + + kmesh = [3, 1, 1] + kpts = cell.make_kpts(kmesh) + + mf = dft.KRKS(cell, kpts, xc="hf") + mf = mf.density_fit(auxbasis="weigend") + mf.conv_tol = 1e-10 + mf.kernel() + + for k in range(len(kpts)): + mf.mo_coeff[k] = mpi_helper.bcast_dict(mf.mo_coeff[k], root=0) + mf.mo_energy[k] = mpi_helper.bcast_dict(mf.mo_energy[k], root=0) + + smf = k2gamma.k2gamma(mf, kmesh=kmesh) + smf = smf.density_fit(auxbasis="weigend") + + cls.cell, cls.kpts, cls.mf, cls.smf = cell, kpts, mf, smf + + @classmethod + def tearDownClass(cls): + del cls.cell, cls.kpts, cls.mf, cls.smf + + def test_supercell_valid(self): + # Require real MOs for supercell comparison + + scell, phase = k2gamma.get_phase(self.cell, self.kpts) + nk, nao, nmo = np.shape(self.mf.mo_coeff) + nr, _ = np.shape(phase) + + k_conj_groups = k2gamma.group_by_conj_pairs(self.cell, self.kpts, return_kpts_pairs=False) + k_phase = np.eye(nk, dtype=np.complex128) + r2x2 = np.array([[1., 1j], [1., -1j]]) * .5**.5 + pairs = [[k, k_conj] for k, k_conj in k_conj_groups + if k_conj is not None and k != k_conj] + for idx in np.array(pairs): + k_phase[idx[:, None], idx] = r2x2 + + c_gamma = np.einsum('Rk,kum,kh->Ruhm', phase, self.mf.mo_coeff, k_phase) + c_gamma = c_gamma.reshape(nao*nr, nk*nmo) + c_gamma[:, abs(c_gamma.real).max(axis=0) < 1e-5] *= -1j + + self.assertAlmostEqual(np.max(np.abs(np.array(c_gamma).imag)), 0, 8) + + def _test_vs_supercell(self, gw, kgw, full=False, check_convergence=True): + if check_convergence: + self.assertTrue(gw.converged) + self.assertTrue(kgw.converged) + e1 = np.concatenate([gf.energy for gf in kgw.gf]) + w1 = np.concatenate([np.linalg.norm(gf.coupling, axis=0)**2 for gf in kgw.gf]) + mask = np.argsort(e1) + e1 = e1[mask] + w1 = w1[mask] + e2 = gw.gf.energy + w2 = np.linalg.norm(gw.gf.coupling, axis=0)**2 + if full: + np.testing.assert_allclose(e1, e2, atol=1e-8) + else: + np.testing.assert_allclose(e1[w1 > 0.5], e2[w2 > 0.5], atol=1e-8) 
+ + def test_dtda_vs_supercell(self): + nmom_max = 3 + + kgw = evKGW(self.mf) + kgw.polarizability = "dtda" + kgw.max_cycle = 50 + kgw.conv_tol = 1e-8 + kgw.damping = 0.5 + kgw.compression = None + kgw.kernel(nmom_max) + + gw = evGW(self.smf) + gw.polarizability = "dtda" + gw.max_cycle = 50 + gw.conv_tol = 1e-8 + gw.damping = 0.5 + gw.compression = None + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + def test_dtda_vs_supercell_diagonal_w0(self): + nmom_max = 1 + + kgw = evKGW(self.mf) + kgw.polarizability = "dtda" + kgw.max_cycle = 200 + kgw.conv_tol = 1e-8 + kgw.diagonal_se = True + kgw.w0 = True + kgw.compression = None + kgw.kernel(nmom_max) + + gw = evGW(self.smf) + gw.polarizability = "dtda" + gw.max_cycle = 200 + gw.conv_tol = 1e-8 + gw.diagonal_se = True + gw.w0 = True + gw.compression = None + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + def test_dtda_vs_supercell_g0(self): + nmom_max = 1 + + kgw = evKGW(self.mf) + kgw.polarizability = "dtda" + kgw.max_cycle = 5 + kgw.damping = 0.5 + kgw.g0 = True + kgw.compression = None + kgw.kernel(nmom_max) + + gw = evGW(self.smf) + gw.polarizability = "dtda" + gw.max_cycle = 5 + gw.damping = 0.5 + gw.g0 = True + gw.compression = None + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw, full=True, check_convergence=False) + + +if __name__ == "__main__": + print("Running tests for evKGW") + unittest.main() diff --git a/tests/test_gw.py b/tests/test_gw.py index 49219e03..4bf931a9 100644 --- a/tests/test_gw.py +++ b/tests/test_gw.py @@ -45,7 +45,7 @@ def test_vs_pyscf_vhf_df(self): gw = GW(self.mf) gw.diagonal_se = True gw.vhf_df = True - conv, gf, se = gw.kernel(nmom_max=7) + conv, gf, se, _ = gw.kernel(nmom_max=7) gf.remove_uncoupled(tol=1e-8) self.assertAlmostEqual( gf.get_occupied().energy.max(), @@ -62,7 +62,7 @@ def test_vs_pyscf_no_vhf_df(self): gw = GW(self.mf) gw.diagonal_se = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=7) + conv, gf, se, _ = gw.kernel(nmom_max=7) gf.remove_uncoupled(tol=1e-8) self.assertAlmostEqual( gf.get_occupied().energy.max(), @@ -79,7 +79,7 @@ def test_nelec(self): gw = GW(self.mf) gw.diagonal_se = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, @@ -87,7 +87,7 @@ def test_nelec(self): ) gw.optimise_chempot = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, @@ -99,7 +99,7 @@ def test_moments(self): gw.diagonal_se = True gw.vhf_df = False th1, tp1 = gw.build_se_moments(5, gw.ao2mo()) - conv, gf, se = gw.kernel(nmom_max=5) + conv, gf, se, _ = gw.kernel(nmom_max=5) th2 = se.get_occupied().moment(range(5)) tp2 = se.get_virtual().moment(range(5)) diff --git a/tests/test_kgw.py b/tests/test_kgw.py new file mode 100644 index 00000000..a43433be --- /dev/null +++ b/tests/test_kgw.py @@ -0,0 +1,136 @@ +""" +Tests for `pbc/gw.py` +""" + +import unittest + +import numpy as np +import pytest +from pyscf.pbc import gto, dft, scf +from pyscf.pbc.tools import k2gamma +from pyscf.agf2 import mpi_helper + +from momentGW import GW +from momentGW import KGW + + +class Test_KGW(unittest.TestCase): + @classmethod + def setUpClass(cls): + cell = gto.Cell() + cell.atom = "He 0 0 0; He 1 1 1" + cell.basis = "6-31g" + cell.a = np.eye(3) * 3 + cell.max_memory = 1e10 + cell.verbose = 0 + cell.build() + + kmesh = [3, 1, 1] + kpts = 
cell.make_kpts(kmesh) + + mf = dft.KRKS(cell, kpts, xc="hf") + #mf = scf.KRHF(cell, kpts) + mf = mf.density_fit(auxbasis="weigend") + mf.with_df._prefer_ccdf = True + mf.with_df.force_dm_kbuild = True + mf.exxdiv = None + mf.conv_tol = 1e-10 + mf.kernel() + + for k in range(len(kpts)): + mf.mo_coeff[k] = mpi_helper.bcast_dict(mf.mo_coeff[k], root=0) + mf.mo_energy[k] = mpi_helper.bcast_dict(mf.mo_energy[k], root=0) + + smf = k2gamma.k2gamma(mf, kmesh=kmesh) + smf = smf.density_fit(auxbasis="weigend") + smf.exxdiv = None + smf.with_df._prefer_ccdf = True + smf.with_df.force_dm_kbuild = True + + cls.cell, cls.kpts, cls.mf, cls.smf = cell, kpts, mf, smf + + @classmethod + def tearDownClass(cls): + del cls.cell, cls.kpts, cls.mf, cls.smf + + def test_supercell_valid(self): + # Require real MOs for supercell comparison + + scell, phase = k2gamma.get_phase(self.cell, self.kpts) + nk, nao, nmo = np.shape(self.mf.mo_coeff) + nr, _ = np.shape(phase) + + k_conj_groups = k2gamma.group_by_conj_pairs(self.cell, self.kpts, return_kpts_pairs=False) + k_phase = np.eye(nk, dtype=np.complex128) + r2x2 = np.array([[1., 1j], [1., -1j]]) * .5**.5 + pairs = [[k, k_conj] for k, k_conj in k_conj_groups + if k_conj is not None and k != k_conj] + for idx in np.array(pairs): + k_phase[idx[:, None], idx] = r2x2 + + c_gamma = np.einsum('Rk,kum,kh->Ruhm', phase, self.mf.mo_coeff, k_phase) + c_gamma = c_gamma.reshape(nao*nr, nk*nmo) + c_gamma[:, abs(c_gamma.real).max(axis=0) < 1e-5] *= -1j + + self.assertAlmostEqual(np.max(np.abs(np.array(c_gamma).imag)), 0, 8) + + def _test_vs_supercell(self, gw, kgw, full=False, tol=1e-8): + e1 = np.concatenate([gf.energy for gf in kgw.gf]) + w1 = np.concatenate([np.linalg.norm(gf.coupling, axis=0)**2 for gf in kgw.gf]) + mask = np.argsort(e1) + e1 = e1[mask] + w1 = w1[mask] + e2 = gw.gf.energy + w2 = np.linalg.norm(gw.gf.coupling, axis=0)**2 + if full: + np.testing.assert_allclose(e1, e2, atol=tol) + else: + np.testing.assert_allclose(e1[w1 > 1e-1], e2[w2 > 1e-1], atol=tol) + + def test_dtda_vs_supercell(self): + nmom_max = 5 + + kgw = KGW(self.mf) + kgw.polarizability = "dtda" + kgw.kernel(nmom_max) + + gw = GW(self.smf) + gw.__dict__.update({opt: getattr(kgw, opt) for opt in kgw._opts}) + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw, full=True) + + def test_dtda_vs_supercell_fock_loop(self): + nmom_max = 5 + + kgw = KGW(self.mf) + kgw.polarizability = "dtda" + kgw.fock_loop = True + kgw.compression = None + kgw.kernel(nmom_max) + + gw = GW(self.smf) + gw.__dict__.update({opt: getattr(kgw, opt) for opt in kgw._opts}) + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + def test_dtda_vs_supercell_compression(self): + nmom_max = 5 + + kgw = KGW(self.mf) + kgw.polarizability = "dtda" + kgw.compression = "ov,oo,vv" + kgw.compression_tol = 1e-7 + kgw.kernel(nmom_max) + + gw = GW(self.smf) + gw.__dict__.update({opt: getattr(kgw, opt) for opt in kgw._opts}) + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw, full=False, tol=1e-6) + + +if __name__ == "__main__": + print("Running tests for KGW") + unittest.main() diff --git a/tests/test_qsgw.py b/tests/test_qsgw.py index 6dde7c3a..a365af26 100644 --- a/tests/test_qsgw.py +++ b/tests/test_qsgw.py @@ -15,69 +15,48 @@ class Test_qsGW(unittest.TestCase): @classmethod def setUpClass(cls): - mol = gto.Mole() - mol.atom = "Li 0 0 0; H 0 0 1.64" - mol.basis = "cc-pvdz" - mol.verbose = 0 - mol.build() - - mf = dft.RKS(mol) - mf.xc = "hf" - mf.conv_tol = 1e-11 - mf.kernel() - mf.mo_coeff = mpi_helper.bcast_dict(mf.mo_coeff, 
root=0) - mf.mo_energy = mpi_helper.bcast_dict(mf.mo_energy, root=0) - - mf = mf.density_fit(auxbasis="cc-pv5z-ri") - mf.with_df.build() - - cls.mol, cls.mf = mol, mf + pass @classmethod def tearDownClass(cls): - del cls.mol, cls.mf - - def test_nelec(self): - gw = qsGW(self.mf) - gw.diagonal_se = True - gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) - self.assertAlmostEqual( - gf.make_rdm1().trace(), - self.mol.nelectron, - 1, - ) - gw.optimise_chempot = True - gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) - self.assertAlmostEqual( - gf.make_rdm1().trace(), - self.mol.nelectron, - 8, - ) + pass - def _test_regression(self, xc, kwargs, nmom_max, ip, ea, name=""): + def _test_regression(self, xc, kwargs, nmom_max, ip, ea, ip_full, ea_full, name=""): mol = gto.M(atom="H 0 0 0; Li 0 0 1.64", basis="6-31g", verbose=0) mf = dft.RKS(mol, xc=xc).density_fit().run() mf.mo_coeff = mpi_helper.bcast_dict(mf.mo_coeff, root=0) mf.mo_energy = mpi_helper.bcast_dict(mf.mo_energy, root=0) gw = qsGW(mf, **kwargs) gw.max_cycle = 200 + gw.conv_tol = 1e-10 + gw.conv_tol_qp = 1e-10 + gw.damping = 0.5 gw.kernel(nmom_max) - gw.gf.remove_uncoupled(tol=0.1) + gw.gf.remove_uncoupled(tol=0.5) + qp_energy = gw.qp_energy self.assertTrue(gw.converged) - self.assertAlmostEqual(gw.gf.get_occupied().energy[-1], ip, 7, msg=name) - self.assertAlmostEqual(gw.gf.get_virtual().energy[0], ea, 7, msg=name) + self.assertAlmostEqual(np.max(qp_energy[mf.mo_occ > 0]), ip, 7, msg=name) + self.assertAlmostEqual(np.min(qp_energy[mf.mo_occ == 0]), ea, 7, msg=name) + self.assertAlmostEqual(gw.gf.get_occupied().energy[-1], ip_full, 7, msg=name) + self.assertAlmostEqual(gw.gf.get_virtual().energy[0], ea_full, 7, msg=name) def test_regression_simple(self): - ip = -0.283719805037 - ea = 0.007318176449 - self._test_regression("hf", dict(), 1, ip, ea, "simple") + # Quasiparticle energies: + ip = -0.283873786007 + ea = 0.007418993395 + # GF poles: + ip_full = -0.265327792151 + ea_full = 0.005099478300 + self._test_regression("hf", dict(), 1, ip, ea, ip_full, ea_full, "simple") def test_regression_pbe_srg(self): - ip = -0.298283765946 - ea = 0.008369048047 - self._test_regression("pbe", dict(srg=1e-3), 1, ip, ea, "pbe srg") + # Quasiparticle energies: + ip = -0.278223798218 + ea = 0.006428331070 + # GF poles: + ip_full = -0.382666250130 + ea_full = 0.056791348829 + self._test_regression("pbe", dict(srg=1000), 3, ip, ea, ip_full, ea_full, "pbe srg") if __name__ == "__main__": diff --git a/tests/test_qskgw.py b/tests/test_qskgw.py new file mode 100644 index 00000000..ea9002f9 --- /dev/null +++ b/tests/test_qskgw.py @@ -0,0 +1,127 @@ +""" +Tests for `pbc/qsgw.py` +""" + +import unittest + +import numpy as np +import pytest +from pyscf.pbc import gto, dft +from pyscf.pbc.tools import k2gamma +from pyscf.agf2 import mpi_helper + +from momentGW import qsGW +from momentGW import qsKGW + + +class Test_qsKGW(unittest.TestCase): + @classmethod + def setUpClass(cls): + cell = gto.Cell() + cell.atom = "He 0 0 0; He 1 1 1" + cell.basis = "6-31g" + cell.a = np.eye(3) * 3 + cell.verbose = 0 + cell.precision = 1e-12 + cell.build() + + kmesh = [3, 1, 1] + kpts = cell.make_kpts(kmesh) + + mf = dft.KRKS(cell, kpts, xc="hf") + mf = mf.density_fit(auxbasis="weigend") + mf.conv_tol = 1e-11 + mf.kernel() + + for k in range(len(kpts)): + mf.mo_coeff[k] = mpi_helper.bcast_dict(mf.mo_coeff[k], root=0) + mf.mo_energy[k] = mpi_helper.bcast_dict(mf.mo_energy[k], root=0) + + smf = k2gamma.k2gamma(mf, kmesh=kmesh) + smf = 
smf.density_fit(auxbasis="weigend") + + cls.cell, cls.kpts, cls.mf, cls.smf = cell, kpts, mf, smf + + @classmethod + def tearDownClass(cls): + del cls.cell, cls.kpts, cls.mf, cls.smf + + def test_supercell_valid(self): + # Require real MOs for supercell comparison + + scell, phase = k2gamma.get_phase(self.cell, self.kpts) + nk, nao, nmo = np.shape(self.mf.mo_coeff) + nr, _ = np.shape(phase) + + k_conj_groups = k2gamma.group_by_conj_pairs(self.cell, self.kpts, return_kpts_pairs=False) + k_phase = np.eye(nk, dtype=np.complex128) + r2x2 = np.array([[1., 1j], [1., -1j]]) * .5**.5 + pairs = [[k, k_conj] for k, k_conj in k_conj_groups + if k_conj is not None and k != k_conj] + for idx in np.array(pairs): + k_phase[idx[:, None], idx] = r2x2 + + c_gamma = np.einsum('Rk,kum,kh->Ruhm', phase, self.mf.mo_coeff, k_phase) + c_gamma = c_gamma.reshape(nao*nr, nk*nmo) + c_gamma[:, abs(c_gamma.real).max(axis=0) < 1e-5] *= -1j + + self.assertAlmostEqual(np.max(np.abs(np.array(c_gamma).imag)), 0, 8) + + def _test_vs_supercell(self, gw, kgw, full=False, check_convergence=True): + if check_convergence: + self.assertTrue(gw.converged) + self.assertTrue(kgw.converged) + if full: + e1 = np.sort(np.concatenate([gf.energy for gf in kgw.gf])) + e2 = gw.gf.energy + else: + e1 = np.sort(kgw.qp_energy.ravel()) + e2 = gw.qp_energy + np.testing.assert_allclose(e1, e2, atol=1e-8) + + def test_dtda_vs_supercell(self): + nmom_max = 3 + + kgw = qsKGW(self.mf) + kgw.polarizability = "dtda" + kgw.compression = None + kgw.conv_tol_qp = 1e-10 + kgw.conv_tol = 1e-10 + kgw.eta = 1e-2 + kgw.kernel(nmom_max) + + gw = qsGW(self.smf) + gw.polarizability = "dtda" + gw.compression = None + gw.conv_tol_qp = 1e-10 + gw.conv_tol = 1e-10 + gw.eta = 1e-2 + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + def test_dtda_vs_supercell_srg(self): + nmom_max = 5 + + kgw = qsKGW(self.mf) + kgw.polarizability = "dtda" + kgw.srg = 100 + kgw.compression = None + kgw.conv_tol_qp = 1e-12 + kgw.conv_tol = 1e-10 + kgw.kernel(nmom_max) + + gw = qsGW(self.smf) + gw.polarizability = "dtda" + gw.srg = 100 + gw.compression = None + gw.conv_tol_qp = 1e-12 + gw.conv_tol = 1e-10 + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + +if __name__ == "__main__": + print("Running tests for qsKGW") + unittest.main() diff --git a/tests/test_scgw.py b/tests/test_scgw.py index 23dc7501..2a5d6f86 100644 --- a/tests/test_scgw.py +++ b/tests/test_scgw.py @@ -41,7 +41,7 @@ def test_nelec(self): gw = scGW(self.mf) gw.diagonal_se = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, @@ -49,7 +49,7 @@ def test_nelec(self): ) gw.optimise_chempot = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, @@ -57,7 +57,7 @@ def test_nelec(self): ) gw.fock_loop = True gw.vhf_df = False - conv, gf, se = gw.kernel(nmom_max=1) + conv, gf, se, _ = gw.kernel(nmom_max=1) self.assertAlmostEqual( gf.make_rdm1().trace(), self.mol.nelectron, diff --git a/tests/test_sckgw.py b/tests/test_sckgw.py new file mode 100644 index 00000000..138a0476 --- /dev/null +++ b/tests/test_sckgw.py @@ -0,0 +1,150 @@ +""" +Tests for `pbc/scgw.py` +""" + +import unittest + +import numpy as np +import pytest +from pyscf.pbc import gto, dft +from pyscf.pbc.tools import k2gamma +from pyscf.agf2 import mpi_helper + +from momentGW import scGW +from momentGW import 
scKGW + + +class Test_scKGW(unittest.TestCase): + @classmethod + def setUpClass(cls): + cell = gto.Cell() + cell.atom = "He 0 0 0; He 1 1 1" + cell.basis = "6-31g" + cell.a = np.eye(3) * 3 + cell.precision = 1e-11 + cell.verbose = 0 + cell.build() + + kmesh = [3, 1, 1] + kpts = cell.make_kpts(kmesh) + + mf = dft.KRKS(cell, kpts, xc="hf") + mf = mf.density_fit(auxbasis="weigend") + mf.conv_tol = 1e-11 + mf.kernel() + + for k in range(len(kpts)): + mf.mo_coeff[k] = mpi_helper.bcast_dict(mf.mo_coeff[k], root=0) + mf.mo_energy[k] = mpi_helper.bcast_dict(mf.mo_energy[k], root=0) + + smf = k2gamma.k2gamma(mf, kmesh=kmesh) + smf = smf.density_fit(auxbasis="weigend") + + cls.cell, cls.kpts, cls.mf, cls.smf = cell, kpts, mf, smf + + @classmethod + def tearDownClass(cls): + del cls.cell, cls.kpts, cls.mf, cls.smf + + def test_supercell_valid(self): + # Require real MOs for supercell comparison + + scell, phase = k2gamma.get_phase(self.cell, self.kpts) + nk, nao, nmo = np.shape(self.mf.mo_coeff) + nr, _ = np.shape(phase) + + k_conj_groups = k2gamma.group_by_conj_pairs(self.cell, self.kpts, return_kpts_pairs=False) + k_phase = np.eye(nk, dtype=np.complex128) + r2x2 = np.array([[1., 1j], [1., -1j]]) * .5**.5 + pairs = [[k, k_conj] for k, k_conj in k_conj_groups + if k_conj is not None and k != k_conj] + for idx in np.array(pairs): + k_phase[idx[:, None], idx] = r2x2 + + c_gamma = np.einsum('Rk,kum,kh->Ruhm', phase, self.mf.mo_coeff, k_phase) + c_gamma = c_gamma.reshape(nao*nr, nk*nmo) + c_gamma[:, abs(c_gamma.real).max(axis=0) < 1e-5] *= -1j + + self.assertAlmostEqual(np.max(np.abs(np.array(c_gamma).imag)), 0, 8) + + def _test_vs_supercell(self, gw, kgw, full=False, check_convergence=True): + if check_convergence: + self.assertTrue(gw.converged) + self.assertTrue(kgw.converged) + if full: + e1 = np.sort(np.concatenate([gf.energy for gf in kgw.gf])) + e2 = gw.gf.energy + else: + e1 = np.sort(kgw.qp_energy.ravel()) + e2 = gw.qp_energy + np.testing.assert_allclose(e1, e2, atol=1e-8) + + def test_dtda_vs_supercell(self): + nmom_max = 3 + + kgw = scKGW(self.mf) + kgw.polarizability = "dtda" + kgw.max_cycle = 50 + kgw.conv_tol = 1e-8 + kgw.damping = 0.5 + kgw.compression = None + kgw.kernel(nmom_max) + + gw = scGW(self.smf) + gw.polarizability = "dtda" + gw.max_cycle = 50 + gw.conv_tol = 1e-8 + gw.damping = 0.5 + gw.compression = None + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + def test_dtda_vs_supercell_diagonal_w0(self): + nmom_max = 1 + + kgw = scKGW(self.mf) + kgw.polarizability = "dtda" + kgw.max_cycle = 200 + kgw.conv_tol = 1e-8 + kgw.diagonal_se = True + kgw.w0 = True + kgw.compression = None + kgw.kernel(nmom_max) + + gw = scGW(self.smf) + gw.polarizability = "dtda" + gw.max_cycle = 200 + gw.conv_tol = 1e-8 + gw.diagonal_se = True + gw.w0 = True + gw.compression = None + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw) + + def test_dtda_vs_supercell_g0(self): + nmom_max = 1 + + kgw = scKGW(self.mf) + kgw.polarizability = "dtda" + kgw.max_cycle = 200 + kgw.conv_tol = 1e-8 + kgw.g0 = True + kgw.compression = None + kgw.kernel(nmom_max) + + gw = scGW(self.smf) + gw.polarizability = "dtda" + gw.max_cycle = 200 + gw.conv_tol = 1e-8 + gw.g0 = True + gw.compression = None + gw.kernel(nmom_max) + + self._test_vs_supercell(gw, kgw, full=True, check_convergence=False) + + +if __name__ == "__main__": + print("Running tests for scKGW") + unittest.main()
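Editor's note on the updated interface exercised by the tests above (illustrative sketch, not part of the patch): the kernels now return a 4-tuple (converged, gf, se, qp_energy), and the solvers expose a qp_energy attribute from which IPs and EAs are extracted in the regression tests. A minimal molecular example with a hypothetical system and settings:

import numpy as np
from pyscf import gto, dft
from momentGW import GW, qsGW

mol = gto.M(atom="He 0 0 0", basis="6-31g", verbose=0)
mf = dft.RKS(mol, xc="hf").density_fit().run()

# Kernels now return a 4-tuple; the last element holds the quasiparticle
# energies where defined (e.g. it is None for scGW).
conv, gf, se, _ = GW(mf).kernel(nmom_max=3)

gw = qsGW(mf)
gw.kernel(nmom_max=1)
ip = -np.max(gw.qp_energy[mf.mo_occ > 0])   # ionisation potential
ea = np.min(gw.qp_energy[mf.mo_occ == 0])   # electron affinity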