From f55a50bb64ff15573c448fdca2c8ee6570e8174d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Thu, 4 Jan 2024 16:31:37 -0500 Subject: [PATCH 01/18] Switch LogSAS to have phi bin last --- include/BinType.h | 2 +- tests/test_ggg.py | 268 +++++++++++++++++++++--------------------- tests/test_kkk.py | 182 ++++++++++++++-------------- tests/test_nnn.py | 72 ++++++------ treecorr/corr3base.py | 51 ++++---- 5 files changed, 283 insertions(+), 292 deletions(-) diff --git a/include/BinType.h b/include/BinType.h index a687ab52..5360b307 100644 --- a/include/BinType.h +++ b/include/BinType.h @@ -1223,7 +1223,7 @@ struct BinTypeHelper Assert(kphi < nphibins); xdbg<<"d1,d2,d3,phi = "<= 0); Assert(index < ntot); diff --git a/tests/test_ggg.py b/tests/test_ggg.py index ebd83afe..5c2692c0 100644 --- a/tests/test_ggg.py +++ b/tests/test_ggg.py @@ -2256,12 +2256,12 @@ def test_direct_logsas(): log_max_sep = np.log(max_sep) bin_size = (log_max_sep - log_min_sep) / nbins phi_bin_size = np.pi/nphi_bins - true_ntri = np.zeros((nbins, nphi_bins, nbins), dtype=int) - true_weight = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_gam0 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam1 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam2 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam3 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) + true_ntri = np.zeros((nbins, nbins, nphi_bins), dtype=int) + true_weight = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_gam0 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam1 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam2 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam3 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) for i in range(ngal): for j in range(ngal): if i == j: continue @@ -2286,8 +2286,8 @@ def test_direct_logsas(): kr3 = int(np.floor( (np.log(r3)-log_min_sep) / bin_size )) kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kr2 
< nbins - assert 0 <= kphi < nphi_bins assert 0 <= kr3 < nbins + assert 0 <= kphi < nphi_bins # Rotate shears to coordinates where line connecting to center is horizontal. cenx = (x[i] + x[j] + x[k])/3. @@ -2309,12 +2309,12 @@ def test_direct_logsas(): gam2 = www * g1p * np.conjugate(g2p) * g3p gam3 = www * g1p * g2p * np.conjugate(g3p) - true_ntri[kr2,kphi,kr3] += 1 - true_weight[kr2,kphi,kr3] += www - true_gam0[kr2,kphi,kr3] += gam0 - true_gam1[kr2,kphi,kr3] += gam1 - true_gam2[kr2,kphi,kr3] += gam2 - true_gam3[kr2,kphi,kr3] += gam3 + true_ntri[kr2,kr3,kphi] += 1 + true_weight[kr2,kr3,kphi] += www + true_gam0[kr2,kr3,kphi] += gam0 + true_gam1[kr2,kr3,kphi] += gam1 + true_gam2[kr2,kr3,kphi] += gam2 + true_gam3[kr2,kr3,kphi] += gam3 pos = true_weight > 0 true_gam0[pos] /= true_weight[pos] @@ -2493,12 +2493,12 @@ def test_direct_logsas_spherical(): r = np.sqrt(x**2 + y**2 + z**2) x /= r; y /= r; z /= r - true_ntri = np.zeros((nbins, nphi_bins, nbins), dtype=int) - true_weight = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_gam0 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam1 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam2 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam3 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) + true_ntri = np.zeros((nbins, nbins, nphi_bins), dtype=int) + true_weight = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_gam0 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam1 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam2 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam3 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) rad_min_sep = min_sep * coord.degrees / coord.radians rad_max_sep = max_sep * coord.degrees / coord.radians @@ -2529,8 +2529,8 @@ def test_direct_logsas_spherical(): kr3 = int(np.floor(np.log(d3/rad_min_sep) / bin_size)) kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kr2 < nbins - assert 0 <= kphi < 
nphi_bins assert 0 <= kr3 < nbins + assert 0 <= kphi < nphi_bins # Rotate shears to coordinates where line connecting to center is horizontal. # Original orientation is where north is up. @@ -2555,12 +2555,12 @@ def test_direct_logsas_spherical(): gam2 = www * g1p * np.conjugate(g2p) * g3p gam3 = www * g1p * g2p * np.conjugate(g3p) - true_ntri[kr2,kphi,kr3] += 1 - true_weight[kr2,kphi,kr3] += www - true_gam0[kr2,kphi,kr3] += gam0 - true_gam1[kr2,kphi,kr3] += gam1 - true_gam2[kr2,kphi,kr3] += gam2 - true_gam3[kr2,kphi,kr3] += gam3 + true_ntri[kr2,kr3,kphi] += 1 + true_weight[kr2,kr3,kphi] += www + true_gam0[kr2,kr3,kphi] += gam0 + true_gam1[kr2,kr3,kphi] += gam1 + true_gam2[kr2,kr3,kphi] += gam2 + true_gam3[kr2,kr3,kphi] += gam3 pos = true_weight > 0 true_gam0[pos] /= true_weight[pos] @@ -2651,42 +2651,42 @@ def test_direct_logsas_cross(): ggg.process(cat1, cat2, cat3, num_threads=2) # Figure out the correct answer for each permutation - true_ntri_123 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_132 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_213 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_231 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_312 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_321 = np.zeros((nbins, nphi_bins, nbins)) - true_gam0_123 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam0_132 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam0_213 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam0_231 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam0_312 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam0_321 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam1_123 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam1_132 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam1_213 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam1_231 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam1_312 = 
np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam1_321 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam2_123 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam2_132 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam2_213 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam2_231 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam2_312 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam2_321 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam3_123 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam3_132 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam3_213 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam3_231 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam3_312 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_gam3_321 = np.zeros((nbins, nphi_bins, nbins), dtype=complex ) - true_weight_123 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_132 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_213 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_231 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_312 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_321 = np.zeros((nbins, nphi_bins, nbins)) + true_ntri_123 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_132 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_213 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_231 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_312 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_321 = np.zeros((nbins, nbins, nphi_bins)) + true_gam0_123 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam0_132 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam0_213 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam0_231 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam0_312 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam0_321 = 
np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam1_123 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam1_132 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam1_213 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam1_231 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam1_312 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam1_321 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam2_123 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam2_132 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam2_213 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam2_231 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam2_312 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam2_321 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam3_123 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam3_132 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam3_213 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam3_231 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam3_312 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_gam3_321 = np.zeros((nbins, nbins, nphi_bins), dtype=complex ) + true_weight_123 = np.zeros((nbins, nbins, nphi_bins)) + true_weight_132 = np.zeros((nbins, nbins, nphi_bins)) + true_weight_213 = np.zeros((nbins, nbins, nphi_bins)) + true_weight_231 = np.zeros((nbins, nbins, nphi_bins)) + true_weight_312 = np.zeros((nbins, nbins, nphi_bins)) + true_weight_321 = np.zeros((nbins, nbins, nphi_bins)) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) bin_size = (log_max_sep - log_min_sep) / nbins @@ -2736,23 +2736,23 @@ def test_direct_logsas_cross(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_123[kr2,kphi,kr3] += 1 - true_weight_123[kr2,kphi,kr3] += www - 
true_gam0_123[kr2,kphi,kr3] += gam0 - true_gam1_123[kr2,kphi,kr3] += gam1 - true_gam2_123[kr2,kphi,kr3] += gam2 - true_gam3_123[kr2,kphi,kr3] += gam3 + true_ntri_123[kr2,kr3,kphi] += 1 + true_weight_123[kr2,kr3,kphi] += www + true_gam0_123[kr2,kr3,kphi] += gam0 + true_gam1_123[kr2,kr3,kphi] += gam1 + true_gam2_123[kr2,kr3,kphi] += gam2 + true_gam3_123[kr2,kr3,kphi] += gam3 phi = 2*np.pi - phi if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_132[kr3,kphi,kr2] += 1 - true_weight_132[kr3,kphi,kr2] += www - true_gam0_132[kr3,kphi,kr2] += gam0 - true_gam1_132[kr3,kphi,kr2] += gam1 - true_gam2_132[kr3,kphi,kr2] += gam3 - true_gam3_132[kr3,kphi,kr2] += gam2 + true_ntri_132[kr3,kr2,kphi] += 1 + true_weight_132[kr3,kr2,kphi] += www + true_gam0_132[kr3,kr2,kphi] += gam0 + true_gam1_132[kr3,kr2,kphi] += gam1 + true_gam2_132[kr3,kr2,kphi] += gam3 + true_gam3_132[kr3,kr2,kphi] += gam2 if d1 >= min_sep and d1 < max_sep and d3 >= min_sep and d3 < max_sep: assert 0 <= kr1 < nbins @@ -2764,24 +2764,24 @@ def test_direct_logsas_cross(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_231[kr3,kphi,kr1] += 1 - true_weight_231[kr3,kphi,kr1] += www - true_gam0_231[kr3,kphi,kr1] += gam0 - true_gam1_231[kr3,kphi,kr1] += gam2 - true_gam2_231[kr3,kphi,kr1] += gam3 - true_gam3_231[kr3,kphi,kr1] += gam1 + true_ntri_231[kr3,kr1,kphi] += 1 + true_weight_231[kr3,kr1,kphi] += www + true_gam0_231[kr3,kr1,kphi] += gam0 + true_gam1_231[kr3,kr1,kphi] += gam2 + true_gam2_231[kr3,kr1,kphi] += gam3 + true_gam3_231[kr3,kr1,kphi] += gam1 # 213 phi = 2*np.pi - phi if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_213[kr1,kphi,kr3] += 1 - true_weight_213[kr1,kphi,kr3] += www - true_gam0_213[kr1,kphi,kr3] += gam0 - true_gam1_213[kr1,kphi,kr3] += gam2 - true_gam2_213[kr1,kphi,kr3] += gam1 - true_gam3_213[kr1,kphi,kr3] += 
gam3 + true_ntri_213[kr1,kr3,kphi] += 1 + true_weight_213[kr1,kr3,kphi] += www + true_gam0_213[kr1,kr3,kphi] += gam0 + true_gam1_213[kr1,kr3,kphi] += gam2 + true_gam2_213[kr1,kr3,kphi] += gam1 + true_gam3_213[kr1,kr3,kphi] += gam3 if d1 >= min_sep and d1 < max_sep and d2 >= min_sep and d2 < max_sep: assert 0 <= kr1 < nbins @@ -2793,24 +2793,24 @@ def test_direct_logsas_cross(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_312[kr1,kphi,kr2] += 1 - true_weight_312[kr1,kphi,kr2] += www - true_gam0_312[kr1,kphi,kr2] += gam0 - true_gam1_312[kr1,kphi,kr2] += gam3 - true_gam2_312[kr1,kphi,kr2] += gam1 - true_gam3_312[kr1,kphi,kr2] += gam2 + true_ntri_312[kr1,kr2,kphi] += 1 + true_weight_312[kr1,kr2,kphi] += www + true_gam0_312[kr1,kr2,kphi] += gam0 + true_gam1_312[kr1,kr2,kphi] += gam3 + true_gam2_312[kr1,kr2,kphi] += gam1 + true_gam3_312[kr1,kr2,kphi] += gam2 # 321 phi = 2*np.pi - phi if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_321[kr2,kphi,kr1] += 1 - true_weight_321[kr2,kphi,kr1] += www - true_gam0_321[kr2,kphi,kr1] += gam0 - true_gam1_321[kr2,kphi,kr1] += gam3 - true_gam2_321[kr2,kphi,kr1] += gam2 - true_gam3_321[kr2,kphi,kr1] += gam1 + true_ntri_321[kr2,kr1,kphi] += 1 + true_weight_321[kr2,kr1,kphi] += www + true_gam0_321[kr2,kr1,kphi] += gam0 + true_gam1_321[kr2,kr1,kphi] += gam3 + true_gam2_321[kr2,kr1,kphi] += gam2 + true_gam3_321[kr2,kr1,kphi] += gam1 n_list = [true_ntri_123, true_ntri_132, true_ntri_213, true_ntri_231, true_ntri_312, true_ntri_321] @@ -2963,24 +2963,24 @@ def test_direct_logsas_cross12(): ggg.process(cat1, cat2, num_threads=2) # Figure out the correct answer for each permutation - true_ntri_122 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_212 = np.zeros((nbins, nphi_bins, nbins)) - true_ntri_221 = np.zeros((nbins, nphi_bins, nbins)) - true_gam0_122 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - 
true_gam0_212 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam0_221 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam1_122 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam1_212 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam1_221 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam2_122 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam2_212 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam2_221 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam3_122 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam3_212 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_gam3_221 = np.zeros((nbins, nphi_bins, nbins), dtype=complex) - true_weight_122 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_212 = np.zeros((nbins, nphi_bins, nbins)) - true_weight_221 = np.zeros((nbins, nphi_bins, nbins)) + true_ntri_122 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_212 = np.zeros((nbins, nbins, nphi_bins)) + true_ntri_221 = np.zeros((nbins, nbins, nphi_bins)) + true_gam0_122 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam0_212 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam0_221 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam1_122 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam1_212 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam1_221 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam2_122 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam2_212 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam2_221 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam3_122 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam3_212 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_gam3_221 = np.zeros((nbins, nbins, nphi_bins), dtype=complex) + true_weight_122 = np.zeros((nbins, nbins, nphi_bins)) + true_weight_212 = 
np.zeros((nbins, nbins, nphi_bins)) + true_weight_221 = np.zeros((nbins, nbins, nphi_bins)) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) bin_size = (log_max_sep - log_min_sep) / nbins @@ -3031,12 +3031,12 @@ def test_direct_logsas_cross12(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_122[kr2,kphi,kr3] += 1 - true_weight_122[kr2,kphi,kr3] += www - true_gam0_122[kr2,kphi,kr3] += gam0 - true_gam1_122[kr2,kphi,kr3] += gam1 - true_gam2_122[kr2,kphi,kr3] += gam2 - true_gam3_122[kr2,kphi,kr3] += gam3 + true_ntri_122[kr2,kr3,kphi] += 1 + true_weight_122[kr2,kr3,kphi] += www + true_gam0_122[kr2,kr3,kphi] += gam0 + true_gam1_122[kr2,kr3,kphi] += gam1 + true_gam2_122[kr2,kr3,kphi] += gam2 + true_gam3_122[kr2,kr3,kphi] += gam3 # 231 if d1 >= min_sep and d1 < max_sep and d3 >= min_sep and d3 < max_sep: @@ -3048,12 +3048,12 @@ def test_direct_logsas_cross12(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_221[kr3,kphi,kr1] += 1 - true_weight_221[kr3,kphi,kr1] += www - true_gam0_221[kr3,kphi,kr1] += gam0 - true_gam1_221[kr3,kphi,kr1] += gam2 - true_gam2_221[kr3,kphi,kr1] += gam3 - true_gam3_221[kr3,kphi,kr1] += gam1 + true_ntri_221[kr3,kr1,kphi] += 1 + true_weight_221[kr3,kr1,kphi] += www + true_gam0_221[kr3,kr1,kphi] += gam0 + true_gam1_221[kr3,kr1,kphi] += gam2 + true_gam2_221[kr3,kr1,kphi] += gam3 + true_gam3_221[kr3,kr1,kphi] += gam1 # 312 if d1 >= min_sep and d1 < max_sep and d2 >= min_sep and d2 < max_sep: @@ -3065,12 +3065,12 @@ def test_direct_logsas_cross12(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_212[kr1,kphi,kr2] += 1 - true_weight_212[kr1,kphi,kr2] += www - true_gam0_212[kr1,kphi,kr2] += gam0 - true_gam1_212[kr1,kphi,kr2] += gam3 - true_gam2_212[kr1,kphi,kr2] += gam1 - true_gam3_212[kr1,kphi,kr2] += gam2 + true_ntri_212[kr1,kr2,kphi] += 1 + 
true_weight_212[kr1,kr2,kphi] += www + true_gam0_212[kr1,kr2,kphi] += gam0 + true_gam1_212[kr1,kr2,kphi] += gam3 + true_gam2_212[kr1,kr2,kphi] += gam1 + true_gam3_212[kr1,kr2,kphi] += gam2 n_list = [true_ntri_122, true_ntri_212, true_ntri_221] w_list = [true_weight_122, true_weight_212, true_weight_221] diff --git a/tests/test_kkk.py b/tests/test_kkk.py index 3dccd3ea..e22318cc 100644 --- a/tests/test_kkk.py +++ b/tests/test_kkk.py @@ -1209,9 +1209,9 @@ def test_direct_logsas(): log_max_sep = np.log(max_sep) bin_size = (log_max_sep - log_min_sep) / nbins phi_bin_size = np.pi/nphi_bins - true_ntri = np.zeros((nbins, nphi_bins, nbins), dtype=int) - true_weight = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_zeta = np.zeros((nbins, nphi_bins, nbins), dtype=float) + true_ntri = np.zeros((nbins, nbins, nphi_bins), dtype=int) + true_weight = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_zeta = np.zeros((nbins, nbins, nphi_bins), dtype=float) for i in range(ngal): for j in range(ngal): if i == j: continue @@ -1236,15 +1236,15 @@ def test_direct_logsas(): kr3 = int(np.floor( (np.log(r3)-log_min_sep) / bin_size )) kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kr2 < nbins - assert 0 <= kphi < nphi_bins assert 0 <= kr3 < nbins + assert 0 <= kphi < nphi_bins www = w[i] * w[j] * w[k] zeta = www * kap[i] * kap[j] * kap[k] - true_ntri[kr2,kphi,kr3] += 1 - true_weight[kr2,kphi,kr3] += www - true_zeta[kr2,kphi,kr3] += zeta + true_ntri[kr2,kr3,kphi] += 1 + true_weight[kr2,kr3,kphi] += www + true_zeta[kr2,kr3,kphi] += zeta pos = true_weight > 0 true_zeta[pos] /= true_weight[pos] @@ -1382,20 +1382,20 @@ def test_direct_logsas_spherical(): r = np.sqrt(x**2 + y**2 + z**2) x /= r; y /= r; z /= r - true_ntri = np.zeros((nbins, nphi_bins, nbins), dtype=int) - true_weight = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_zeta = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meand1 = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meand2 
= np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meand3 = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meanphi = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_ntri_arc = np.zeros((nbins, nphi_bins, nbins), dtype=int) - true_weight_arc = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_zeta_arc = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meand1_arc = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meand2_arc = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meand3_arc = np.zeros((nbins, nphi_bins, nbins), dtype=float) - true_meanphi_arc = np.zeros((nbins, nphi_bins, nbins), dtype=float) + true_ntri = np.zeros((nbins, nbins, nphi_bins), dtype=int) + true_weight = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_zeta = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meand1 = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meand2 = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meand3 = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meanphi = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_ntri_arc = np.zeros((nbins, nbins, nphi_bins), dtype=int) + true_weight_arc = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_zeta_arc = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meand1_arc = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meand2_arc = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meand3_arc = np.zeros((nbins, nbins, nphi_bins), dtype=float) + true_meanphi_arc = np.zeros((nbins, nbins, nphi_bins), dtype=float) rad_min_sep = min_sep * coord.degrees / coord.radians rad_max_sep = max_sep * coord.degrees / coord.radians @@ -1429,16 +1429,16 @@ def test_direct_logsas_spherical(): kr3 = int(np.floor(np.log(d3/rad_min_sep) / bin_size)) kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kr2 < nbins - assert 0 <= kphi < nphi_bins assert 0 <= kr3 < nbins + assert 0 <= kphi < nphi_bins - true_ntri[kr2,kphi,kr3] += 1 - 
true_weight[kr2,kphi,kr3] += www - true_zeta[kr2,kphi,kr3] += zeta - true_meand1[kr2,kphi,kr3] += www * d1 - true_meand2[kr2,kphi,kr3] += www * d2 - true_meand3[kr2,kphi,kr3] += www * d3 - true_meanphi[kr2,kphi,kr3] += www * phi + true_ntri[kr2,kr3,kphi] += 1 + true_weight[kr2,kr3,kphi] += www + true_zeta[kr2,kr3,kphi] += zeta + true_meand1[kr2,kr3,kphi] += www * d1 + true_meand2[kr2,kr3,kphi] += www * d2 + true_meand3[kr2,kr3,kphi] += www * d3 + true_meanphi[kr2,kr3,kphi] += www * phi # For Arc metric, use spherical geometry for phi definition. # Law of cosines in spherical geom: @@ -1460,16 +1460,16 @@ def test_direct_logsas_spherical(): kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kr2 < nbins - assert 0 <= kphi < nphi_bins assert 0 <= kr3 < nbins + assert 0 <= kphi < nphi_bins - true_ntri_arc[kr2,kphi,kr3] += 1 - true_weight_arc[kr2,kphi,kr3] += www - true_zeta_arc[kr2,kphi,kr3] += zeta - true_meand1_arc[kr2,kphi,kr3] += www * c - true_meand2_arc[kr2,kphi,kr3] += www * a - true_meand3_arc[kr2,kphi,kr3] += www * b - true_meanphi_arc[kr2,kphi,kr3] += www * phi + true_ntri_arc[kr2,kr3,kphi] += 1 + true_weight_arc[kr2,kr3,kphi] += www + true_zeta_arc[kr2,kr3,kphi] += zeta + true_meand1_arc[kr2,kr3,kphi] += www * c + true_meand2_arc[kr2,kr3,kphi] += www * a + true_meand3_arc[kr2,kr3,kphi] += www * b + true_meanphi_arc[kr2,kr3,kphi] += www * phi pos = true_weight > 0 true_zeta[pos] /= true_weight[pos] @@ -1597,24 +1597,24 @@ def test_direct_logsas_cross(): kkk.process(cat1, cat2, cat3, num_threads=2) # Figure out the correct answer for each permutation - true_ntri_123 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_132 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_213 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_231 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_312 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_321 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_123 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_132 = 
np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_213 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_231 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_312 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_321 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_123 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_132 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_213 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_231 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_312 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_321 = np.zeros( (nbins, nphi_bins, nbins) ) + true_ntri_123 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_132 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_213 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_231 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_312 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_321 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_123 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_132 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_213 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_231 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_312 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_321 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_123 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_132 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_213 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_231 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_312 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_321 = np.zeros( (nbins, nbins, nphi_bins) ) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) bin_size = (log_max_sep - log_min_sep) / nbins @@ -1647,17 +1647,17 @@ def test_direct_logsas_cross(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_123[kr2,kphi,kr3] += 1 - true_weight_123[kr2,kphi,kr3] += www - true_zeta_123[kr2,kphi,kr3] 
+= zeta + true_ntri_123[kr2,kr3,kphi] += 1 + true_weight_123[kr2,kr3,kphi] += www + true_zeta_123[kr2,kr3,kphi] += zeta phi = 2*np.pi - phi if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_132[kr3,kphi,kr2] += 1 - true_weight_132[kr3,kphi,kr2] += www - true_zeta_132[kr3,kphi,kr2] += zeta + true_ntri_132[kr3,kr2,kphi] += 1 + true_weight_132[kr3,kr2,kphi] += www + true_zeta_132[kr3,kr2,kphi] += zeta if d1 >= min_sep and d1 < max_sep and d3 >= min_sep and d3 < max_sep: assert 0 <= kr1 < nbins @@ -1669,18 +1669,18 @@ def test_direct_logsas_cross(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_231[kr3,kphi,kr1] += 1 - true_weight_231[kr3,kphi,kr1] += www - true_zeta_231[kr3,kphi,kr1] += zeta + true_ntri_231[kr3,kr1,kphi] += 1 + true_weight_231[kr3,kr1,kphi] += www + true_zeta_231[kr3,kr1,kphi] += zeta # 213 phi = 2*np.pi - phi if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_213[kr1,kphi,kr3] += 1 - true_weight_213[kr1,kphi,kr3] += www - true_zeta_213[kr1,kphi,kr3] += zeta + true_ntri_213[kr1,kr3,kphi] += 1 + true_weight_213[kr1,kr3,kphi] += www + true_zeta_213[kr1,kr3,kphi] += zeta if d1 >= min_sep and d1 < max_sep and d2 >= min_sep and d2 < max_sep: assert 0 <= kr1 < nbins @@ -1692,18 +1692,18 @@ def test_direct_logsas_cross(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_312[kr1,kphi,kr2] += 1 - true_weight_312[kr1,kphi,kr2] += www - true_zeta_312[kr1,kphi,kr2] += zeta + true_ntri_312[kr1,kr2,kphi] += 1 + true_weight_312[kr1,kr2,kphi] += www + true_zeta_312[kr1,kr2,kphi] += zeta # 321 phi = 2*np.pi - phi if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_321[kr2,kphi,kr1] += 1 - true_weight_321[kr2,kphi,kr1] += www - true_zeta_321[kr2,kphi,kr1] += 
zeta + true_ntri_321[kr2,kr1,kphi] += 1 + true_weight_321[kr2,kr1,kphi] += www + true_zeta_321[kr2,kr1,kphi] += zeta n_list = [true_ntri_123, true_ntri_132, true_ntri_213, true_ntri_231, true_ntri_312, true_ntri_321] @@ -1812,15 +1812,15 @@ def test_direct_logsas_cross12(): kkk.process(cat1, cat2, num_threads=2) # Figure out the correct answer for each permutation - true_ntri_122 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_212 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_221 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_122 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_212 = np.zeros( (nbins, nphi_bins, nbins) ) - true_zeta_221 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_122 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_212 = np.zeros( (nbins, nphi_bins, nbins) ) - true_weight_221 = np.zeros( (nbins, nphi_bins, nbins) ) + true_ntri_122 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_212 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_221 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_122 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_212 = np.zeros( (nbins, nbins, nphi_bins) ) + true_zeta_221 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_122 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_212 = np.zeros( (nbins, nbins, nphi_bins) ) + true_weight_221 = np.zeros( (nbins, nbins, nphi_bins) ) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) bin_size = (log_max_sep - log_min_sep) / nbins @@ -1854,9 +1854,9 @@ def test_direct_logsas_cross12(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_122[kr2,kphi,kr3] += 1 - true_weight_122[kr2,kphi,kr3] += www - true_zeta_122[kr2,kphi,kr3] += zeta + true_ntri_122[kr2,kr3,kphi] += 1 + true_weight_122[kr2,kr3,kphi] += www + true_zeta_122[kr2,kr3,kphi] += zeta # 231 if d1 >= min_sep and d1 < max_sep and d3 >= min_sep and d3 < max_sep: @@ -1868,9 +1868,9 @@ def 
test_direct_logsas_cross12(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_221[kr3,kphi,kr1] += 1 - true_weight_221[kr3,kphi,kr1] += www - true_zeta_221[kr3,kphi,kr1] += zeta + true_ntri_221[kr3,kr1,kphi] += 1 + true_weight_221[kr3,kr1,kphi] += www + true_zeta_221[kr3,kr1,kphi] += zeta # 312 if d1 >= min_sep and d1 < max_sep and d2 >= min_sep and d2 < max_sep: @@ -1882,9 +1882,9 @@ def test_direct_logsas_cross12(): if phi >= 0 and phi < np.pi: kphi = int(np.floor( phi / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_212[kr1,kphi,kr2] += 1 - true_weight_212[kr1,kphi,kr2] += www - true_zeta_212[kr1,kphi,kr2] += zeta + true_ntri_212[kr1,kr2,kphi] += 1 + true_weight_212[kr1,kr2,kphi] += www + true_zeta_212[kr1,kr2,kphi] += zeta n_list = [true_ntri_122, true_ntri_212, true_ntri_221] w_list = [true_weight_122, true_weight_212, true_weight_221] diff --git a/tests/test_nnn.py b/tests/test_nnn.py index c2e869ab..9412daa5 100644 --- a/tests/test_nnn.py +++ b/tests/test_nnn.py @@ -451,20 +451,20 @@ def check_arrays(nnn): np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) ) np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size) - np.testing.assert_equal(nnn.logd2.shape, (nnn.nbins, nnn.nphi_bins, nnn.nbins)) + np.testing.assert_equal(nnn.logd2.shape, (nnn.nbins, nnn.nbins, nnn.nphi_bins)) np.testing.assert_almost_equal(nnn.logd2[:,0,0], nnn.logr1d) np.testing.assert_almost_equal(nnn.logd2[:,-1,-1], nnn.logr1d) - np.testing.assert_equal(nnn.logd3.shape, (nnn.nbins, nnn.nphi_bins, nnn.nbins)) - np.testing.assert_almost_equal(nnn.logd3[0,0,:], nnn.logr1d) - np.testing.assert_almost_equal(nnn.logd3[-1,-1,:], nnn.logr1d) + np.testing.assert_equal(nnn.logd3.shape, (nnn.nbins, nnn.nbins, nnn.nphi_bins)) + np.testing.assert_almost_equal(nnn.logd3[0,:,0], nnn.logr1d) + 
np.testing.assert_almost_equal(nnn.logd3[-1,:,-1], nnn.logr1d) assert len(nnn.logd2) == nnn.nbins assert len(nnn.logd3) == nnn.nbins np.testing.assert_equal(nnn.phi1d.shape, (nnn.nphi_bins,) ) np.testing.assert_almost_equal(nnn.phi1d[0], nnn.min_phi + 0.5*nnn.phi_bin_size) np.testing.assert_almost_equal(nnn.phi1d[-1], nnn.max_phi - 0.5*nnn.phi_bin_size) - np.testing.assert_equal(nnn.phi.shape, (nnn.nbins, nnn.nphi_bins, nnn.nbins)) - np.testing.assert_almost_equal(nnn.phi[0,:,0], nnn.phi1d) - np.testing.assert_almost_equal(nnn.phi[-1,:,-1], nnn.phi1d) + np.testing.assert_equal(nnn.phi.shape, (nnn.nbins, nnn.nbins, nnn.nphi_bins)) + np.testing.assert_almost_equal(nnn.phi[0,0,:], nnn.phi1d) + np.testing.assert_almost_equal(nnn.phi[-1,-1,:], nnn.phi1d) def check_default_phi(nnn): assert nnn.min_phi == 0. @@ -678,8 +678,8 @@ def check_default_phi(nnn): np.testing.assert_almost_equal(nnn.logd2[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logd2[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd2) == nnn.nbins - np.testing.assert_almost_equal(nnn.logd3[:,:,0], math.log(5) + 0.5*nnn.bin_size) - np.testing.assert_almost_equal(nnn.logd3[:,:,-1], math.log(20) - 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,0,:], math.log(5) + 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,-1,:], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd3) == nnn.nbins check_default_phi(nnn) @@ -695,8 +695,8 @@ def check_default_phi(nnn): np.testing.assert_almost_equal(nnn.logd2[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logd2[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd2) == nnn.nbins - np.testing.assert_almost_equal(nnn.logd3[:,:,0], math.log(5) + 0.5*nnn.bin_size) - np.testing.assert_almost_equal(nnn.logd3[:,:,-1], math.log(20) - 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,0,:], math.log(5) + 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,-1,:], 
math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd3) == nnn.nbins check_default_phi(nnn) @@ -712,8 +712,8 @@ def check_default_phi(nnn): np.testing.assert_almost_equal(nnn.logd2[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logd2[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd2) == nnn.nbins - np.testing.assert_almost_equal(nnn.logd3[:,:,0], math.log(5) + 0.5*nnn.bin_size) - np.testing.assert_almost_equal(nnn.logd3[:,:,-1], math.log(20) - 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,0,:], math.log(5) + 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,-1,:], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd3) == nnn.nbins check_default_phi(nnn) @@ -729,8 +729,8 @@ def check_default_phi(nnn): np.testing.assert_almost_equal(nnn.logd2[0], math.log(5) + 0.5*nnn.bin_size) np.testing.assert_almost_equal(nnn.logd2[-1], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd2) == nnn.nbins - np.testing.assert_almost_equal(nnn.logd3[:,:,0], math.log(5) + 0.5*nnn.bin_size) - np.testing.assert_almost_equal(nnn.logd3[:,:,-1], math.log(20) - 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,0,:], math.log(5) + 0.5*nnn.bin_size) + np.testing.assert_almost_equal(nnn.logd3[:,-1,:], math.log(20) - 0.5*nnn.bin_size) assert len(nnn.logd3) == nnn.nbins check_default_phi(nnn) @@ -2843,7 +2843,7 @@ def test_direct_logsas_auto(): log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) - true_ntri = np.zeros( (nbins, nphi_bins, nbins) ) + true_ntri = np.zeros( (nbins, nbins, nphi_bins) ) bin_size = (log_max_sep - log_min_sep) / nbins phi_bin_size = (max_phi-min_phi) / nphi_bins for i in range(ngal): @@ -2870,9 +2870,9 @@ def test_direct_logsas_auto(): kr3 = int(np.floor( (np.log(d3)-log_min_sep) / bin_size )) kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kr2 < nbins - assert 0 <= kphi < nphi_bins assert 0 <= kr3 < nbins - true_ntri[kr2,kphi,kr3] += 1 + assert 0 <= kphi < nphi_bins + 
true_ntri[kr2,kr3,kphi] += 1 np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -3033,12 +3033,12 @@ def test_direct_logsas_cross(): log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) - true_ntri_123 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_132 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_213 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_231 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_312 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_321 = np.zeros( (nbins, nphi_bins, nbins) ) + true_ntri_123 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_132 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_213 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_231 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_312 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_321 = np.zeros( (nbins, nbins, nphi_bins) ) bin_size = (log_max_sep - log_min_sep) / nbins phi_bin_size = (max_phi-min_phi) / nphi_bins t0 = time.time() @@ -3066,13 +3066,13 @@ def test_direct_logsas_cross(): if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_123[kr2,kphi,kr3] += 1 + true_ntri_123[kr2,kr3,kphi] += 1 phi = 2*np.pi - phi if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_132[kr3,kphi,kr2] += 1 + true_ntri_132[kr3,kr2,kphi] += 1 if d1 >= min_sep and d1 < max_sep and d3 >= min_sep and d3 < max_sep: assert 0 <= kr1 < nbins @@ -3084,14 +3084,14 @@ def test_direct_logsas_cross(): if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_231[kr3,kphi,kr1] += 1 + true_ntri_231[kr3,kr1,kphi] += 1 # 213 phi = 2*np.pi - phi if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_213[kr1,kphi,kr3] += 1 + true_ntri_213[kr1,kr3,kphi] += 1 if d1 >= 
min_sep and d1 < max_sep and d2 >= min_sep and d2 < max_sep: assert 0 <= kr1 < nbins @@ -3103,14 +3103,14 @@ def test_direct_logsas_cross(): if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_312[kr1,kphi,kr2] += 1 + true_ntri_312[kr1,kr2,kphi] += 1 # 321 phi = 2*np.pi - phi if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_321[kr2,kphi,kr1] += 1 + true_ntri_321[kr2,kr1,kphi] += 1 t1 = time.time() print('Python brute: ',t1-t0) @@ -3261,9 +3261,9 @@ def test_direct_logsas_cross12(): log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) - true_ntri_122 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_212 = np.zeros( (nbins, nphi_bins, nbins) ) - true_ntri_221 = np.zeros( (nbins, nphi_bins, nbins) ) + true_ntri_122 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_212 = np.zeros( (nbins, nbins, nphi_bins) ) + true_ntri_221 = np.zeros( (nbins, nbins, nphi_bins) ) bin_size = (log_max_sep - log_min_sep) / nbins phi_bin_size = (max_phi-min_phi) / nphi_bins t0 = time.time() @@ -3292,7 +3292,7 @@ def test_direct_logsas_cross12(): if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_122[kr2,kphi,kr3] += 1 + true_ntri_122[kr2,kr3,kphi] += 1 # 231 if d1 >= min_sep and d1 < max_sep and d3 >= min_sep and d3 < max_sep: @@ -3304,7 +3304,7 @@ def test_direct_logsas_cross12(): if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_221[kr3,kphi,kr1] += 1 + true_ntri_221[kr3,kr1,kphi] += 1 # 312 if d1 >= min_sep and d1 < max_sep and d2 >= min_sep and d2 < max_sep: @@ -3316,7 +3316,7 @@ def test_direct_logsas_cross12(): if phi >= min_phi and phi < max_phi: kphi = int(np.floor( (phi-min_phi) / phi_bin_size )) assert 0 <= kphi < nphi_bins - true_ntri_212[kr1,kphi,kr2] 
+= 1 + true_ntri_212[kr1,kr2,kphi] += 1 t1 = time.time() print('Python brute: ',t1-t0) diff --git a/treecorr/corr3base.py b/treecorr/corr3base.py index 8706b4b0..5ecb3254 100644 --- a/treecorr/corr3base.py +++ b/treecorr/corr3base.py @@ -539,8 +539,8 @@ def __init__(self, config=None, *, logger=None, rng=None, **kwargs): self._ro.bv = 0.1 else: # LogSAS - if self.phi_bin_size <= 0.1: - self._ro.bu = self.phi_bin_size + if self._ro.ubin_size <= 0.1: + self._ro.bu = self._ro.ubin_size else: self._ro.bu = 0.1 self._ro.bv = 0 @@ -551,7 +551,7 @@ def __init__(self, config=None, *, logger=None, rng=None, **kwargs): self._ro.bv = self.vbin_size * self.bin_slop else: # LogSAS - self._ro.bu = self.phi_bin_size * self.bin_slop + self._ro.bu = self._ro.ubin_size * self.bin_slop self._ro.bv = 0 if self.b > 0.100001: # Add some numerical slop @@ -601,11 +601,11 @@ def __init__(self, config=None, *, logger=None, rng=None, **kwargs): num=self.nphi_bins, endpoint=False) self._ro.phi1d += self.min_phi + 0.5*self.phi_bin_size self._ro.logd2 = np.tile(self.logr1d[:, np.newaxis, np.newaxis], - (1, self.nphi_bins, self.nbins)) - self._ro.phi = np.tile(self.phi1d[np.newaxis, :, np.newaxis], - (self.nbins, 1, self.nbins)) - self._ro.logd3 = np.tile(self.logr1d[np.newaxis, np.newaxis, :], - (self.nbins, self.nphi_bins, 1)) + (1, self.nbins, self.nphi_bins)) + self._ro.logd3 = np.tile(self.logr1d[np.newaxis, :, np.newaxis], + (self.nbins, 1, self.nphi_bins)) + self._ro.phi = np.tile(self.phi1d[np.newaxis, np.newaxis, :], + (self.nbins, self.nbins, 1)) self._ro.d2nom = np.exp(self.logd2) self._ro.d3nom = np.exp(self.logd3) self._ro._nbins = len(self._ro.logd2.ravel()) @@ -777,6 +777,10 @@ def phi(self): def phi_units(self): return self._ro.phi_units @property def _phi_units(self): return self._ro._phi_units + @property + def meanphi(self): + assert self.bin_type == 'LogSAS' + return self.meanu def _equal_binning(self, other, brief=False): # A helper function to test if two Corr3 objects 
have the same binning parameters @@ -804,7 +808,7 @@ def _equal_binning(self, other, brief=False): return eq else: return (self.sep_units == other.sep_units and - (self.bin_type == 'LogRUV' or self.phi_units == other.phi_units) and + (self.bin_type != 'LogSAS' or self.phi_units == other.phi_units) and self.coords == other.coords and self.bin_slop == other.bin_slop and self.xperiod == other.xperiod and @@ -813,25 +817,18 @@ def _equal_binning(self, other, brief=False): def _equal_bin_data(self, other): # A helper function to test if two Corr3 objects have the same measured bin values + equal_d = (np.array_equal(self.meand1, other.meand1) and + np.array_equal(self.meanlogd1, other.meanlogd1) and + np.array_equal(self.meand2, other.meand2) and + np.array_equal(self.meanlogd2, other.meanlogd2) and + np.array_equal(self.meand3, other.meand3) and + np.array_equal(self.meanlogd3, other.meanlogd3)) if self.bin_type == 'LogRUV': - return (other.bin_type == 'LogRUV' and - np.array_equal(self.meand1, other.meand1) and - np.array_equal(self.meanlogd1, other.meanlogd1) and - np.array_equal(self.meand2, other.meand2) and - np.array_equal(self.meanlogd2, other.meanlogd2) and - np.array_equal(self.meand3, other.meand3) and - np.array_equal(self.meanlogd3, other.meanlogd3) and + return (other.bin_type == 'LogRUV' and equal_d and np.array_equal(self.meanu, other.meanu) and np.array_equal(self.meanv, other.meanv)) else: - # LogSAS - return (other.bin_type == 'LogSAS' and - np.array_equal(self.meand1, other.meand1) and - np.array_equal(self.meanlogd1, other.meanlogd1) and - np.array_equal(self.meand2, other.meand2) and - np.array_equal(self.meanlogd2, other.meanlogd2) and - np.array_equal(self.meand3, other.meand3) and - np.array_equal(self.meanlogd3, other.meanlogd3) and + return (other.bin_type == 'LogSAS' and equal_d and np.array_equal(self.meanphi, other.meanphi)) @property @@ -867,12 +864,6 @@ def var_method(self): return self._ro.var_method @property def num_bootstrap(self): 
return self._ro.num_bootstrap - # Alias names for some of the LogSAS arrays, which use the LogRUV names internally. - @property - def meanphi(self): - assert self.bin_type == 'LogSAS' - return self.meanu - def __getstate__(self): d = self.__dict__.copy() d.pop('_corr',None) From 9db3b27f5a8bf6e3bbc26ceb297f5a30691a6a22 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Tue, 9 Jan 2024 11:16:25 -0500 Subject: [PATCH 02/18] Don't use getData where not needed --- include/BinType.h | 10 ++++------ include/Metric.h | 6 +++--- include/ProjectHelper.h | 38 +++++++++++++++++++------------------- include/Split.h | 6 +++--- src/Corr3.cpp | 35 +++++++++++++++-------------------- 5 files changed, 44 insertions(+), 51 deletions(-) diff --git a/include/BinType.h b/include/BinType.h index 5360b307..93571933 100644 --- a/include/BinType.h +++ b/include/BinType.h @@ -747,8 +747,7 @@ struct BinTypeHelper Assert(kv < nvbins); // Now account for negative v - if (!metric.CCW(c1.getData().getPos(), c2.getData().getPos(), - c3.getData().getPos())) { + if (!metric.CCW(c1.getPos(), c2.getPos(), c3.getPos())) { v = -v; kv = nvbins - kv - 1; } else { @@ -900,7 +899,7 @@ struct BinTypeHelper // If we are not swapping 2,3, stop if orientation cannot be counter-clockwise. if (O > 1 && - !metric.CCW(c1.getData().getPos(), c3.getData().getPos(), c2.getData().getPos())) { + !metric.CCW(c1.getPos(), c3.getPos(), c2.getPos())) { // For skinny triangles, be careful that the points can't flip to the other side. // This is similar to the calculation below. We effecively check that cosphi can't // increase to 1. 
@@ -1181,12 +1180,12 @@ struct BinTypeHelper } if (O > 1 && - !metric.CCW(c1.getData().getPos(), c3.getData().getPos(), c2.getData().getPos())) { + !metric.CCW(c1.getPos(), c3.getPos(), c2.getPos())) { xdbg<<"Triangle is not CCW.\n"; return false; } - XAssert(metric.CCW(c1.getData().getPos(), c3.getData().getPos(), c2.getData().getPos())); + XAssert(metric.CCW(c1.getPos(), c3.getPos(), c2.getPos())); if (phi < minphi || phi >= maxphi) { xdbg<<"phi not in minphi .. maxphi\n"; @@ -1240,6 +1239,5 @@ struct BinTypeHelper }; - #endif diff --git a/include/Metric.h b/include/Metric.h index d865a109..1a600551 100644 --- a/include/Metric.h +++ b/include/Metric.h @@ -712,9 +712,9 @@ struct MetricHelper // Rather than do trig though, recompute the chord lengths so we can compute // cos(phi) with just regular arithmetic and a sqrt. // - double d1sq = (c2.getData().getPos() - c3.getData().getPos()).normSq(); - double d2sq = (c1.getData().getPos() - c3.getData().getPos()).normSq(); - double d3sq = (c1.getData().getPos() - c2.getData().getPos()).normSq(); + double d1sq = (c2.getPos() - c3.getPos()).normSq(); + double d2sq = (c1.getPos() - c3.getPos()).normSq(); + double d3sq = (c1.getPos() - c2.getPos()).normSq(); double cosphi = (d2sq + d3sq - 0.5*d2sq*d3sq - d1sq); cosphi /= 2. * std::sqrt( d2sq * d3sq * (1.-0.25*d2sq) * (1.-0.25*d3sq) ); diff --git a/include/ProjectHelper.h b/include/ProjectHelper.h index 76ebc6f6..bf60bcd7 100644 --- a/include/ProjectHelper.h +++ b/include/ProjectHelper.h @@ -72,7 +72,7 @@ struct ProjectHelper const Cell& c1, const Cell& c2, std::complex& z2) { // Project given spin-s quantity to the line connecting them. - std::complex r(c2.getData().getPos() - c1.getData().getPos()); + std::complex r(c2.getPos() - c1.getPos()); z2 *= _expmsialpha(r); } @@ -82,7 +82,7 @@ struct ProjectHelper std::complex& z1, std::complex& z2) { // Project given spin-s quantities to the line connecting them. 
- std::complex r(c2.getData().getPos() - c1.getData().getPos()); + std::complex r(c2.getPos() - c1.getPos()); std::complex expmsialpha = _expmsialpha(r); z1 *= expmsialpha; z2 *= expmsialpha; @@ -94,9 +94,9 @@ struct ProjectHelper std::complex& z1, std::complex& z2, std::complex& z3) { // Project given spin-s quantities to the line connecting each to the centroid. - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); - const Position& p3 = c3.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); + const Position& p3 = c3.getPos(); Position cen = (p1 + p2 + p3)/3.; std::complex r1(cen - p1); std::complex r2(cen - p2); @@ -189,8 +189,8 @@ struct ProjectHelper static void Project( const Cell& c1, const Cell& c2, std::complex& z2) { - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); std::complex r = calculate_direction(p1,p2); z2 *= _expmsialpha(r); } @@ -200,8 +200,8 @@ struct ProjectHelper const Cell& c1, const Cell& c2, std::complex& z1, std::complex& z2) { - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); std::complex r12 = calculate_direction(p1,p2); std::complex expmsialpha = _expmsialpha(r12); z2 *= expmsialpha; @@ -220,9 +220,9 @@ struct ProjectHelper const Cell& c1, const Cell& c2, const Cell& c3, std::complex& z1, std::complex& z2, std::complex& z3) { - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); - const Position& p3 = c3.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); + const Position& p3 = c3.getPos(); Position cen((p1 + p2 + p3)/3.); z1 *= _expmsialpha(calculate_direction(cen,p1)); z2 *= _expmsialpha(calculate_direction(cen,p2)); @@ -240,8 +240,8 @@ struct ProjectHelper 
static void Project( const Cell& c1, const Cell& c2, std::complex& z2) { - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); Position sp1(p1); Position sp2(p2); std::complex r = ProjectHelper::calculate_direction(sp1,sp2); @@ -253,8 +253,8 @@ struct ProjectHelper const Cell& c1, const Cell& c2, std::complex& z1, std::complex& z2) { - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); Position sp1(p1); Position sp2(p2); std::complex r12 = ProjectHelper::calculate_direction(sp1,sp2); @@ -271,9 +271,9 @@ struct ProjectHelper const Cell& c1, const Cell& c2, const Cell& c3, std::complex& z1, std::complex& z2, std::complex& z3) { - const Position& p1 = c1.getData().getPos(); - const Position& p2 = c2.getData().getPos(); - const Position& p3 = c3.getData().getPos(); + const Position& p1 = c1.getPos(); + const Position& p2 = c2.getPos(); + const Position& p3 = c3.getPos(); Position sp1(p1); Position sp2(p2); Position sp3(p3); diff --git a/include/Split.h b/include/Split.h index d869509c..fafb4acc 100644 --- a/include/Split.h +++ b/include/Split.h @@ -126,11 +126,11 @@ inline bool Check( // Checks that d1,d2,d3 are correct for the three Cells given. // Used as a debugging check. 
bool ok=true; - if (Dist(c3.getData().getPos(),c2.getData().getPos())-d1 > 0.0001) + if (Dist(c3.getPos(),c2.getPos())-d1 > 0.0001) { std::cerr<<"d1\n"; ok = false; } - if (Dist(c1.getData().getPos(),c3.getData().getPos())-d2 > 0.0001) + if (Dist(c1.getPos(),c3.getPos())-d2 > 0.0001) { std::cerr<<"d2\n"; ok = false; } - if (Dist(c2.getData().getPos(),c1.getData().getPos())-d3 > 0.0001) + if (Dist(c2.getPos(),c1.getPos())-d3 > 0.0001) { std::cerr<<"d3\n"; ok = false; } if (d1 > d2+d3+0.0001) { std::cerr<<"sum d1\n"; ok = false; } if (d2 > d1+d3+0.0001) { std::cerr<<"sum d2\n"; ok = false; } diff --git a/src/Corr3.cpp b/src/Corr3.cpp index 120215bf..8a920a58 100644 --- a/src/Corr3.cpp +++ b/src/Corr3.cpp @@ -439,7 +439,7 @@ template void BaseCorr3::process3(const BaseCell& c1, const MetricHelper& metric) { // Does all triangles with 3 points in c1 - xdbg<<"Process3: c1 = "<& c1, const BaseCell& c2, const MetricHelper& metric) { // Does all triangles with one point in c1 and the other two points in c2 - xdbg<<"Process12: c1 = "<& c1, const BaseCell& c2, } double s1 = c1.getSize(); - double rsq = metric.DistSq(c1.getData().getPos(), c2.getData().getPos(), s1, s2); + double rsq = metric.DistSq(c1.getPos(), c2.getPos(), s1, s2); double s1ps2 = s1 + s2; // If all possible triangles will have d2 < minsep, then abort the recursion here. @@ -555,11 +555,11 @@ void BaseCorr3::process111( // Calculate the distances if they aren't known yet double s=0.; if (d1sq == 0.) - d1sq = metric.DistSq(c2.getData().getPos(), c3.getData().getPos(), s, s); + d1sq = metric.DistSq(c2.getPos(), c3.getPos(), s, s); if (d2sq == 0.) - d2sq = metric.DistSq(c1.getData().getPos(), c3.getData().getPos(), s, s); + d2sq = metric.DistSq(c1.getPos(), c3.getPos(), s, s); if (d3sq == 0.) 
- d3sq = metric.DistSq(c1.getData().getPos(), c2.getData().getPos(), s, s); + d3sq = metric.DistSq(c1.getPos(), c2.getPos(), s, s); inc_ws(); if (O == 0) { @@ -601,8 +601,7 @@ void BaseCorr3::process111( xdbg<<":set1\n"; // If the BinType doesn't want sorting, then make sure we get all the cells // into the first location, and switch to ordered = 1. - if (!metric.CCW(c1.getData().getPos(), c3.getData().getPos(), - c2.getData().getPos())) { + if (!metric.CCW(c1.getPos(), c3.getPos(), c2.getPos())) { xdbg<<"132\n"; process111Sorted(c1, c3, c2, metric, d1sq, d3sq, d2sq); xdbg<<"213\n"; @@ -633,8 +632,7 @@ void BaseCorr3::process111( } else { // For the non-sorting BinTypes (i.e. LogSAS so far), we just need to make sure // 1-3-2 is counter-clockwise - if (!metric.CCW(c1.getData().getPos(), c3.getData().getPos(), - c2.getData().getPos())) { + if (!metric.CCW(c1.getPos(), c3.getPos(), c2.getPos())) { xdbg<<":swap23\n"; // Swap 2,3 process111Sorted(c1, c3, c2, metric, d1sq, d3sq, d2sq); @@ -659,9 +657,9 @@ void BaseCorr3::process111Sorted( const double s2 = c2.getSize(); const double s3 = c3.getSize(); - xdbg<<"Process111Sorted: c1 = "< template static void ProcessZeta( const Cell& , const Cell& , const Cell&, - const double , const double , const double , ZetaData& , int ) {} }; @@ -810,7 +807,6 @@ struct DirectHelper template static void ProcessZeta( const Cell& c1, const Cell& c2, const Cell& c3, - const double , const double , const double , ZetaData& zeta, int index) { zeta.zeta[index] += c1.getData().getWK() * c2.getData().getWK() * c3.getData().getWK(); @@ -823,7 +819,6 @@ struct DirectHelper template static void ProcessZeta( const Cell& c1, const Cell& c2, const Cell& c3, - const double d1, const double d2, const double d3, ZetaData& zeta, int index) { std::complex g1 = c1.getData().getWG(); @@ -886,11 +881,11 @@ void Corr3::finishProcess( const double d1, const double d2, const double d3, const double u, const double v, const double logd1, const double logd2, 
const double logd3, const int index) { - double nnn = double(c1.getData().getN()) * c2.getData().getN() * c3.getData().getN(); + double nnn = double(c1.getN()) * c2.getN() * c3.getN(); _ntri[index] += nnn; dbg< "<<_ntri[index]<::finishProcess( static_cast&>(c1), static_cast&>(c2), static_cast&>(c3), - d1, d2, d3, _zeta, index); + _zeta, index); } template From 5d2bcb352c6bca6975579a8af34af845a70b84b5 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 13 Jan 2024 15:06:24 -0500 Subject: [PATCH 03/18] Use different logger for Catalog and Corr2/3 to avoid clashes. --- treecorr/catalog.py | 9 ++++++--- treecorr/config.py | 8 ++++---- treecorr/corr2base.py | 9 ++++++--- treecorr/corr3base.py | 9 ++++++--- treecorr/nncorrelation.py | 1 + treecorr/nnncorrelation.py | 1 + 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/treecorr/catalog.py b/treecorr/catalog.py index e067b487..c40d3bbf 100644 --- a/treecorr/catalog.py +++ b/treecorr/catalog.py @@ -635,9 +635,11 @@ def __init__(self, file_name=None, config=None, *, num=0, logger=None, is_rand=F if logger is not None: self.logger = logger + self._logger_name = logger.name else: + self._logger_name = 'treecorr.Catalog' self.logger = setup_logger(get(self.config,'verbose',int,1), - self.config.get('log_file',None)) + self.config.get('log_file',None), self._logger_name) # Start with everything set to None. Overwrite as appropriate. 
self._x = None @@ -2739,8 +2741,9 @@ def __getstate__(self): def __setstate__(self, d): self.__dict__ = d - self.logger = setup_logger(get(self.config,'verbose',int,1), - self.config.get('log_file',None)) + if self._logger_name is not None: + self.logger = setup_logger(get(self.config,'verbose',int,1), + self.config.get('log_file',None), self._logger_name) self._field = lambda : None def __repr__(self): diff --git a/treecorr/config.py b/treecorr/config.py index c69d2543..8b7eae2a 100644 --- a/treecorr/config.py +++ b/treecorr/config.py @@ -158,7 +158,7 @@ def _read_params_file(file_name): return config -def setup_logger(verbose, log_file=None): +def setup_logger(verbose, log_file=None, name=None): """Parse the integer verbosity level from the command line args into a logging_level string :param verbose: An integer indicating what verbosity level to use. @@ -174,10 +174,10 @@ def setup_logger(verbose, log_file=None): logging_level = logging_levels[int(verbose)] # Setup logging to go to sys.stdout or (if requested) to an output file - if log_file is None: + if name is None: name = 'treecorr' - else: - name = 'treecorr_' + log_file + if log_file is not None: + name += '_' + log_file logger = logging.getLogger(name) if len(logger.handlers) == 0: # only add handler once! 
diff --git a/treecorr/corr2base.py b/treecorr/corr2base.py index d41e0464..a505f03f 100644 --- a/treecorr/corr2base.py +++ b/treecorr/corr2base.py @@ -288,10 +288,12 @@ def __init__(self, config=None, *, logger=None, rng=None, **kwargs): self._corr = None # Do this first to make sure we always have it for __del__ self.config = merge_config(config,kwargs,Corr2._valid_params) if logger is None: + self._logger_name = 'treecorr.Corr2' self.logger = setup_logger(get(self.config,'verbose',int,1), - self.config.get('log_file',None)) + self.config.get('log_file',None), self._logger_name) else: self.logger = logger + self._logger_name = logger.name # We'll make a bunch of attributes here, which we put into a namespace called _ro. # These are the core attributes that won't ever be changed after construction. @@ -581,8 +583,9 @@ def __getstate__(self): def __setstate__(self, d): self.__dict__ = d self._corr = None - self.logger = setup_logger(get(self.config,'verbose',int,1), - self.config.get('log_file',None)) + if self._logger_name is not None: + self.logger = setup_logger(get(self.config,'verbose',int,1), + self.config.get('log_file',None), self._logger_name) def clear(self): """Clear all data vectors, the results dict, and any related values. diff --git a/treecorr/corr3base.py b/treecorr/corr3base.py index 5ecb3254..3f7a0364 100644 --- a/treecorr/corr3base.py +++ b/treecorr/corr3base.py @@ -341,10 +341,12 @@ def __init__(self, config=None, *, logger=None, rng=None, **kwargs): self._corr = None # Do this first to make sure we always have it for __del__ self.config = merge_config(config,kwargs,Corr3._valid_params) if logger is None: + self._logger_name = 'treecorr.Corr3' self.logger = setup_logger(get(self.config,'verbose',int,1), - self.config.get('log_file',None)) + self.config.get('log_file',None), self._logger_name) else: self.logger = logger + self._logger_name = logger.name # We'll make a bunch of attributes here, which we put into a namespace called _ro. 
# These are the core attributes that won't ever be changed after construction. @@ -874,8 +876,9 @@ def __getstate__(self): def __setstate__(self, d): self.__dict__ = d self._corr = None - self.logger = setup_logger(get(self.config,'verbose',int,1), - self.config.get('log_file',None)) + if self._logger_name is not None: + self.logger = setup_logger(get(self.config,'verbose',int,1), + self.config.get('log_file',None), self._logger_name) def clear(self): """Clear all data vectors, the results dict, and any related values. diff --git a/treecorr/nncorrelation.py b/treecorr/nncorrelation.py index 6aca54a2..253598b3 100644 --- a/treecorr/nncorrelation.py +++ b/treecorr/nncorrelation.py @@ -191,6 +191,7 @@ def _zero_copy(self, tot): ret._rr = ret._dr = ret._rd = None ret._write_rr = ret._write_dr = ret._write_rd = None ret._cov = None + ret._logger_name = None # This override is really the main advantage of using this: setattr(ret, '_nonzero', False) return ret diff --git a/treecorr/nnncorrelation.py b/treecorr/nnncorrelation.py index 4c3b1f7a..ce4bad02 100644 --- a/treecorr/nnncorrelation.py +++ b/treecorr/nnncorrelation.py @@ -222,6 +222,7 @@ def _zero_copy(self, tot): ret._corr = None ret._rrr = ret._drr = ret._rdd = None ret._write_rrr = ret._write_drr = ret._write_rdd = None + ret._logger_name = None # This override is really the main advantage of using this: setattr(ret, '_nonzero', False) return ret From 0cb6b4c7bc2fc9565de8b8f4c638114ffac88f7d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 05:32:29 -0500 Subject: [PATCH 04/18] Switch to default ordered=True --- tests/test_ggg.py | 287 +++++++++++++++---------------- tests/test_kkk.py | 230 +++++++++++++------------ tests/test_nnn.py | 334 +++++++++++++++++++------------------ tests/test_patch3pt.py | 82 ++++----- treecorr/exec_corr3.py | 4 +- treecorr/gggcorrelation.py | 34 ++-- treecorr/kkkcorrelation.py | 34 ++-- treecorr/nnncorrelation.py | 41 ++--- 8 files changed, 543 insertions(+), 503 
deletions(-) diff --git a/tests/test_ggg.py b/tests/test_ggg.py index 5c2692c0..8142b0de 100644 --- a/tests/test_ggg.py +++ b/tests/test_ggg.py @@ -156,11 +156,10 @@ def test_direct_logruv(): np.testing.assert_allclose(data['gam3i'], ggg.gam3i.flatten(), rtol=1.e-3) # Also check the cross calculation. - # Here, we get 6x as many triangles, since each triangle is discovered 6 times. ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) ggg.process(cat, cat, cat, num_threads=2) - np.testing.assert_array_equal(ggg.ntri, 6*true_ntri) - np.testing.assert_allclose(ggg.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) + np.testing.assert_array_equal(ggg.ntri, true_ntri) + np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0i, true_gam0.imag, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam1r, true_gam1.real, rtol=1.e-5, atol=1.e-8) @@ -170,8 +169,7 @@ def test_direct_logruv(): np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-5, atol=1.e-8) - # But with ordered=True we get just the ones in the given order. - ggg.process(cat, cat, cat, ordered=True) + ggg.process(cat, cat, num_threads=2) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8) @@ -183,14 +181,10 @@ def test_direct_logruv(): np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-5, atol=1.e-8) - # Or with 2 argument version, finds each triangle 3 times. 
- ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) - t1 = time.time() - ggg.process(cat, cat, num_threads=2) - t2 = time.time() - print('Time for 1-2 cross correlation = ',t2-t1) - np.testing.assert_array_equal(ggg.ntri, 3*true_ntri) - np.testing.assert_allclose(ggg.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) + # With ordered=False, we get 6x as many triangles, since each triangle is discovered 6 times. + ggg.process(cat, cat, cat, ordered=False) + np.testing.assert_array_equal(ggg.ntri, 6*true_ntri) + np.testing.assert_allclose(ggg.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0i, true_gam0.imag, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam1r, true_gam1.real, rtol=1.e-5, atol=1.e-8) @@ -200,12 +194,11 @@ def test_direct_logruv(): np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-5, atol=1.e-8) - t1 = time.time() - ggg.process(cat, cat, ordered=True, num_threads=2) - t2 = time.time() - print('Time for 1-2 cross correlation, ordered = ',t2-t1) - np.testing.assert_array_equal(ggg.ntri, true_ntri) - np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) + # Or with 2 argument version, finds each triangle 3 times. 
+ ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) + ggg.process(cat, cat, ordered=False) + np.testing.assert_array_equal(ggg.ntri, 3*true_ntri) + np.testing.assert_allclose(ggg.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0i, true_gam0.imag, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam1r, true_gam1.real, rtol=1.e-5, atol=1.e-8) @@ -231,7 +224,7 @@ def test_direct_logruv(): np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-3, atol=1.e-4) np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-3, atol=1.e-4) - ggg.process(cat, cat, cat, ordered=True) + ggg.process(cat, cat, cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8) @@ -243,7 +236,7 @@ def test_direct_logruv(): np.testing.assert_allclose(ggg.gam3r, true_gam3.real, rtol=1.e-3, atol=1.e-4) np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-3, atol=1.e-4) - ggg.process(cat, cat, ordered=True) + ggg.process(cat, cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0r, true_gam0.real, rtol=1.e-5, atol=1.e-8) @@ -764,7 +757,6 @@ def test_direct_logruv_cross(): g3_list = [true_gam3_123, true_gam3_132, true_gam3_213, true_gam3_231, true_gam3_312, true_gam3_321] - # With the default ordered=False, we end up with the sum of all permutations. 
true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_gam0_sum = sum(g0_list) @@ -776,14 +768,6 @@ def test_direct_logruv_cross(): true_gam1_sum[pos] /= true_weight_sum[pos] true_gam2_sum[pos] /= true_weight_sum[pos] true_gam3_sum[pos] /= true_weight_sum[pos] - #print('true_ntri = ',true_ntri_sum) - #print('diff = ',ggg.ntri - true_ntri_sum) - np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) - np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) # Now normalize each one individually. for w,g0,g1,g2,g3 in zip(w_list, g0_list, g1_list, g2_list, g3_list): @@ -793,43 +777,42 @@ def test_direct_logruv_cross(): g2[pos] /= w[pos] g3[pos] /= w[pos] - # With ordered=True we get just the ones in the given order. 
- ggg.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_123) np.testing.assert_allclose(ggg.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_123, rtol=1.e-5) - ggg.process(cat1, cat3, cat2, ordered=True) + + ggg.process(cat1, cat3, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_132) np.testing.assert_allclose(ggg.weight, true_weight_132, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_132, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_132, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_132, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_132, rtol=1.e-5) - ggg.process(cat2, cat1, cat3, ordered=True) + ggg.process(cat2, cat1, cat3) np.testing.assert_array_equal(ggg.ntri, true_ntri_213) np.testing.assert_allclose(ggg.weight, true_weight_213, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_213, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_213, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_213, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_213, rtol=1.e-5) - ggg.process(cat2, cat3, cat1, ordered=True) + ggg.process(cat2, cat3, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_231) np.testing.assert_allclose(ggg.weight, true_weight_231, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_231, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_231, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_231, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_231, rtol=1.e-5) - ggg.process(cat3, cat1, cat2, ordered=True) + ggg.process(cat3, cat1, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_312) np.testing.assert_allclose(ggg.weight, true_weight_312, rtol=1.e-5) 
np.testing.assert_allclose(ggg.gam0, true_gam0_312, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_312, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_312, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_312, rtol=1.e-5) - ggg.process(cat3, cat2, cat1, ordered=True) + ggg.process(cat3, cat2, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_321) np.testing.assert_allclose(ggg.weight, true_weight_321, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_321, rtol=1.e-5) @@ -837,9 +820,10 @@ def test_direct_logruv_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_321, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_321, rtol=1.e-5) - ggg.process(cat1, cat2, cat3) - #print('binslop > 0: ggg.ntri = ',ggg.ntri) + # With ordered=False, we end up with the sum of all permutations. + #print('true_ntri = ',true_ntri_sum) #print('diff = ',ggg.ntri - true_ntri_sum) + ggg.process(cat1, cat2, cat3, ordered=False) np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) @@ -847,9 +831,13 @@ def test_direct_logruv_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) + # Check bin_slop=0 + ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1, max_top=0) + ggg.process(cat1, cat2, cat3, ordered=True) - #print('binslop > 0: ggg.ntri = ',ggg.ntri) - #print('diff = ',ggg.ntri - true_ntri_sum) np.testing.assert_array_equal(ggg.ntri, true_ntri_123) np.testing.assert_allclose(ggg.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_123, rtol=1.e-5) @@ -857,15 +845,7 @@ def test_direct_logruv_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_123, 
rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_123, rtol=1.e-5) - # And again with no top-level recursion - ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, - min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) - ggg.process(cat1, cat2, cat3) - #print('max_top = 0: ggg.ntri = ',ggg.ntri) - #print('true_ntri = ',true_ntri_sum) - #print('diff = ',ggg.ntri - true_ntri_sum) + ggg.process(cat1, cat2, cat3, ordered=False) np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) @@ -873,6 +853,12 @@ def test_direct_logruv_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) + # And again with no top-level recursion + ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1, max_top=0) + ggg.process(cat1, cat2, cat3, ordered=True) #print('max_top = 0: ggg.ntri = ',ggg.ntri) #print('true_ntri = ',true_ntri_sum) @@ -884,6 +870,17 @@ def test_direct_logruv_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_123, rtol=1.e-5) + ggg.process(cat1, cat2, cat3, ordered=False) + #print('max_top = 0: ggg.ntri = ',ggg.ntri) + #print('true_ntri = ',true_ntri_sum) + #print('diff = ',ggg.ntri - true_ntri_sum) + np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) + np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam3, 
true_gam3_sum, rtol=1.e-5) + # Error to have cat3, but not cat2 with assert_raises(ValueError): ggg.process(cat1, cat3=cat3) @@ -1073,7 +1070,6 @@ def test_direct_logruv_cross12(): g2_list = [true_gam2_122, true_gam2_212, true_gam2_221] g3_list = [true_gam3_122, true_gam3_212, true_gam3_221] - # With the default ordered=False, we end up with the sum of all permutations. true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_gam0_sum = sum(g0_list) @@ -1085,14 +1081,6 @@ def test_direct_logruv_cross12(): true_gam1_sum[pos] /= true_weight_sum[pos] true_gam2_sum[pos] /= true_weight_sum[pos] true_gam3_sum[pos] /= true_weight_sum[pos] - #print('true_ntri = ',true_ntri_sum) - #print('diff = ',ggg.ntri - true_ntri_sum) - np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) - np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) # Now normalize each one individually. for w,g0,g1,g2,g3 in zip(w_list, g0_list, g1_list, g2_list, g3_list): @@ -1103,21 +1091,20 @@ def test_direct_logruv_cross12(): g3[pos] /= w[pos] # With ordered=True we get just the ones in the given order. 
- ggg.process(cat1, cat2, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) np.testing.assert_allclose(ggg.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_122, rtol=1.e-5) - ggg.process(cat2, cat1, cat2, ordered=True) + ggg.process(cat2, cat1, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_212) np.testing.assert_allclose(ggg.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_212, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_212, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_212, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_212, rtol=1.e-5) - ggg.process(cat2, cat2, cat1, ordered=True) + ggg.process(cat2, cat2, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_221) np.testing.assert_allclose(ggg.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_221, rtol=1.e-5) @@ -1125,13 +1112,9 @@ def test_direct_logruv_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_221, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_221, rtol=1.e-5) - # Repeat with binslop = 0 - ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, - min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) - ggg.process(cat1, cat2) - #print('binslop > 0: ggg.ntri = ',ggg.ntri) + # With ordered=False, we end up with the sum of all permutations. 
+ ggg.process(cat1, cat2, ordered=False) + #print('true_ntri = ',true_ntri_sum) #print('diff = ',ggg.ntri - true_ntri_sum) np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) @@ -1140,9 +1123,13 @@ def test_direct_logruv_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) + # Repeat with binslop = 0 + ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1) + ggg.process(cat1, cat2, ordered=True) - #print('binslop > 0: ggg.ntri = ',ggg.ntri) - #print('diff = ',ggg.ntri - true_ntri_sum) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) np.testing.assert_allclose(ggg.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_122, rtol=1.e-5) @@ -1150,24 +1137,21 @@ def test_direct_logruv_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_122, rtol=1.e-5) - # And again with no top-level recursion - ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, - min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) - ggg.process(cat1, cat2) - #print('max_top = 0: ggg.ntri = ',ggg.ntri) - #print('true_ntri = ',true_ntri_sum) - #print('diff = ',ggg.ntri - true_ntri_sum) + ggg.process(cat1, cat2, ordered=False) np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) + + # And again with no 
top-level recursion + ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1, max_top=0) ggg.process(cat1, cat2, ordered=True) - #print('binslop > 0: ggg.ntri = ',ggg.ntri) - #print('diff = ',ggg.ntri - true_ntri_sum) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) np.testing.assert_allclose(ggg.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_122, rtol=1.e-5) @@ -1175,21 +1159,18 @@ def test_direct_logruv_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_122, rtol=1.e-5) - # Split into patches to test the list-based version of the code. - cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g1_1, g2=g2_1, npatch=3) - cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g1_2, g2=g2_2, npatch=3) - - ggg.process(cat1, cat2, num_threads=2) - + ggg.process(cat1, cat2, ordered=False) np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) - ggg.process(cat1, cat2, ordered=True, num_threads=2) + # Split into patches to test the list-based version of the code. 
+ cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g1_1, g2=g2_1, npatch=3) + cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g1_2, g2=g2_2, npatch=3) + ggg.process(cat1, cat2, ordered=True, num_threads=2) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) np.testing.assert_allclose(ggg.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_122, rtol=1.e-5) @@ -1197,6 +1178,14 @@ def test_direct_logruv_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam3, true_gam3_122, rtol=1.e-5) + ggg.process(cat1, cat2, ordered=False, num_threads=2) + np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) + np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-5) + @timer def test_ggg_logruv(): @@ -2354,17 +2343,15 @@ def test_direct_logsas(): np.testing.assert_allclose(data['gam3i'], ggg.gam3i.flatten(), rtol=1.e-3) # Also check the cross calculation. - # Here, we get 6x as many triangles, since each triangle is discovered 6 times. - ggg.process(cat, cat, cat, num_threads=2) - np.testing.assert_array_equal(ggg.ntri, 6*true_ntri) - np.testing.assert_allclose(ggg.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) + ggg.process(cat,cat,cat, num_threads=2) + np.testing.assert_array_equal(ggg.ntri, true_ntri) + np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam1, true_gam1, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3, true_gam3, rtol=1.e-5, atol=1.e-8) - # But with ordered=True, it only counts each triangle once. 
- ggg.process(cat,cat,cat, ordered=True, num_threads=2) + ggg.process(cat,cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8) @@ -2372,18 +2359,19 @@ def test_direct_logsas(): np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3, true_gam3, rtol=1.e-5, atol=1.e-8) - # Or with 2 argument version, finds each triangle 3 times. - ggg.process(cat,cat) - np.testing.assert_array_equal(ggg.ntri, 3*true_ntri) - np.testing.assert_allclose(ggg.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) + # With ordered=False, we get 6x as many triangles, since each triangle is discovered 6 times. + ggg.process(cat, cat, cat, ordered=False) + np.testing.assert_array_equal(ggg.ntri, 6*true_ntri) + np.testing.assert_allclose(ggg.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam1, true_gam1, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3, true_gam3, rtol=1.e-5, atol=1.e-8) - ggg.process(cat,cat, ordered=True) - np.testing.assert_array_equal(ggg.ntri, true_ntri) - np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) + # Or with 2 argument version, finds each triangle 3 times. 
+ ggg.process(cat,cat, ordered=False) + np.testing.assert_array_equal(ggg.ntri, 3*true_ntri) + np.testing.assert_allclose(ggg.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam1, true_gam1, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8) @@ -2401,7 +2389,7 @@ def test_direct_logsas(): np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3, true_gam3, rtol=1.e-5, atol=1.e-8) - ggg.process(cat,cat,cat, ordered=True) + ggg.process(cat,cat,cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8) @@ -2409,7 +2397,7 @@ def test_direct_logsas(): np.testing.assert_allclose(ggg.gam2, true_gam2, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam3, true_gam3, rtol=1.e-5, atol=1.e-8) - ggg.process(cat,cat, ordered=True) + ggg.process(cat,cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(ggg.gam0, true_gam0, rtol=1.e-5, atol=1.e-8) @@ -2825,7 +2813,6 @@ def test_direct_logsas_cross(): g3_list = [true_gam3_123, true_gam3_132, true_gam3_213, true_gam3_231, true_gam3_312, true_gam3_321] - # With the default ordered=False, we end up with the sum of all permutations. 
true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_gam0_sum = sum(g0_list) @@ -2837,12 +2824,6 @@ def test_direct_logsas_cross(): true_gam1_sum[pos] /= true_weight_sum[pos] true_gam2_sum[pos] /= true_weight_sum[pos] true_gam3_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) - np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) - np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-4, atol=1.e-6) - np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) - np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) # Now normalize each one individually. for w,g0,g1,g2,g3 in zip(w_list, g0_list, g1_list, g2_list, g3_list): @@ -2853,42 +2834,41 @@ def test_direct_logsas_cross(): g3[pos] /= w[pos] # With ordered=True we get just the ones in the given order. - ggg.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_123) np.testing.assert_allclose(ggg.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_123, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_123, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_123, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_123, rtol=1.e-4, atol=1.e-6) - ggg.process(cat1, cat3, cat2, ordered=True) + ggg.process(cat1, cat3, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_132) np.testing.assert_allclose(ggg.weight, true_weight_132, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_132, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_132, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_132, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_132, rtol=1.e-4, atol=1.e-6) - ggg.process(cat2, cat1, cat3, ordered=True) + ggg.process(cat2, 
cat1, cat3) np.testing.assert_array_equal(ggg.ntri, true_ntri_213) np.testing.assert_allclose(ggg.weight, true_weight_213, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_213, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_213, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_213, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_213, rtol=1.e-4, atol=1.e-6) - ggg.process(cat2, cat3, cat1, ordered=True) + ggg.process(cat2, cat3, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_231) np.testing.assert_allclose(ggg.weight, true_weight_231, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_231, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_231, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_231, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_231, rtol=1.e-4, atol=1.e-6) - ggg.process(cat3, cat1, cat2, ordered=True) + ggg.process(cat3, cat1, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_312) np.testing.assert_allclose(ggg.weight, true_weight_312, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_312, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_312, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_312, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_312, rtol=1.e-4, atol=1.e-6) - ggg.process(cat3, cat2, cat1, ordered=True) + ggg.process(cat3, cat2, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_321) np.testing.assert_allclose(ggg.weight, true_weight_321, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_321, rtol=1.e-4, atol=1.e-6) @@ -2896,12 +2876,19 @@ def test_direct_logsas_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_321, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_321, rtol=1.e-4, atol=1.e-6) + # With ordered=False, we end up with the sum of all 
permutations. + ggg.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) + np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) + # Repeat with binslop = 0 ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, nphi_bins=nphi_bins, bin_slop=0, bin_type='LogSAS') - ggg.process(cat1, cat2, cat3) - np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) ggg.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_123) @@ -2911,12 +2898,18 @@ def test_direct_logsas_cross(): np.testing.assert_allclose(ggg.gam2, true_gam2_123, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_123, rtol=1.e-4, atol=1.e-6) + ggg.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) + np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) + # And again with no top-level recursion ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, nphi_bins=nphi_bins, bin_slop=0, max_top=0, bin_type='LogSAS') - ggg.process(cat1, cat2, cat3) - np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) ggg.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_123) @@ -2926,6 +2919,14 @@ def test_direct_logsas_cross(): 
np.testing.assert_allclose(ggg.gam2, true_gam2_123, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_123, rtol=1.e-4, atol=1.e-6) + ggg.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) + np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) + # Error to have cat3, but not cat2 with assert_raises(ValueError): ggg.process(cat1, cat3=cat3) @@ -3079,7 +3080,6 @@ def test_direct_logsas_cross12(): g2_list = [true_gam2_122, true_gam2_212, true_gam2_221] g3_list = [true_gam3_122, true_gam3_212, true_gam3_221] - # With the default ordered=False, we end up with the sum of all permutations. true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_gam0_sum = sum(g0_list) @@ -3091,12 +3091,6 @@ def test_direct_logsas_cross12(): true_gam1_sum[pos] /= true_weight_sum[pos] true_gam2_sum[pos] /= true_weight_sum[pos] true_gam3_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) - np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) - np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-4, atol=1.e-6) - np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) - np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) # Now normalize each one individually. for w,g0,g1,g2,g3 in zip(w_list, g0_list, g1_list, g2_list, g3_list): @@ -3106,22 +3100,20 @@ def test_direct_logsas_cross12(): g2[pos] /= w[pos] g3[pos] /= w[pos] - # With ordered=True we get just the ones in the given order. 
- ggg.process(cat1, cat2, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) np.testing.assert_allclose(ggg.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_122, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_122, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_122, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_122, rtol=1.e-4, atol=1.e-6) - ggg.process(cat2, cat1, cat2, ordered=True) + ggg.process(cat2, cat1, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_212) np.testing.assert_allclose(ggg.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_212, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_212, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_212, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_212, rtol=1.e-4, atol=1.e-6) - ggg.process(cat2, cat2, cat1, ordered=True) + ggg.process(cat2, cat2, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_221) np.testing.assert_allclose(ggg.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_221, rtol=1.e-4, atol=1.e-6) @@ -3129,11 +3121,8 @@ def test_direct_logsas_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_221, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_221, rtol=1.e-4, atol=1.e-6) - # Split into patches to test the list-based version of the code. - cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g1_1, g2=g2_1, npatch=4) - cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g1_2, g2=g2_2, npatch=4) - - ggg.process(cat1, cat2, num_threads=2) + # With ordered=False, we end up with the sum of all permutations. 
+ ggg.process(cat1, cat2, ordered=False) np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) @@ -3141,21 +3130,25 @@ def test_direct_logsas_cross12(): np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) - ggg.process(cat1, cat2, ordered=True) + # Split into patches to test the list-based version of the code. + cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g1_1, g2=g2_1, npatch=4) + cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g1_2, g2=g2_2, npatch=4) + + ggg.process(cat1, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) np.testing.assert_allclose(ggg.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_122, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_122, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_122, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_122, rtol=1.e-4, atol=1.e-6) - ggg.process(cat2, cat1, cat2, ordered=True) + ggg.process(cat2, cat1, cat2) np.testing.assert_array_equal(ggg.ntri, true_ntri_212) np.testing.assert_allclose(ggg.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_212, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam1, true_gam1_212, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam2, true_gam2_212, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_212, rtol=1.e-4, atol=1.e-6) - ggg.process(cat2, cat2, cat1, ordered=True) + ggg.process(cat2, cat2, cat1) np.testing.assert_array_equal(ggg.ntri, true_ntri_221) np.testing.assert_allclose(ggg.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(ggg.gam0, true_gam0_221, rtol=1.e-4, atol=1.e-6) @@ -3163,6 +3156,14 @@ def test_direct_logsas_cross12(): 
np.testing.assert_allclose(ggg.gam2, true_gam2_221, rtol=1.e-4, atol=1.e-6) np.testing.assert_allclose(ggg.gam3, true_gam3_221, rtol=1.e-4, atol=1.e-6) + ggg.process(cat1, cat2, ordered=False, num_threads=2) + np.testing.assert_array_equal(ggg.ntri, true_ntri_sum) + np.testing.assert_allclose(ggg.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(ggg.gam0, true_gam0_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam1, true_gam1_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam2, true_gam2_sum, rtol=1.e-4, atol=1.e-6) + np.testing.assert_allclose(ggg.gam3, true_gam3_sum, rtol=1.e-4, atol=1.e-6) + @timer def test_ggg_logsas(): @@ -3235,7 +3236,7 @@ def test_ggg_logsas(): min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, sep_units='arcmin', phi_units='degrees', bin_type='LogSAS') t0 = time.time() - gggc.process(cat,cat,cat, ordered=True) + gggc.process(cat,cat,cat) t1 = time.time() print('cross process time = ',t1-t0) np.testing.assert_allclose(gggc.ntri, ggg.ntri, rtol=1.e-3) @@ -3249,7 +3250,7 @@ def test_ggg_logsas(): np.testing.assert_allclose(gggc.gam3, ggg.gam3, rtol=2.e-3) t0 = time.time() - gggc.process(cat,cat, ordered=True) + gggc.process(cat,cat) t1 = time.time() print('cross12 process time = ',t1-t0) np.testing.assert_allclose(gggc.ntri, ggg.ntri, rtol=1.e-3) diff --git a/tests/test_kkk.py b/tests/test_kkk.py index e22318cc..d42ab19c 100644 --- a/tests/test_kkk.py +++ b/tests/test_kkk.py @@ -109,30 +109,28 @@ def test_direct_logruv(): np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3) # Also check the cross calculation. - # Here, we get 6x as many triangles, since each triangle is discovered 6 times. 
- kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) - kkk.process(cat, cat, cat, num_threads=2) - np.testing.assert_array_equal(kkk.ntri, 6*true_ntri) - np.testing.assert_allclose(kkk.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) + kkk.process(cat,cat,cat, num_threads=2) + np.testing.assert_array_equal(kkk.ntri, true_ntri) + np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - # But with ordered=True, it only counts each triangle once. - kkk.process(cat,cat,cat, ordered=True, num_threads=2) + kkk.process(cat,cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) + # With ordered=False, we get 6x as many triangles, since each triangle is discovered 6 times. + kkk.process(cat,cat,cat, ordered=False) + np.testing.assert_array_equal(kkk.ntri, 6*true_ntri) + np.testing.assert_allclose(kkk.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) + np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) + # Or with 2 argument version, finds each triangle 3 times. - kkk.process(cat,cat) + kkk.process(cat,cat, ordered=False) np.testing.assert_array_equal(kkk.ntri, 3*true_ntri) np.testing.assert_allclose(kkk.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - kkk.process(cat,cat, ordered=True) - np.testing.assert_array_equal(kkk.ntri, true_ntri) - np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) - np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. 
kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, @@ -142,12 +140,12 @@ def test_direct_logruv(): np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - kkk.process(cat,cat,cat, ordered=True) + kkk.process(cat,cat,cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - kkk.process(cat,cat, ordered=True) + kkk.process(cat,cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) @@ -505,15 +503,11 @@ def test_direct_logruv_cross(): z_list = [true_zeta_123, true_zeta_132, true_zeta_213, true_zeta_231, true_zeta_312, true_zeta_321] - # With the default ordered=False, we end up with the sum of all permutations. true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_zeta_sum = sum(z_list) pos = true_weight_sum > 0 true_zeta_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) # Now normalize each one individually. for w,z in zip(w_list, z_list): @@ -521,57 +515,68 @@ def test_direct_logruv_cross(): z[pos] /= w[pos] # With ordered=True we get just the ones in the given order. 
- kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-5) - kkk.process(cat1, cat3, cat2, ordered=True) + kkk.process(cat1, cat3, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_132) np.testing.assert_allclose(kkk.weight, true_weight_132, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_132, rtol=1.e-5) - kkk.process(cat2, cat1, cat3, ordered=True) + kkk.process(cat2, cat1, cat3) np.testing.assert_array_equal(kkk.ntri, true_ntri_213) np.testing.assert_allclose(kkk.weight, true_weight_213, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_213, rtol=1.e-5) - kkk.process(cat2, cat3, cat1, ordered=True) + kkk.process(cat2, cat3, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_231) np.testing.assert_allclose(kkk.weight, true_weight_231, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_231, rtol=1.e-5) - kkk.process(cat3, cat1, cat2, ordered=True) + kkk.process(cat3, cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_312) np.testing.assert_allclose(kkk.weight, true_weight_312, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_312, rtol=1.e-5) - kkk.process(cat3, cat2, cat1, ordered=True) + kkk.process(cat3, cat2, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_321) np.testing.assert_allclose(kkk.weight, true_weight_321, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_321, rtol=1.e-5) + # With the default ordered=False, we end up with the sum of all permutations. 
+ kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + # Repeat with binslop = 0 kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) - kkk.process(cat1, cat2, cat3) - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-5) + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + # And again with no top-level recursion kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) - kkk.process(cat1, cat2, cat3) - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-5) + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + # Error to have cat3, but not cat2 with assert_raises(ValueError): kkk.process(cat1, cat3=cat3) @@ -701,15 +706,11 @@ def test_direct_logruv_cross12(): w_list = [true_weight_122, 
true_weight_212, true_weight_221] z_list = [true_zeta_122, true_zeta_212, true_zeta_221] - # With the default ordered=False, we end up with the sum of all permutations. true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_zeta_sum = sum(z_list) pos = true_weight_sum > 0 true_zeta_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) # Now normalize each one individually. for w,z in zip(w_list, z_list): @@ -717,41 +718,46 @@ def test_direct_logruv_cross12(): z[pos] /= w[pos] # With ordered=True we get just the ones in the given order. - kkk.process(cat1, cat2, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_122) np.testing.assert_allclose(kkk.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_122, rtol=1.e-5) - kkk.process(cat2, cat1, cat2, ordered=True) + kkk.process(cat2, cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_212) np.testing.assert_allclose(kkk.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_212, rtol=1.e-5) - kkk.process(cat2, cat2, cat1, ordered=True) + kkk.process(cat2, cat2, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_221) np.testing.assert_allclose(kkk.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_221, rtol=1.e-5) - # Split into patches to test the list-based version of the code. - cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, k=k1, npatch=4) - cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, k=k2, npatch=4) - - kkk.process(cat1, cat2, num_threads=2) + # With ordered=False, we end up with the sum of all permutations. 
+ kkk.process(cat1, cat2, ordered=False) np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) - kkk.process(cat1, cat2, ordered=True) + # Split into patches to test the list-based version of the code. + cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, k=k1, npatch=4) + cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, k=k2, npatch=4) + + kkk.process(cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_122) np.testing.assert_allclose(kkk.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_122, rtol=1.e-5) - kkk.process(cat2, cat1, cat2, ordered=True) + kkk.process(cat2, cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_212) np.testing.assert_allclose(kkk.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_212, rtol=1.e-5) - kkk.process(cat2, cat2, cat1, ordered=True) + kkk.process(cat2, cat2, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_221) np.testing.assert_allclose(kkk.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_221, rtol=1.e-5) + kkk.process(cat1, cat2, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + @timer def test_direct_logruv_cross_3d(): @@ -897,15 +903,11 @@ def test_direct_logruv_cross_3d(): z_list = [true_zeta_123, true_zeta_132, true_zeta_213, true_zeta_231, true_zeta_312, true_zeta_321] - # With the default ordered=False, we end up with the sum of all permutations. 
true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_zeta_sum = sum(z_list) pos = true_weight_sum > 0 true_zeta_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) # Now normalize each one individually. for w,z in zip(w_list, z_list): @@ -913,37 +915,48 @@ def test_direct_logruv_cross_3d(): z[pos] /= w[pos] # With ordered=True, we get just the ones in this order. - kkk.process(cat1, cat2, cat3, ordered=True, num_threads=2) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-5) + # With ordered=False, we end up with the sum of all permutations. + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + # Repeat with binslop = 0 kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) - kkk.process(cat1, cat2, cat3) - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - kkk.process(cat1, cat2, cat3, ordered=True, num_threads=2) + kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-5) + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + # And again with no top-level recursion kkk = 
treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) - kkk.process(cat1, cat2, cat3) - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - kkk.process(cat1, cat2, cat3, ordered=True, num_threads=2) + kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-5) + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-5) + @timer def test_constant(): @@ -977,12 +990,12 @@ def test_constant(): np.testing.assert_allclose(kkk.zeta, A**3, rtol=1.e-5) # Should also work as a cross-correlation - kkk.process(cat, cat, cat) + kkk.process(cat, cat, cat, ordered=True) print('as cross-correlation: kkk.zeta = ',kkk.zeta.flatten()) np.testing.assert_allclose(kkk.zeta, A**3, rtol=1.e-5) - kkk.process(cat, cat, cat, ordered=True) - print('as cross-correlation ordered: kkk.zeta = ',kkk.zeta.flatten()) + kkk.process(cat, cat, cat, ordered=False) + print('as cross-correlation unordered: kkk.zeta = ',kkk.zeta.flatten()) np.testing.assert_allclose(kkk.zeta, A**3, rtol=1.e-5) # Check LogSAS binning @@ -992,12 +1005,12 @@ def test_constant(): print('LogSAS: kkk.zeta = ',kkk2.zeta.flatten()) np.testing.assert_allclose(kkk2.zeta, A**3, rtol=1.e-5) - kkk2.process(cat, cat) + kkk2.process(cat, cat, ordered=True) print('as cross-correlation: kkk.zeta = ',kkk2.zeta.flatten()) np.testing.assert_allclose(kkk2.zeta, A**3, rtol=1.e-5) - kkk2.process(cat, cat, ordered=True) - print('as cross-correlation ordered: kkk.zeta = ',kkk2.zeta.flatten()) + kkk2.process(cat, cat, ordered=False) + print('as cross-correlation 
unordered: kkk.zeta = ',kkk2.zeta.flatten()) np.testing.assert_allclose(kkk2.zeta, A**3, rtol=1.e-5) # Now add some noise to the values. It should still work, but at slightly lower accuracy. @@ -1271,29 +1284,28 @@ def test_direct_logsas(): np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3) # Also check the cross calculation. - # Here, we get 6x as many triangles, since each triangle is discovered 6 times. - kkk.process(cat, cat, cat, num_threads=2) - np.testing.assert_array_equal(kkk.ntri, 6*true_ntri) - np.testing.assert_allclose(kkk.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) + kkk.process(cat,cat,cat) + np.testing.assert_array_equal(kkk.ntri, true_ntri) + np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - # But with ordered=True, it only counts each triangle once. - kkk.process(cat,cat,cat, ordered=True, num_threads=2) + kkk.process(cat,cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) + # With ordered=False, we get 6x as many triangles, since each triangle is discovered 6 times. + kkk.process(cat, cat, cat, ordered=False) + np.testing.assert_array_equal(kkk.ntri, 6*true_ntri) + np.testing.assert_allclose(kkk.weight, 6*true_weight, rtol=1.e-5, atol=1.e-8) + np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) + # Or with 2 argument version, finds each triangle 3 times. 
- kkk.process(cat,cat) + kkk.process(cat,cat, ordered=False) np.testing.assert_array_equal(kkk.ntri, 3*true_ntri) np.testing.assert_allclose(kkk.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - kkk.process(cat,cat, ordered=True) - np.testing.assert_array_equal(kkk.ntri, true_ntri) - np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) - np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, @@ -1303,12 +1315,12 @@ def test_direct_logsas(): np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - kkk.process(cat,cat,cat, ordered=True) + kkk.process(cat,cat,cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) - kkk.process(cat,cat, ordered=True) + kkk.process(cat,cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8) @@ -1712,15 +1724,11 @@ def test_direct_logsas_cross(): z_list = [true_zeta_123, true_zeta_132, true_zeta_213, true_zeta_231, true_zeta_312, true_zeta_321] - # With the default ordered=False, we end up with the sum of all permutations. 
true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_zeta_sum = sum(z_list) pos = true_weight_sum > 0 true_zeta_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-4, atol=1.e-6) # Now normalize each one individually. for w,z in zip(w_list, z_list): @@ -1728,55 +1736,66 @@ def test_direct_logsas_cross(): z[pos] /= w[pos] # With ordered=True we get just the ones in the given order. - kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-4, atol=1.e-6) - kkk.process(cat1, cat3, cat2, ordered=True) + kkk.process(cat1, cat3, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_132) np.testing.assert_allclose(kkk.weight, true_weight_132, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_132, rtol=1.e-4, atol=1.e-6) - kkk.process(cat2, cat1, cat3, ordered=True) + kkk.process(cat2, cat1, cat3) np.testing.assert_array_equal(kkk.ntri, true_ntri_213) np.testing.assert_allclose(kkk.weight, true_weight_213, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_213, rtol=1.e-4, atol=1.e-6) - kkk.process(cat2, cat3, cat1, ordered=True) + kkk.process(cat2, cat3, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_231) np.testing.assert_allclose(kkk.weight, true_weight_231, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_231, rtol=1.e-4, atol=1.e-6) - kkk.process(cat3, cat1, cat2, ordered=True) + kkk.process(cat3, cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_312) np.testing.assert_allclose(kkk.weight, true_weight_312, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_312, rtol=1.e-4, atol=1.e-6) - kkk.process(cat3, cat2, cat1, ordered=True) + kkk.process(cat3, cat2, cat1) 
np.testing.assert_array_equal(kkk.ntri, true_ntri_321) np.testing.assert_allclose(kkk.weight, true_weight_321, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_321, rtol=1.e-4, atol=1.e-6) + # With ordered=False, we end up with the sum of all permutations. + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-4, atol=1.e-6) + # Repeat with binslop = 0 kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, nphi_bins=nphi_bins, bin_slop=0, bin_type='LogSAS') - kkk.process(cat1, cat2, cat3) - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-4, atol=1.e-6) + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-4, atol=1.e-6) + # And again with no top-level recursion kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, nphi_bins=nphi_bins, bin_slop=0, max_top=0, bin_type='LogSAS') - kkk.process(cat1, cat2, cat3) - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) np.testing.assert_allclose(kkk.weight, true_weight_123, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_123, rtol=1.e-4, atol=1.e-6) + kkk.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, 
true_zeta_sum, rtol=1.e-4, atol=1.e-6) + # Error to have cat3, but not cat2 with assert_raises(ValueError): kkk.process(cat1, cat3=cat3) @@ -1896,9 +1915,6 @@ def test_direct_logsas_cross12(): true_zeta_sum = sum(z_list) pos = true_weight_sum > 0 true_zeta_sum[pos] /= true_weight_sum[pos] - np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) - np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) - np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-4, atol=1.e-6) # Now normalize each one individually. for w,z in zip(w_list, z_list): @@ -1906,41 +1922,45 @@ def test_direct_logsas_cross12(): z[pos] /= w[pos] # With ordered=True we get just the ones in the given order. - kkk.process(cat1, cat2, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_122) np.testing.assert_allclose(kkk.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_122, rtol=1.e-4, atol=1.e-6) - kkk.process(cat2, cat1, cat2, ordered=True) + kkk.process(cat2, cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_212) np.testing.assert_allclose(kkk.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_212, rtol=1.e-4, atol=1.e-6) - kkk.process(cat2, cat2, cat1, ordered=True) + kkk.process(cat2, cat2, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_221) np.testing.assert_allclose(kkk.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_221, rtol=1.e-4, atol=1.e-6) - # Split into patches to test the list-based version of the code. 
- cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, k=k1, npatch=4) - cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, k=k2, npatch=4) - - kkk.process(cat1, cat2, num_threads=2) + kkk.process(cat1, cat2, ordered=False) np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-4, atol=1.e-6) - kkk.process(cat1, cat2, ordered=True) + # Split into patches to test the list-based version of the code. + cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, k=k1, npatch=4) + cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, k=k2, npatch=4) + + kkk.process(cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_122) np.testing.assert_allclose(kkk.weight, true_weight_122, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_122, rtol=1.e-4, atol=1.e-6) - kkk.process(cat2, cat1, cat2, ordered=True) + kkk.process(cat2, cat1, cat2) np.testing.assert_array_equal(kkk.ntri, true_ntri_212) np.testing.assert_allclose(kkk.weight, true_weight_212, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_212, rtol=1.e-4, atol=1.e-6) - kkk.process(cat2, cat2, cat1, ordered=True) + kkk.process(cat2, cat2, cat1) np.testing.assert_array_equal(kkk.ntri, true_ntri_221) np.testing.assert_allclose(kkk.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_221, rtol=1.e-4, atol=1.e-6) + kkk.process(cat1, cat2, ordered=False) + np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) + np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) + np.testing.assert_allclose(kkk.zeta, true_zeta_sum, rtol=1.e-4, atol=1.e-6) + @timer def test_kkk_logsas(): @@ -1996,7 +2016,7 @@ def test_kkk_logsas(): min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, sep_units='arcmin', phi_units='deg', bin_type='LogSAS') t0 = time.time() - kkkc.process(cat,cat,cat, ordered=True) + kkkc.process(cat,cat,cat) t1 = time.time() print('cross process time = ',t1-t0) 
print(kkk.zeta) @@ -2009,7 +2029,7 @@ def test_kkk_logsas(): np.testing.assert_allclose(kkkc.zeta, kkk.zeta, rtol=1.e-3) t0 = time.time() - kkkc.process(cat,cat, ordered=True) + kkkc.process(cat,cat) t1 = time.time() print('cross12 process time = ',t1-t0) np.testing.assert_allclose(kkkc.ntri, kkk.ntri, rtol=1.e-3) diff --git a/tests/test_nnn.py b/tests/test_nnn.py index 9412daa5..6311d55b 100644 --- a/tests/test_nnn.py +++ b/tests/test_nnn.py @@ -963,8 +963,8 @@ def test_direct_logruv_auto(): min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=0) - drr.process(cat, rcat) - rdd.process(rcat, cat) + drr.process(cat, rcat, ordered=False) + rdd.process(rcat, cat, ordered=False) zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) config['nnn_statistic'] = 'compensated' @@ -1000,23 +1000,25 @@ def test_direct_logruv_auto(): ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) - # And compare to the cross correlation - # Here, we get 6x as much, since each triangle is discovered 6 times. - ddd.clear() + # Compare to the cross correlation + ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1) ddd.process(cat,cat,cat, num_threads=2) - np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) + np.testing.assert_array_equal(ddd.ntri, true_ntri) - # But with ordered=True, it only counts each triangle once. - ddd.process(cat,cat,cat, ordered=True, num_threads=2) + ddd.process(cat,cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) + # With ordered=False, we get 6x as much, since each triangle is discovered 6 times. + ddd.process(cat,cat,cat, ordered=False, num_threads=2) + np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) + # Or with 2 argument version, finds each triangle 3 times. 
- ddd.process(cat,cat) + ddd.process(cat,cat, ordered=False) np.testing.assert_array_equal(ddd.ntri, 3*true_ntri) - ddd.process(cat,cat, ordered=True) - np.testing.assert_array_equal(ddd.ntri, true_ntri) - do_pickle(ddd) # Split into patches to test the list-based version of the code. @@ -1227,7 +1229,7 @@ def test_direct_logruv_cross(): t0 = time.time() ddd.process(cat1, cat2, cat3) t1 = time.time() - print('brute unordered: ',t1-t0) + print('brute ordered 123: ',t1-t0) log_min_sep = np.log(min_sep) log_max_sep = np.log(max_sep) @@ -1295,92 +1297,92 @@ def test_direct_logruv_cross(): assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 - # With the default ordered=False, we end up with the sum of all permutations. - true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ - true_ntri_312 + true_ntri_321 - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - - # With ordered=True we get just the ones in the given order. - t0 = time.time() - ddd.process(cat1, cat2, cat3, ordered=True) - t1 = time.time() - print('brute ordered 123: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) t0 = time.time() - ddd.process(cat1, cat3, cat2, ordered=True) + ddd.process(cat1, cat3, cat2) t1 = time.time() print('brute ordered 132: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_132) t0 = time.time() - ddd.process(cat2, cat1, cat3, ordered=True) + ddd.process(cat2, cat1, cat3) t1 = time.time() print('brute ordered 213: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_213) t0 = time.time() - ddd.process(cat2, cat3, cat1, ordered=True) + ddd.process(cat2, cat3, cat1) t1 = time.time() print('brute ordered 231: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_231) t0 = time.time() - ddd.process(cat3, cat1, cat2, ordered=True) + ddd.process(cat3, cat1, cat2) t1 = time.time() print('brute ordered 312: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_312) t0 = time.time() - ddd.process(cat3, cat2, cat1, 
ordered=True) + ddd.process(cat3, cat2, cat1) t1 = time.time() print('brute ordered 321: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_321) + # With ordered=False, we end up with the sum of all permutations. + t0 = time.time() + ddd.process(cat1, cat2, cat3, ordered=False) + t1 = time.time() + print('brute unordered: ',t1-t0) + true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ + true_ntri_312 + true_ntri_321 + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) - t0 = time.time() - ddd.process(cat1, cat2, cat3) - t1 = time.time() - print('bin_slop=0 unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) t0 = time.time() - ddd.process(cat1, cat2, cat3, ordered=True) + ddd.process(cat1, cat2, cat3) t1 = time.time() print('bin_slop=0 ordered 123: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) t0 = time.time() - ddd.process(cat1, cat3, cat2, ordered=True) + ddd.process(cat1, cat3, cat2) t1 = time.time() print('bin_slop=0 ordered 132: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_132) t0 = time.time() - ddd.process(cat2, cat1, cat3, ordered=True) + ddd.process(cat2, cat1, cat3) t1 = time.time() print('bin_slop=0 ordered 213: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_213) t0 = time.time() - ddd.process(cat2, cat3, cat1, ordered=True) + ddd.process(cat2, cat3, cat1) t1 = time.time() print('bin_slop=0 ordered 231: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_231) t0 = time.time() - ddd.process(cat3, cat1, cat2, ordered=True) + ddd.process(cat3, cat1, cat2) t1 = time.time() print('bin_slop=0 ordered 312: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_312) t0 = time.time() - ddd.process(cat3, cat2, cat1, ordered=True) + 
ddd.process(cat3, cat2, cat1) t1 = time.time() print('bin_slop=0 ordered 321: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_321) + t0 = time.time() + ddd.process(cat1, cat2, cat3, ordered=False) + t1 = time.time() + print('bin_slop=0 unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) t0 = time.time() - ddd.process(cat1, cat2, cat3, ordered=True) + ddd.process(cat1, cat2, cat3) t1 = time.time() print('no top bin_slop=0, ordered 123 ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) @@ -1489,27 +1491,26 @@ def test_direct_logruv_cross12(): assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 - # With the default ordered=False, we end up with the sum of all permutations. - true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221 - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - - # With ordered=True we get just the ones in the given order. - t0 = time.time() - ddd.process(cat1, cat2, ordered=True) - t1 = time.time() - print('brute ordered: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_122) t0 = time.time() - ddd.process(cat2, cat1, cat2, ordered=True) + ddd.process(cat2, cat1, cat2) t1 = time.time() print('brute ordered 212: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_212) t0 = time.time() - ddd.process(cat2, cat2, cat1, ordered=True) + ddd.process(cat2, cat2, cat1) t1 = time.time() print('brute ordered 221: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_221) + # With ordered=False, we end up with the sum of all permutations. 
+ t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('brute unordered: ',t1-t0) + true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221 + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, @@ -1518,24 +1519,25 @@ def test_direct_logruv_cross12(): t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() - print('bin_slop=0 unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() - ddd.process(cat1, cat2, ordered=True) - t1 = time.time() print('bin_slop=0 ordered: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_122) t0 = time.time() - ddd.process(cat2, cat1, cat2, ordered=True) + ddd.process(cat2, cat1, cat2) t1 = time.time() print('bin_slop=0 ordered 212: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_212) t0 = time.time() - ddd.process(cat2, cat2, cat1, ordered=True) + ddd.process(cat2, cat2, cat1) t1 = time.time() print('bin_slop=0 ordered 221: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_221) + t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('bin_slop=0 unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, @@ -1544,8 +1546,8 @@ def test_direct_logruv_cross12(): t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() - print('no top bin_slop=0 unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + print('no top bin_slop=0: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_122) # Split into patches to test the list-based version of the code. 
cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10) @@ -1558,13 +1560,13 @@ def test_direct_logruv_cross12(): t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() - print('patch unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() - ddd.process(cat1, cat2, ordered=True) - t1 = time.time() print('patch ordered 122: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_122) + t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('patch unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) @timer @@ -1877,13 +1879,13 @@ def test_direct_logruv_partial(): assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 + np.testing.assert_array_equal(ddda.ntri, true_ntri_123) + true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ true_ntri_312 + true_ntri_321 + ddda.process(cat1a, cat2a, cat3a, ordered=False) np.testing.assert_array_equal(ddda.ntri, true_ntri_sum) - ddda.process(cat1a, cat2a, cat3a, ordered=True) - np.testing.assert_array_equal(ddda.ntri, true_ntri_123) - # Now check that we get the same thing with all the points, but with w=0 for the ones # we don't want. w1 = np.zeros(ngal) @@ -1900,11 +1902,11 @@ def test_direct_logruv_partial(): min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True) dddb.process(cat1b, cat2b, cat3b) - np.testing.assert_array_equal(dddb.ntri, true_ntri_sum) - - dddb.process(cat1b, cat2b, cat3b, ordered=True) np.testing.assert_array_equal(dddb.ntri, true_ntri_123) + dddb.process(cat1b, cat2b, cat3b, ordered=False) + np.testing.assert_array_equal(dddb.ntri, true_ntri_sum) + @timer def test_direct_logruv_3d_auto(): @@ -2015,11 +2017,11 @@ def test_direct_logruv_3d_auto(): # And compare to the cross correlation # With ordered=False, we get 6x as much, since each triangle is discovered 6 times. 
ddd.process(cat,cat,cat) - np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) - - ddd.process(cat,cat,cat, ordered=True) np.testing.assert_array_equal(ddd.ntri, true_ntri) + ddd.process(cat,cat,cat, ordered=False) + np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) + # Also compare to using x,y,z rather than ra,dec,r cat = treecorr.Catalog(x=x, y=y, z=z) ddd.process(cat) @@ -2141,41 +2143,47 @@ def test_direct_logruv_3d_cross(): assert 0 <= kv < 2*nvbins true_ntri[kr,ku,kv] += 1 - # With the default ordered=False, we end up with the sum of all permutations. + np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + + # With ordered=False, we end up with the sum of all permutations. + ddd.process(cat1, cat2, cat3, ordered=False) true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ true_ntri_312 + true_ntri_321 np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - ddd.process(cat1, cat2, cat3, ordered=True) - np.testing.assert_array_equal(ddd.ntri, true_ntri_123) - ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1) ddd.process(cat1, cat2, cat3) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - ddd.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + ddd.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, bin_slop=0, verbose=1, max_top=0) - ddd.process(cat1, cat2, cat3) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) ddd.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + ddd.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(ddd.ntri, 
true_ntri_sum) # Also compare to using x,y,z rather than ra,dec,r cat1 = treecorr.Catalog(x=x1, y=y1, z=z1) cat2 = treecorr.Catalog(x=x2, y=y2, z=z2) cat3 = treecorr.Catalog(x=x3, y=y3, z=z3) - ddd.process(cat1, cat2, cat3) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + + ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1) + ddd.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + ddd.process(cat1, cat2, cat3, ordered=False) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) @timer @@ -2487,8 +2495,8 @@ def test_nnn_logruv(): drr = ddd.copy() rdd = ddd.copy() - drr.process(cat,rand) - rdd.process(rand,cat) + drr.process(cat,rand, ordered=False) + rdd.process(rand,cat, ordered=False) zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) print('compensated zeta = ',zeta) @@ -2925,21 +2933,21 @@ def test_direct_logsas_auto(): np.testing.assert_array_equal(ddd.ntri, true_ntri) # And compare to the cross correlation - # As before, this will count each triangle 6 times. ddd.process(cat,cat,cat) + np.testing.assert_array_equal(ddd.ntri, true_ntri) + + # As before, this will count each triangle 6 times. + ddd.process(cat,cat,cat, ordered=False) np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) - # But with ordered=True, it only counts each triangle once. - ddd.process(cat,cat,cat, ordered=True) + # Or with 2 argument version + ddd.process(cat,cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) - # Or with 2 argument version, finds each triangle 3 times. - ddd.process(cat,cat) + # Now ordered=False finds each triangle 3 times. 
+ ddd.process(cat,cat, ordered=False) np.testing.assert_array_equal(ddd.ntri, 3*true_ntri) - ddd.process(cat,cat, ordered=True) - np.testing.assert_array_equal(ddd.ntri, true_ntri) - do_pickle(ddd) # Split into patches to test the list-based version of the code. @@ -3114,72 +3122,73 @@ def test_direct_logsas_cross(): t1 = time.time() print('Python brute: ',t1-t0) - # With the default ordered=False, we end up with the sum of all permutations. - true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ - true_ntri_312 + true_ntri_321 - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - - # With ordered=True we get just the ones in the given order. - t0 = time.time() - ddd.process(cat1, cat2, cat3, ordered=True) - t1 = time.time() - print('brute ordered 123: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + t0 = time.time() - ddd.process(cat1, cat3, cat2, ordered=True) + ddd.process(cat1, cat3, cat2) t1 = time.time() print('brute ordered 132: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_132) t0 = time.time() - ddd.process(cat2, cat1, cat3, ordered=True) + ddd.process(cat2, cat1, cat3) t1 = time.time() print('brute ordered 213: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_213) t0 = time.time() - ddd.process(cat2, cat3, cat1, ordered=True) + ddd.process(cat2, cat3, cat1) t1 = time.time() print('brute ordered 231: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_231) t0 = time.time() - ddd.process(cat3, cat1, cat2, ordered=True) + ddd.process(cat3, cat1, cat2) t1 = time.time() print('brute ordered 312: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_312) t0 = time.time() - ddd.process(cat3, cat2, cat1, ordered=True) + ddd.process(cat3, cat2, cat1) t1 = time.time() print('brute ordered 321: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_321) + # With ordered=False, we end up with the sum of all permutations. 
+ t0 = time.time() + ddd.process(cat1, cat2, cat3, ordered=False) + t1 = time.time() + print('brute unordered: ',t1-t0) + true_ntri_sum = true_ntri_123 + true_ntri_132 + true_ntri_213 + true_ntri_231 +\ + true_ntri_312 + true_ntri_321 + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, bin_slop=0, verbose=1, bin_type='LogSAS') t0 = time.time() - ddd.process(cat1, cat2, cat3) - t1 = time.time() - print('bin_slop=0 unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() ddd.process(cat1, cat2, cat3, ordered=True) t1 = time.time() print('bin_slop=0 ordered: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + t0 = time.time() + ddd.process(cat1, cat2, cat3, ordered=False) + t1 = time.time() + print('bin_slop=0 unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, bin_slop=0, verbose=1, max_top=0, bin_type='LogSAS') t0 = time.time() - ddd.process(cat1, cat2, cat3) - t1 = time.time() - print('no top unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() ddd.process(cat1, cat2, cat3, ordered=True) t1 = time.time() print('no top ordered: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) + t0 = time.time() + ddd.process(cat1, cat2, cat3, ordered=False) + t1 = time.time() + print('no top unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Split into patches to test the list-based version of the code. 
cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10) @@ -3193,38 +3202,38 @@ def test_direct_logsas_cross(): t0 = time.time() ddd.process(cat1, cat2, cat3) t1 = time.time() - print('patch unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() - ddd.process(cat1, cat2, cat3, ordered=True) - t1 = time.time() print('patch ordered 123: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) t0 = time.time() - ddd.process(cat1, cat3, cat2, ordered=True) + ddd.process(cat1, cat3, cat2) t1 = time.time() print('patch ordered 132: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_132) t0 = time.time() - ddd.process(cat2, cat1, cat3, ordered=True) + ddd.process(cat2, cat1, cat3) t1 = time.time() print('patch ordered 213: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_213) t0 = time.time() - ddd.process(cat2, cat3, cat1, ordered=True) + ddd.process(cat2, cat3, cat1) t1 = time.time() print('patch ordered 231: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_231) t0 = time.time() - ddd.process(cat3, cat1, cat2, ordered=True) + ddd.process(cat3, cat1, cat2) t1 = time.time() print('patch ordered 312: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_312) t0 = time.time() - ddd.process(cat3, cat2, cat1, ordered=True) + ddd.process(cat3, cat2, cat1) t1 = time.time() print('patch ordered 321: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_321) + t0 = time.time() + ddd.process(cat1, cat2, cat3, ordered=False) + t1 = time.time() + print('patch unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) @timer @@ -3320,27 +3329,27 @@ def test_direct_logsas_cross12(): t1 = time.time() print('Python brute: ',t1-t0) - # With the default ordered=False, we end up with the sum of all permutations. - true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221 - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - - # With ordered=True we get just the ones in the given order. 
- t0 = time.time() - ddd.process(cat1, cat2, ordered=True) - t1 = time.time() - print('brute ordered: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_122) + t0 = time.time() - ddd.process(cat2, cat1, cat2, ordered=True) + ddd.process(cat2, cat1, cat2) t1 = time.time() print('brute ordered 212: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_212) t0 = time.time() - ddd.process(cat2, cat2, cat1, ordered=True) + ddd.process(cat2, cat2, cat1) t1 = time.time() print('brute ordered 221: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_221) + # With ordered=False, we end up with the sum of all permutations. + t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('brute ordered: ',t1-t0) + true_ntri_sum = true_ntri_122 + true_ntri_212 + true_ntri_221 + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) + # Repeat with binslop = 0 ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, @@ -3348,34 +3357,30 @@ def test_direct_logsas_cross12(): t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() - print('bin_slop=0 unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() - ddd.process(cat1, cat2, ordered=True) - t1 = time.time() print('bin_slop=0 ordered: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_122) t0 = time.time() - ddd.process(cat2, cat1, cat2, ordered=True) + ddd.process(cat2, cat1, cat2) t1 = time.time() print('bin_slop=0 ordered 212: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_212) t0 = time.time() - ddd.process(cat2, cat2, cat1, ordered=True) + ddd.process(cat2, cat2, cat1) t1 = time.time() print('bin_slop=0 ordered 221: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_221) + t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('bin_slop=0 unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, 
true_ntri_sum) + # And again with no top-level recursion ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, bin_slop=0, verbose=1, max_top=0, bin_type='LogSAS') t0 = time.time() - ddd.process(cat1, cat2) - t1 = time.time() - print('no top unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) - t0 = time.time() ddd.process(cat1, cat2, ordered=True) t1 = time.time() print('no top ordered: ',t1-t0) @@ -3390,16 +3395,16 @@ def test_direct_logsas_cross12(): t1 = time.time() print('no top ordered 221: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_221) + t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('no top unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) # Split into patches to test the list-based version of the code. cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10) cat2 = treecorr.Catalog(x=x2, y=y2, npatch=10) - t0 = time.time() - ddd.process(cat1, cat2) - t1 = time.time() - print('patch unordered: ',t1-t0) - np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) t0 = time.time() ddd.process(cat1, cat2, ordered=True) t1 = time.time() @@ -3415,6 +3420,11 @@ def test_direct_logsas_cross12(): t1 = time.time() print('patch ordered 221: ',t1-t0) np.testing.assert_array_equal(ddd.ntri, true_ntri_221) + t0 = time.time() + ddd.process(cat1, cat2, ordered=False) + t1 = time.time() + print('patch unordered: ',t1-t0) + np.testing.assert_array_equal(ddd.ntri, true_ntri_sum) @timer def test_nnn_logsas(): @@ -3478,7 +3488,7 @@ def test_nnn_logsas(): min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, sep_units='arcmin', verbose=1, bin_type='LogSAS') t0 = time.time() - dddc.process(cat,cat,cat, ordered=True) + dddc.process(cat,cat,cat) t1 = time.time() print('cross process time = ',t1-t0) np.testing.assert_allclose(dddc.ntri, ddd.ntri) @@ -3488,7 +3498,7 @@ def test_nnn_logsas(): 
np.testing.assert_allclose(dddc.meanphi, ddd.meanphi) t0 = time.time() - dddc.process(cat,cat, ordered=True) + dddc.process(cat,cat) t1 = time.time() print('cross12 process time = ',t1-t0) np.testing.assert_allclose(dddc.ntri, ddd.ntri) @@ -3715,8 +3725,8 @@ def test_nnn_logsas(): drr = ddd.copy() rdd = ddd.copy() - drr.process(cat,rand) - rdd.process(rand,cat) + drr.process(cat,rand, ordered=False) + rdd.process(rand,cat, ordered=False) zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) print('compensated zeta = ',zeta) @@ -3805,8 +3815,6 @@ def test_nnn_logsas(): header = fitsio.read_header(out_file_name3, 1) np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.) - - if __name__ == '__main__': test_logruv_binning() test_logsas_binning() diff --git a/tests/test_patch3pt.py b/tests/test_patch3pt.py index 9a6e0e2e..68273d38 100644 --- a/tests/test_patch3pt.py +++ b/tests/test_patch3pt.py @@ -1649,13 +1649,13 @@ def test_finalize_false(): kkk2.process(cat1, initialize=True, finalize=False) kkk2.process(cat2, initialize=False, finalize=False) kkk2.process(cat3, initialize=False, finalize=False) - kkk2.process(cat1, cat2, initialize=False, finalize=False) - kkk2.process(cat1, cat3, initialize=False, finalize=False) - kkk2.process(cat2, cat1, initialize=False, finalize=False) - kkk2.process(cat2, cat3, initialize=False, finalize=False) - kkk2.process(cat3, cat1, initialize=False, finalize=False) - kkk2.process(cat3, cat2, initialize=False, finalize=False) - kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True) + kkk2.process(cat1, cat2, ordered=False, initialize=False, finalize=False) + kkk2.process(cat1, cat3, ordered=False, initialize=False, finalize=False) + kkk2.process(cat2, cat1, ordered=False, initialize=False, finalize=False) + kkk2.process(cat2, cat3, ordered=False, initialize=False, finalize=False) + kkk2.process(cat3, cat1, ordered=False, initialize=False, finalize=False) + kkk2.process(cat3, cat2, ordered=False, initialize=False, 
finalize=False) + kkk2.process(cat1, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(kkk1.ntri, kkk2.ntri) np.testing.assert_allclose(kkk1.weight, kkk2.weight) @@ -1673,10 +1673,10 @@ def test_finalize_false(): patch_centers=cat.patch_centers) np.testing.assert_array_equal(cat23.patch, cat.patch[nsource:3*nsource]) - kkk1.process(cat1, cat23) - kkk2.process(cat1, cat2, initialize=True, finalize=False) - kkk2.process(cat1, cat3, initialize=False, finalize=False) - kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True) + kkk1.process(cat1, cat23, ordered=False) + kkk2.process(cat1, cat2, ordered=False, initialize=True, finalize=False) + kkk2.process(cat1, cat3, ordered=False, initialize=False, finalize=False) + kkk2.process(cat1, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(kkk1.ntri, kkk2.ntri) np.testing.assert_allclose(kkk1.weight, kkk2.weight) @@ -1737,13 +1737,13 @@ def test_finalize_false(): ggg2.process(cat1, initialize=True, finalize=False) ggg2.process(cat2, initialize=False, finalize=False) ggg2.process(cat3, initialize=False, finalize=False) - ggg2.process(cat1, cat2, initialize=False, finalize=False) - ggg2.process(cat1, cat3, initialize=False, finalize=False) - ggg2.process(cat2, cat1, initialize=False, finalize=False) - ggg2.process(cat2, cat3, initialize=False, finalize=False) - ggg2.process(cat3, cat1, initialize=False, finalize=False) - ggg2.process(cat3, cat2, initialize=False, finalize=False) - ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True) + ggg2.process(cat1, cat2, ordered=False, initialize=False, finalize=False) + ggg2.process(cat1, cat3, ordered=False, initialize=False, finalize=False) + ggg2.process(cat2, cat1, ordered=False, initialize=False, finalize=False) + ggg2.process(cat2, cat3, ordered=False, initialize=False, finalize=False) + ggg2.process(cat3, cat1, ordered=False, initialize=False, finalize=False) + ggg2.process(cat3, cat2, 
ordered=False, initialize=False, finalize=False) + ggg2.process(cat1, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(ggg1.ntri, ggg2.ntri) np.testing.assert_allclose(ggg1.weight, ggg2.weight) @@ -1756,10 +1756,10 @@ def test_finalize_false(): np.testing.assert_allclose(ggg1.gam3, ggg2.gam3) # GGG cross12 - ggg1.process(cat1, cat23) - ggg2.process(cat1, cat2, initialize=True, finalize=False) - ggg2.process(cat1, cat3, initialize=False, finalize=False) - ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True) + ggg1.process(cat1, cat23, ordered=False) + ggg2.process(cat1, cat2, ordered=False, initialize=True, finalize=False) + ggg2.process(cat1, cat3, ordered=False, initialize=False, finalize=False) + ggg2.process(cat1, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(ggg1.ntri, ggg2.ntri) np.testing.assert_allclose(ggg1.weight, ggg2.weight) @@ -1789,10 +1789,10 @@ def test_finalize_false(): np.testing.assert_allclose(ggg1.gam3, ggg2.gam3) # GGG cross - ggg1.process(cat, cat2, cat3) - ggg2.process(cat1, cat2, cat3, initialize=True, finalize=False) - ggg2.process(cat2, cat2, cat3, initialize=False, finalize=False) - ggg2.process(cat3, cat2, cat3, initialize=False, finalize=True) + ggg1.process(cat, cat2, cat3, ordered=False) + ggg2.process(cat1, cat2, cat3, ordered=False, initialize=True, finalize=False) + ggg2.process(cat2, cat2, cat3, ordered=False, initialize=False, finalize=False) + ggg2.process(cat3, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(ggg1.ntri, ggg2.ntri) np.testing.assert_allclose(ggg1.weight, ggg2.weight) @@ -1832,13 +1832,13 @@ def test_finalize_false(): nnn2.process(cat1, initialize=True, finalize=False) nnn2.process(cat2, initialize=False, finalize=False) nnn2.process(cat3, initialize=False, finalize=False) - nnn2.process(cat1, cat2, initialize=False, finalize=False) - nnn2.process(cat1, cat3, initialize=False, 
finalize=False) - nnn2.process(cat2, cat1, initialize=False, finalize=False) - nnn2.process(cat2, cat3, initialize=False, finalize=False) - nnn2.process(cat3, cat1, initialize=False, finalize=False) - nnn2.process(cat3, cat2, initialize=False, finalize=False) - nnn2.process(cat1, cat2, cat3, initialize=False, finalize=True) + nnn2.process(cat1, cat2, ordered=False, initialize=False, finalize=False) + nnn2.process(cat1, cat3, ordered=False, initialize=False, finalize=False) + nnn2.process(cat2, cat1, ordered=False, initialize=False, finalize=False) + nnn2.process(cat2, cat3, ordered=False, initialize=False, finalize=False) + nnn2.process(cat3, cat1, ordered=False, initialize=False, finalize=False) + nnn2.process(cat3, cat2, ordered=False, initialize=False, finalize=False) + nnn2.process(cat1, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(nnn1.ntri, nnn2.ntri) np.testing.assert_allclose(nnn1.weight, nnn2.weight) @@ -1847,10 +1847,10 @@ def test_finalize_false(): np.testing.assert_allclose(nnn1.meand3, nnn2.meand3) # NNN cross12 - nnn1.process(cat1, cat23) - nnn2.process(cat1, cat2, initialize=True, finalize=False) - nnn2.process(cat1, cat3, initialize=False, finalize=False) - nnn2.process(cat1, cat2, cat3, initialize=False, finalize=True) + nnn1.process(cat1, cat23, ordered=False) + nnn2.process(cat1, cat2, ordered=False, initialize=True, finalize=False) + nnn2.process(cat1, cat3, ordered=False, initialize=False, finalize=False) + nnn2.process(cat1, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(nnn1.ntri, nnn2.ntri) np.testing.assert_allclose(nnn1.weight, nnn2.weight) @@ -1872,10 +1872,10 @@ def test_finalize_false(): np.testing.assert_allclose(nnn1.meand3, nnn2.meand3) # NNN cross - nnn1.process(cat, cat2, cat3) - nnn2.process(cat1, cat2, cat3, initialize=True, finalize=False) - nnn2.process(cat2, cat2, cat3, initialize=False, finalize=False) - nnn2.process(cat3, cat2, cat3, 
initialize=False, finalize=True) + nnn1.process(cat, cat2, cat3, ordered=False) + nnn2.process(cat1, cat2, cat3, ordered=False, initialize=True, finalize=False) + nnn2.process(cat2, cat2, cat3, ordered=False, initialize=False, finalize=False) + nnn2.process(cat3, cat2, cat3, ordered=False, initialize=False, finalize=True) np.testing.assert_allclose(nnn1.ntri, nnn2.ntri) np.testing.assert_allclose(nnn1.weight, nnn2.weight) diff --git a/treecorr/exec_corr3.py b/treecorr/exec_corr3.py index 29e45f9f..1a8ce000 100644 --- a/treecorr/exec_corr3.py +++ b/treecorr/exec_corr3.py @@ -148,11 +148,11 @@ def corr3(config, logger=None): if rrr is not None and config['nnn_statistic'] == 'compensated': logger.warning("Performing DRR calculations...") drr = NNNCorrelation(config, logger=logger) - drr.process(cat1,rand1) + drr.process(cat1,rand1, ordered=False) logger.info("Done DRR calculations.") logger.warning("Performing DDR calculations...") rdd = NNNCorrelation(config, logger=logger) - rdd.process(rand1,cat1) + rdd.process(rand1,cat1, ordered=False) logger.info("Done DDR calculations.") ddd.write(config['nnn_file_name'], rrr=rrr, drr=drr, rdd=rdd) logger.warning("Wrote NNN correlation to %s",config['nnn_file_name']) diff --git a/treecorr/gggcorrelation.py b/treecorr/gggcorrelation.py index 57a32946..b7d6418d 100644 --- a/treecorr/gggcorrelation.py +++ b/treecorr/gggcorrelation.py @@ -291,13 +291,13 @@ def process_auto(self, cat, *, metric=None, num_threads=None): self.corr.processAuto(field.data, self.output_dots, self._bintype, self._metric) - def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads=None): + def process_cross12(self, cat1, cat2, *, metric=None, ordered=True, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. 
This accumulates the cross-correlation for the given catalogs as part of a larger - auto-correlation calculation. E.g. when splitting up a large catalog into patches, - this is appropriate to use for the cross correlation between different patches + auto- or cross-correlation calculation. E.g. when splitting up a large catalog into + patches, this is appropriate to use for the cross correlation between different patches as part of the complete auto-correlation of the full catalog. Parameters: @@ -309,7 +309,7 @@ def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (default: False) + catalogs. (default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) @@ -341,12 +341,12 @@ def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads self.corr.processCross12(f1.data, f2.data, (1 if ordered else 0), self.output_dots, self._bintype, self._metric) - def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_threads=None): + def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=True, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs as part of a larger - auto-correlation calculation. E.g. when splitting up a large catalog into patches, - this is appropriate to use for the cross correlation between different patches + auto- or cross-correlation calculation. E.g. when splitting up a large catalog into + patches, this is appropriate to use for the cross correlation between different patches as part of the complete auto-correlation of the full catalog. 
Parameters: @@ -357,7 +357,7 @@ def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_thr (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (default: False) + catalogs. (default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) @@ -557,7 +557,7 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.ntri for c in others], axis=0, out=self.ntri) - def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, num_threads=None, + def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=True, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the 3pt correlation function. @@ -566,11 +566,15 @@ def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, num first catalog taking one corner of the triangles, and the second taking two corners. - If 3 arguments are given, then compute a three-way cross-correlation function. - For cross correlations, the default behavior is to allow the three triangle vertices - (P1, P2, P3) to come from any of the three (or two) catalogs. However, if you want to - keep track of the order of the catalogs, you can set ``ordered=True``, which will fix - P1 to come from ``cat1``, P2 from ``cat2`` and P3 from ``cat3``. The sides d1, d2, d3 - are taken to be opposite P1, P2, P3 respectively. + For cross correlations, the default behavior is to use cat1 for the first vertex (P1), + cat2 for the second vertex (P2), and cat3 for the third vertex (P3). If only two + catalogs are given, vertices P2 and P3 both come from cat2. The sides d1, d2, d3, + used to define the binning, are taken to be opposite P1, P2, P3 respectively.
+ + However, if you want to accumulate triangles where objects from each catalog can take + any position in the triangles, you can set ``ordered=False``. In this case, + triangles will be formed where P1, P2 and P3 can come from any input catalog, so long as there + is one from cat1, one from cat2, and one from cat3 (or two from cat2 if cat3 is None). All arguments may be lists, in which case all items in the list are used for that element of the correlation. @@ -585,7 +589,7 @@ def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, num (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (see above; default: False) + catalogs. (see above; default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) diff --git a/treecorr/kkkcorrelation.py b/treecorr/kkkcorrelation.py index c6820503..f4fff86f 100644 --- a/treecorr/kkkcorrelation.py +++ b/treecorr/kkkcorrelation.py @@ -229,13 +229,13 @@ def process_auto(self, cat, *, metric=None, num_threads=None): self.corr.processAuto(field.data, self.output_dots, self._bintype, self._metric) - def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads=None): + def process_cross12(self, cat1, cat2, *, metric=None, ordered=True, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. This accumulates the cross-correlation for the given catalogs as part of a larger - auto-correlation calculation. E.g. when splitting up a large catalog into patches, - this is appropriate to use for the cross correlation between different patches + auto- or cross-correlation calculation. E.g.
when splitting up a large catalog into + patches, this is appropriate to use for the cross correlation between different patches as part of the complete auto-correlation of the full catalog. Parameters: @@ -247,7 +247,7 @@ def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (default: False) + catalogs. (default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) @@ -279,12 +279,12 @@ def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads self.corr.processCross12(f1.data, f2.data, (1 if ordered else 0), self.output_dots, self._bintype, self._metric) - def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_threads=None): + def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=True, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs as part of a larger - auto-correlation calculation. E.g. when splitting up a large catalog into patches, - this is appropriate to use for the cross correlation between different patches + auto- or cross-correlation calculation. E.g. when splitting up a large catalog into + patches, this is appropriate to use for the cross correlation between different patches as part of the complete auto-correlation of the full catalog. Parameters: @@ -295,7 +295,7 @@ def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_thr (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. 
(default: False) + catalogs. (default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) @@ -454,7 +454,7 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.ntri for c in others], axis=0, out=self.ntri) - def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, num_threads=None, + def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=True, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the 3pt correlation function. @@ -463,11 +463,15 @@ def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, num first catalog taking one corner of the triangles, and the second taking two corners. - If 3 arguments are given, then compute a three-way cross-correlation function. - For cross correlations, the default behavior is to allow the three triangle vertices - (P1, P2, P3) to come from any of the three (or two) catalogs. However, if you want to - keep track of the order of the catalogs, you can set ``ordered=True``, which will fix - P1 to come from ``cat1``, P2 from ``cat2`` and P3 from ``cat3``. The sides d1, d2, d3 - are taken to be opposite P1, P2, P3 respectively. + For cross correlations, the default behavior is to use cat1 for the first vertex (P1), + cat2 for the second vertex (P2), and cat3 for the third vertex (P3). If only two + catalogs are given, vertices P2 and P3 both come from cat2. The sides d1, d2, d3, + used to define the binning, are taken to be opposite P1, P2, P3 respectively. + + However, if you want to accumulate triangles where objects from each catalog can take + any position in the triangles, you can set ``ordered=False``. 
In this case, + triangles will be formed where P1, P2 and P3 can come from any input catalog, so long as there + is one from cat1, one from cat2, and one from cat3 (or two from cat2 if cat3 is None). All arguments may be lists, in which case all items in the list are used for that element of the correlation. @@ -482,7 +486,7 @@ def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, num (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (see above; default: False) + catalogs. (see above; default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) diff --git a/treecorr/nnncorrelation.py b/treecorr/nnncorrelation.py index ce4bad02..c797329b 100644 --- a/treecorr/nnncorrelation.py +++ b/treecorr/nnncorrelation.py @@ -265,13 +265,13 @@ def process_auto(self, cat, *, metric=None, num_threads=None): self._bintype, self._metric) self.tot += (1./6.) * cat.sumw**3 - def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads=None): + def process_cross12(self, cat1, cat2, *, metric=None, ordered=True, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. This accumulates the cross-correlation for the given catalogs as part of a larger - auto-correlation calculation. E.g. when splitting up a large catalog into patches, - this is appropriate to use for the cross correlation between different patches + auto- or cross-correlation calculation. E.g. when splitting up a large catalog into + patches, this is appropriate to use for the cross correlation between different patches as part of the complete auto-correlation of the full catalog. 
Parameters: @@ -283,7 +283,7 @@ def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (default: False) + catalogs. (default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) @@ -310,18 +310,16 @@ def process_cross12(self, cat1, cat2, *, metric=None, ordered=False, num_threads coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) - # Note: all 3 correlation objects are the same. Thus, all triangles will be placed - # into self.corr, whichever way the three catalogs are permuted for each triangle. self.corr.processCross12(f1.data, f2.data, (1 if ordered else 0), self.output_dots, self._bintype, self._metric) self.tot += cat1.sumw * cat2.sumw**2 / 2. - def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_threads=None): + def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=True, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs as part of a larger - auto-correlation calculation. E.g. when splitting up a large catalog into patches, - this is appropriate to use for the cross correlation between different patches + auto- or cross-correlation calculation. E.g. when splitting up a large catalog into + patches, this is appropriate to use for the cross correlation between different patches as part of the complete auto-correlation of the full catalog. Parameters: @@ -332,7 +330,8 @@ def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_thr (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) 
ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (default: False) + catalogs. (default: True; Note: ordered=1 will fix cat1 in the + first location, but let 2 and 3 swap.) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) @@ -364,8 +363,6 @@ def process_cross(self, cat1, cat2, cat3, *, metric=None, ordered=False, num_thr coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) - # Note: all 6 correlation objects are the same. Thus, all triangles will be placed - # into self.corr, whichever way the three catalogs are permuted for each triangle. self.corr.processCross(f1.data, f2.data, f3.data, (3 if ordered is True else 1 if ordered == 1 else 0), self.output_dots, self._bintype, self._metric) @@ -511,7 +508,7 @@ def __iadd__(self, other): self.ntri[:] += other.ntri[:] return self - def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, + def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=True, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Accumulate the 3pt correlation of the points in the given Catalog(s). @@ -520,11 +517,17 @@ def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, first catalog taking one corner of the triangles, and the second taking two corners. - If 3 arguments are given, then compute a three-way cross-correlation. - For cross correlations, the default behavior is to allow the three triangle vertices - (P1, P2, P3) to come from any of the three (or two) catalogs. However, if you want to - keep track of the order of the catalogs, you can set ``ordered=True``, which will fix - P1 to come from ``cat1``, P2 from ``cat2`` and P3 from ``cat3``. The sides d1, d2, d3 - are taken to be opposite P1, P2, P3 respectively. 
+ For cross correlations, the default behavior is to use cat1 for the first vertex (P1), + cat2 for the second vertex (P2), and cat3 for the third vertex (P3). If only two + catalogs are given, vertices P2 and P3 both come from cat2. The sides d1, d2, d3, + used to define the binning, are taken to be opposite P1, P2, P3 respectively. + + However, if you want to accumulate triangles where objects from each catalog can take + any position in the triangles, you can set ``ordered=False``. In this case, + triangles will be formed where P1, P2 and P3 can come from any input catalog, so long as there + is one from cat1, one from cat2, and one from cat3 (or two from cat2 if cat3 is None). + This is particularly appropriate when doing random cross correlations so e.g. DRR, RDR + and RRD cross-correlations are all accumulated into a single NNNCorrelation instance. All arguments may be lists, in which case all items in the list are used for that element of the correlation. @@ -539,7 +542,7 @@ def process(self, cat1, cat2=None, cat3=None, *, metric=None, ordered=False, (default: 'Euclidean'; this value can also be given in the constructor in the config dict.) ordered (bool): Whether to fix the order of the triangle vertices to match the - catalogs. (see above; default: False) + catalogs. (see above; default: True) num_threads (int): How many OpenMP threads to use during the calculation. (default: use the number of cpu cores; this value can also be given in the constructor in the config dict.) 
From 0c2842ee6b0b9a3b1245a84ddf25acbe222c8d49 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:28:59 -0500 Subject: [PATCH 05/18] Let isTriangleInRange take dsq values --- include/BinType.h | 14 ++++++++------ src/Corr3.cpp | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/include/BinType.h b/include/BinType.h index 93571933..4be78b43 100644 --- a/include/BinType.h +++ b/include/BinType.h @@ -679,11 +679,12 @@ struct BinTypeHelper static bool isTriangleInRange(const BaseCell& c1, const BaseCell& c2, const BaseCell& c3, const MetricHelper& metric, + double d1sq, double d2sq, double d3sq, double d1, double d2, double d3, double& u, double& v, double logminsep, - double minsep, double maxsep, double binsize, double nbins, - double minu, double maxu, double ubinsize, double nubins, - double minv, double maxv, double vbinsize, double nvbins, + double minsep, double maxsep, double binsize, int nbins, + double minu, double maxu, double ubinsize, int nubins, + double minv, double maxv, double vbinsize, int nvbins, double& logd1, double& logd2, double& logd3, int ntot, int& index) { @@ -1148,11 +1149,12 @@ struct BinTypeHelper static bool isTriangleInRange(const BaseCell& c1, const BaseCell& c2, const BaseCell& c3, const MetricHelper& metric, + double d1sq, double d2sq, double d3sq, double d1, double d2, double d3, double& phi, double& cosphi, double logminsep, - double minsep, double maxsep, double binsize, double nbins, - double minphi, double maxphi, double phibinsize, double nphibins, - double , double , double , double , + double minsep, double maxsep, double binsize, int nbins, + double minphi, double maxphi, double phibinsize, int nphibins, + double , double , double , int , double& logd1, double& logd2, double& logd3, int ntot, int& index) { diff --git a/src/Corr3.cpp b/src/Corr3.cpp index 8a920a58..df813510 100644 --- a/src/Corr3.cpp +++ b/src/Corr3.cpp @@ -691,7 +691,7 @@ void BaseCorr3::process111Sorted( int index; if 
(BinTypeHelper::template isTriangleInRange( c1, c2, c3, metric, - d1, d2, d3, u, v, + d1sq, d2sq, d3sq, d1, d2, d3, u, v, _logminsep, _minsep, _maxsep, _binsize, _nbins, _minu, _maxu, _ubinsize, _nubins, _minv, _maxv, _vbinsize, _nvbins, From 5c98c5a2a415453fdd94893597e81ed7c96e898b Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:31:16 -0500 Subject: [PATCH 06/18] Clean up dbg lines some --- include/BinType.h | 15 +++--- src/Corr3.cpp | 113 ++++++++++++++++------------------------------ 2 files changed, 47 insertions(+), 81 deletions(-) diff --git a/include/BinType.h b/include/BinType.h index 4be78b43..fa9bec49 100644 --- a/include/BinType.h +++ b/include/BinType.h @@ -417,9 +417,9 @@ struct BinTypeHelper double minu, double minusq, double maxu, double maxusq, double minv, double minvsq, double maxv, double maxvsq) { - dbg<<"Stop111: "<= d2sq); @@ -457,19 +457,19 @@ struct BinTypeHelper // (d2 + s1+s3) < (d3 - s1-s2) if (O == 3 && d3sq > SQR(d2 + 2*s1+s2+s3)) { - dbg<<"d2 cannot be larger than d3\n"; + xdbg<<"d2 cannot be larger than d3\n"; return true; } // (d1 + s2+s3) < (d2 - s1-s3) double ss = s1+s2+2*s3; if (ss < d2 && d1sq < SQR(d2 - ss)) { - dbg<<"d1 cannot be larger than d2\n"; + xdbg<<"d1 cannot be larger than d2\n"; return true; } d1 = sqrt(d1sq); // (d1 + s2+s3) < (d3 - s1-s2) if (d3sq > SQR(d1 + s1+2*s2+s3)) { - dbg<<"d1 cannot be larger than d3\n"; + xdbg<<"d1 cannot be larger than d3\n"; return true; } } @@ -783,7 +783,7 @@ struct BinTypeHelper { enum { sort_d123 = false }; - static int calculateNTot(int nbins, int nphibins, int nvbins) + static int calculateNTot(int nbins, int nphibins, int ) { return nbins * nbins * nphibins; } static bool tooSmallS2(double s2, double halfminsep, double minphi, double ) @@ -1241,5 +1241,6 @@ struct BinTypeHelper }; + #endif diff --git a/src/Corr3.cpp b/src/Corr3.cpp index df813510..076797eb 100644 --- a/src/Corr3.cpp +++ b/src/Corr3.cpp @@ -210,6 +210,7 @@ void Corr3::clear() template void 
BaseCorr3::process(const BaseField& field, bool dots) { + dbg<<"Start process auto\n"; reset_ws(); Assert(_coords == -1 || _coords == C); _coords = C; @@ -239,12 +240,8 @@ void BaseCorr3::process(const BaseField& field, bool dots) #endif { if (dots) std::cout<<'.'<= 2) c1.WriteTree(get_dbgout()); #endif } corr.template process3(c1, metric); @@ -273,6 +270,7 @@ template void BaseCorr3::process(const BaseField& field1, const BaseField& field2, bool dots) { + dbg<<"Start process cross12\n"; reset_ws(); xdbg<<"_coords = "<<_coords< 0); Assert(n2 > 0); MetricHelper metric(0, 0, _xp, _yp, _zp); -#ifdef DEBUGLOGGING - if (verbose_level >= 2) { - xdbg<<"field1: \n"; - for (long i=0;i& c1 = *field1.getCells()[i]; - c1.WriteTree(get_dbgout()); - } - xdbg<<"field2: \n"; - for (long i=0;i& c2 = *field2.getCells()[i]; - c2.WriteTree(get_dbgout()); - } - } -#endif #ifdef _OPENMP #pragma omp parallel @@ -352,6 +334,7 @@ template void BaseCorr3::process(const BaseField& field1, const BaseField& field2, const BaseField& field3, bool dots) { + dbg<<"Start process cross full\n"; reset_ws(); xdbg<<"_coords = "<<_coords< 0); Assert(n2 > 0); Assert(n3 > 0); MetricHelper metric(0, 0, _xp, _yp, _zp); -#ifdef DEBUGLOGGING - if (verbose_level >= 2) { - xdbg<<"field1: \n"; - for (long i=0;i& c1 = *field1.getCells()[i]; - c1.WriteTree(get_dbgout()); - } - xdbg<<"field2: \n"; - for (long i=0;i& c2 = *field2.getCells()[i]; - c2.WriteTree(get_dbgout()); - } - xdbg<<"field3: \n"; - for (long i=0;i& c3 = *field3.getCells()[i]; - c3.WriteTree(get_dbgout()); - } - } -#endif #ifdef _OPENMP #pragma omp parallel @@ -439,15 +400,15 @@ template void BaseCorr3::process3(const BaseCell& c1, const MetricHelper& metric) { // Does all triangles with 3 points in c1 - xdbg<<"Process3: c1 = "<& c1, const BaseCell& c2, const MetricHelper& metric) { // Does all triangles with one point in c1 and the other two points in c2 + xdbg<& c1, const BaseCell& c2, // Some trivial stoppers: if (c1.getW() == 0) { - 
dbg<::tooSmallS2(s2, _halfminsep, _minu, _minv)) { - dbg<& c1, const BaseCell& c2, // If all possible triangles will have d2 < minsep, then abort the recursion here. // i.e. if d + s1 + s2 < minsep if (BinTypeHelper::tooSmallDist(rsq, s1ps2, _minsep, _minsepsq)) { - dbg< maxsep. // i.e. if d - s1 - s2 >= maxsep if (BinTypeHelper::tooLargeDist(rsq, s1ps2, _maxsep, _maxsepsq)) { - dbg<& c1, const BaseCell& c2, if (BinTypeHelper::template noAllowedAngles(rsq, s1ps2, s1, s2, _halfminsep, _minu, _minusq, _maxu, _maxusq, _minv, _minvsq, _maxv, _maxvsq)) { - dbg<& c1, const BaseCell& c2, const BaseCell& c3, const MetricHelper& metric, double d1sq, double d2sq, double d3sq) { - dbg<(c1, c3, c2, metric, d1sq, d3sq, d2sq); } } else { - // For the non-sorting BinTypes (i.e. LogSAS so far), we just need to make sure + // For the non-sorting BinTypes (e.g. LogSAS), we just need to make sure // 1-3-2 is counter-clockwise if (!metric.CCW(c1.getPos(), c3.getPos(), c2.getPos())) { xdbg<<":swap23\n"; @@ -673,7 +636,7 @@ void BaseCorr3::process111Sorted( _minu, _minusq, _maxu, _maxusq, _minv, _minvsq, _maxv, _maxvsq)) { - dbg<(c1, c2, c3, d1, d2, d3, u, v, logd1, logd2, logd3, index); } else { - dbg<::finishProcess( { double nnn = double(c1.getN()) * c2.getN() * c3.getN(); _ntri[index] += nnn; - dbg< "<<_ntri[index]< "<<_ntri[index]<::finishProcess( _meanv[index] += www * v; _weight[index] += www; - DirectHelper::template ProcessZeta( + DirectHelper::ProcessZeta( static_cast&>(c1), static_cast&>(c2), static_cast&>(c3), @@ -937,6 +900,7 @@ void Corr3::operator+=(const Corr3& rhs) for (int i=0; i<_ntot; ++i) _ntri[i] += rhs._ntri[i]; } + // // // The functions we call from Python. 
@@ -1039,13 +1003,13 @@ void ProcessCross12b(BaseCorr3& corr, BaseField& field1, BaseField& field2 Assert((ValidMC::_M == M)); switch(ordered) { case 0: - corr.template process::_M>(field1, field2, dots); - break; + corr.template process::_M>(field1, field2, dots); + break; case 1: - corr.template process::_M>(field1, field2, dots); - break; + corr.template process::_M>(field1, field2, dots); + break; default: - Assert(false); + Assert(false); } } @@ -1072,6 +1036,7 @@ template void ProcessCross12(BaseCorr3& corr, BaseField& field1, BaseField& field2, int ordered, bool dots, BinType bin_type, Metric metric) { + dbg<<"Start ProcessCross12 "<(corr, field1, field2, ordered, dots, metric); @@ -1130,7 +1095,7 @@ void ProcessCross(BaseCorr3& corr, BaseField& field1, BaseField& field2, BaseField& field3, int ordered, bool dots, BinType bin_type, Metric metric) { - dbg<<"Start ProcessCross3 "<(corr, field1, field2, field3, ordered, dots, metric); From a75ae3a3b12fb60b0f7549550a3da6dbe3508d95 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:33:09 -0500 Subject: [PATCH 07/18] Add writeZeta method --- include/Corr3.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/Corr3.h b/include/Corr3.h index e2cdea9f..5230ae66 100644 --- a/include/Corr3.h +++ b/include/Corr3.h @@ -41,6 +41,7 @@ class BaseCorr3 virtual ~BaseCorr3() {} virtual std::shared_ptr duplicate() =0; + virtual void writeZeta(std::ostream& os, int n) const = 0; virtual void addData(const BaseCorr3& rhs) =0; @@ -162,6 +163,9 @@ class Corr3 : public BaseCorr3 void clear(); // Set all data to 0. 
+ void writeZeta(std::ostream& os, int n=1) const + { _zeta.write_full(os, n); } + template void finishProcess( const BaseCell& c1, const BaseCell& c2, const BaseCell& c3, From b9b85f1ffd0e327b9b29f6570ba4320bd9d10b4d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:34:37 -0500 Subject: [PATCH 08/18] Change getCells to be vector of const pointers --- include/Field.h | 9 ++++++++- src/Corr2.cpp | 13 +++++++++---- src/Corr3.cpp | 25 ++++++++++++++++--------- src/KMeans.cpp | 18 +++++++++--------- 4 files changed, 42 insertions(+), 23 deletions(-) diff --git a/include/Field.h b/include/Field.h index 94ef7888..c4e4e2b5 100644 --- a/include/Field.h +++ b/include/Field.h @@ -51,7 +51,14 @@ class BaseField Position getCenter() const { return _center; } double getSize() const { return std::sqrt(_sizesq); } long getNTopLevel() const { BuildCells(); return long(_cells.size()); } - const std::vector*>& getCells() const { BuildCells(); return _cells; } + const std::vector*>& getCells() const + { + BuildCells(); + // const_cast is insufficient to turn this into a vector of const BaseCell*. + // cf. https://stackoverflow.com/questions/19122858/why-is-a-vector-of-pointers-not-castable-to-a-const-vector-of-const-pointers + // But reinterpret_cast here is safe. 
+ return reinterpret_cast*>&>(_cells); + } long countNear(double x, double y, double z, double sep) const; void getNear(double x, double y, double z, double sep, long* indices, long n) const; diff --git a/src/Corr2.cpp b/src/Corr2.cpp index 723ae275..1d3735bd 100644 --- a/src/Corr2.cpp +++ b/src/Corr2.cpp @@ -150,6 +150,8 @@ void BaseCorr2::process(const BaseField& field, bool dots) dbg<<"field has "< 0); + const std::vector*>& cells = field.getCells(); + #ifdef _OPENMP #pragma omp parallel { @@ -176,10 +178,10 @@ void BaseCorr2::process(const BaseField& field, bool dots) #endif if (dots) std::cout<<'.'<& c1 = *field.getCells()[i]; + const BaseCell& c1 = *cells[i]; bc2.template process2(c1, metric); for (long j=i+1;j& c2 = *field.getCells()[j]; + const BaseCell& c2 = *cells[j]; bc2.process11::do_reverse>(c1, c2, metric); } } @@ -226,6 +228,9 @@ void BaseCorr2::process(const BaseField& field1, const BaseField& field2, Assert(n1 > 0); Assert(n2 > 0); + const std::vector*>& c1list = field1.getCells(); + const std::vector*>& c2list = field2.getCells(); + #ifdef _OPENMP #pragma omp parallel { @@ -251,9 +256,9 @@ void BaseCorr2::process(const BaseField& field1, const BaseField& field2, #endif if (dots) std::cout<<'.'<& c1 = *field1.getCells()[i]; + const BaseCell& c1 = *c1list[i]; for (long j=0;j& c2 = *field2.getCells()[j]; + const BaseCell& c2 = *c2list[j]; bc2.process11(c1, c2, metric); } } diff --git a/src/Corr3.cpp b/src/Corr3.cpp index 076797eb..c4ee476c 100644 --- a/src/Corr3.cpp +++ b/src/Corr3.cpp @@ -218,6 +218,8 @@ void BaseCorr3::process(const BaseField& field, bool dots) dbg<<"field has "< 0); + const std::vector*>& cells = field.getCells(); + #ifdef _OPENMP #pragma omp parallel { @@ -234,7 +236,7 @@ void BaseCorr3::process(const BaseField& field, bool dots) #pragma omp for schedule(dynamic) #endif for (long i=0;i& c1 = *field.getCells()[i]; + const BaseCell& c1 = *cells[i]; #ifdef _OPENMP #pragma omp critical #endif @@ -246,11 +248,11 @@ void 
BaseCorr3::process(const BaseField& field, bool dots) } corr.template process3(c1, metric); for (long j=i+1;j& c2 = *field.getCells()[j]; + const BaseCell& c2 = *cells[j]; corr.template process12(c1, c2, metric); corr.template process12(c2, c1, metric); for (long k=j+1;k& c3 = *field.getCells()[k]; + const BaseCell& c3 = *cells[k]; corr.template process111(c1, c2, c3, metric); } } @@ -285,6 +287,8 @@ void BaseCorr3::process(const BaseField& field1, const BaseField& field2, MetricHelper metric(0, 0, _xp, _yp, _zp); + const std::vector*>& c1list = field1.getCells(); + const std::vector*>& c2list = field2.getCells(); #ifdef _OPENMP #pragma omp parallel @@ -309,12 +313,12 @@ void BaseCorr3::process(const BaseField& field1, const BaseField& field2, dbg<& c1 = *field1.getCells()[i]; + const BaseCell& c1 = *c1list[i]; for (long j=0;j& c2 = *field2.getCells()[j]; + const BaseCell& c2 = *c2list[j]; corr.template process12(c1, c2, metric); for (long k=j+1;k& c3 = *field2.getCells()[k]; + const BaseCell& c3 = *c2list[k]; corr.template process111(c1, c2, c3, metric); } } @@ -352,6 +356,9 @@ void BaseCorr3::process(const BaseField& field1, const BaseField& field2, MetricHelper metric(0, 0, _xp, _yp, _zp); + const std::vector*>& c1list = field1.getCells(); + const std::vector*>& c2list = field2.getCells(); + const std::vector*>& c3list = field3.getCells(); #ifdef _OPENMP #pragma omp parallel @@ -376,11 +383,11 @@ void BaseCorr3::process(const BaseField& field1, const BaseField& field2, dbg<& c1 = *field1.getCells()[i]; + const BaseCell& c1 = *c1list[i]; for (long j=0;j& c2 = *field2.getCells()[j]; + const BaseCell& c2 = *c2list[j]; for (long k=0;k& c3 = *field3.getCells()[k]; + const BaseCell& c3 = *c3list[k]; corr.template process111(c1, c2, c3, metric); } } diff --git a/src/KMeans.cpp b/src/KMeans.cpp index 36eff278..ec0709a8 100644 --- a/src/KMeans.cpp +++ b/src/KMeans.cpp @@ -60,7 +60,7 @@ void InitializeCentersTree(std::vector >& centers, const BaseCell template void 
InitializeCentersTree(std::vector >& centers, - const std::vector*>& cells, long long seed) + const std::vector*>& cells, long long seed) { dbg<<"Initialize centers: "< >& centers, template void InitializeCentersRand(std::vector >& centers, - const std::vector*>& cells, long long seed) + const std::vector*>& cells, long long seed) { dbg<<"Initialize centers (random): "< InitializeCentersKMPP(const BaseCell* cell, template void InitializeCentersKMPP(std::vector >& centers, - const std::vector*>& cells, long long seed) + const std::vector*>& cells, long long seed) { // cf. https://en.wikipedia.org/wiki/K-means%2B%2B // The basic KMeans++ algorithm is as follows: @@ -646,7 +646,7 @@ void FindCellsInPatches(const std::vector >& centers, // patch and then runs f, which can be any of the above function classes. template void FindCellsInPatches(const std::vector >& centers, - const std::vector*>& cells, F& f, + const std::vector*>& cells, F& f, const std::vector* inertia=0) { #ifdef _OPENMP @@ -743,7 +743,7 @@ template void KMeansInitTree1(BaseField& field, double* pycenters, int npatch, long long seed) { dbg<<"Start KMeansInitTree for "<*> cells = field.getCells(); + const std::vector*> cells = field.getCells(); std::vector > centers(npatch); InitializeCentersTree(centers, cells, seed); WriteCenters(centers, pycenters, npatch); @@ -753,7 +753,7 @@ template void KMeansInitRand1(BaseField& field, double* pycenters, int npatch, long long seed) { dbg<<"Start KMeansInitRand for "<*> cells = field.getCells(); + const std::vector*> cells = field.getCells(); std::vector > centers(npatch); InitializeCentersRand(centers, cells, seed); WriteCenters(centers, pycenters, npatch); @@ -763,7 +763,7 @@ template void KMeansInitKMPP1(BaseField& field, double* pycenters, int npatch, long long seed) { dbg<<"Start KMeansInitKMPP for "<*> cells = field.getCells(); + const std::vector*> cells = field.getCells(); std::vector > centers(npatch); InitializeCentersKMPP(centers, cells, seed); 
WriteCenters(centers, pycenters, npatch); @@ -774,7 +774,7 @@ void KMeansRun1(BaseField& field, double* pycenters, int npatch, int max_iter bool alt) { dbg<<"Start KMeansRun for "<*> cells = field.getCells(); + const std::vector*> cells = field.getCells(); // Initialize the centers of the patches smartly according to the tree structure. std::vector > centers(npatch); @@ -835,7 +835,7 @@ template void KMeansAssign1(BaseField& field, const double* pycenters, int npatch, long* patches, long n) { dbg<<"Start KMeansAssign for "<*> cells = field.getCells(); + const std::vector*> cells = field.getCells(); std::vector > centers(npatch); ReadCenters(centers, pycenters, npatch); From 27351f07af5453fe3be347eff24195740a934443 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:35:29 -0500 Subject: [PATCH 09/18] Minor cleanup of 3pt tests --- tests/test_nnn.py | 74 +++++++++++++++++++++++++++++------------------ 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/tests/test_nnn.py b/tests/test_nnn.py index 6311d55b..5ac5737d 100644 --- a/tests/test_nnn.py +++ b/tests/test_nnn.py @@ -2014,11 +2014,15 @@ def test_direct_logruv_3d_auto(): ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) - # And compare to the cross correlation - # With ordered=False, we get 6x as much, since each triangle is discovered 6 times. + # Compare to the cross correlation + ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, + min_u=min_u, max_u=max_u, nubins=nubins, + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_slop=0, verbose=1) ddd.process(cat,cat,cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) + # With ordered=False, we get 6x as much, since each triangle is discovered 6 times. 
ddd.process(cat,cat,cat, ordered=False) np.testing.assert_array_equal(ddd.ntri, 6*true_ntri) @@ -2870,7 +2874,7 @@ def test_direct_logsas_auto(): if d3 == 0.: continue phi = np.arccos((d2**2 + d3**2 - d1**2)/(2*d2*d3)) if not is_ccw(x[i],y[i],x[k],y[k],x[j],y[j]): - phi = 2*np.pi - phi + continue if d2 < min_sep or d2 >= max_sep: continue if d3 < min_sep or d3 >= max_sep: continue if phi < min_phi or phi >= max_phi: continue @@ -2953,6 +2957,9 @@ def test_direct_logsas_auto(): # Split into patches to test the list-based version of the code. cat = treecorr.Catalog(x=x, y=y, npatch=10) + ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, + min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, + bin_slop=0, verbose=1,bin_type='LogSAS') ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -3404,6 +3411,9 @@ def test_direct_logsas_cross12(): # Split into patches to test the list-based version of the code. cat1 = treecorr.Catalog(x=x1, y=y1, npatch=10) cat2 = treecorr.Catalog(x=x2, y=y2, npatch=10) + ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, + min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, + bin_slop=0, verbose=1, bin_type='LogSAS') t0 = time.time() ddd.process(cat1, cat2, ordered=True) @@ -3454,15 +3464,16 @@ def test_nnn_logsas(): # of separations and a moderate range for u, v, which gives us a variety of triangle lengths. s = 10. if __name__ == "__main__": - ngal = 20000 + ngal = 200000 nrand = 3 * ngal L = 50. * s # Not infinity, so this introduces some error. Our integrals were to infinity. tol_factor = 1 else: - ngal = 2000 + ngal = 5000 nrand = ngal L = 20. 
* s tol_factor = 5 + rng = np.random.RandomState(8675309) x = rng.normal(0,s, (ngal,) ) y = rng.normal(0,s, (ngal,) ) @@ -3508,9 +3519,9 @@ def test_nnn_logsas(): np.testing.assert_allclose(dddc.meanphi, ddd.meanphi) # log() != , but it should be close: - print('meanlogd1 - log(meand1) = ',ddd.meanlogd1 - np.log(ddd.meand1)) - print('meanlogd2 - log(meand2) = ',ddd.meanlogd2 - np.log(ddd.meand2)) - print('meanlogd3 - log(meand3) = ',ddd.meanlogd3 - np.log(ddd.meand3)) + #print('meanlogd1 - log(meand1) = ',ddd.meanlogd1 - np.log(ddd.meand1)) + #print('meanlogd2 - log(meand2) = ',ddd.meanlogd2 - np.log(ddd.meand2)) + #print('meanlogd3 - log(meand3) = ',ddd.meanlogd3 - np.log(ddd.meand3)) np.testing.assert_allclose(ddd.meanlogd1, np.log(ddd.meand1), rtol=1.e-3) np.testing.assert_allclose(ddd.meanlogd2, np.log(ddd.meand2), rtol=1.e-3) np.testing.assert_allclose(ddd.meanlogd3, np.log(ddd.meand3), rtol=1.e-3) @@ -3521,7 +3532,10 @@ def test_nnn_logsas(): rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, sep_units='arcmin', verbose=1, bin_type='LogSAS') + t0 = time.time() rrr.process(rand) + t1 = time.time() + print('time for rrr: ',t1-t0) d1 = ddd.meand1 d2 = ddd.meand2 @@ -3532,8 +3546,7 @@ def test_nnn_logsas(): zeta, varzeta = ddd.calculateZeta(rrr=rrr) print('zeta = ',zeta) print('true_zeta = ',true_zeta) - print('ratio = ',zeta / true_zeta) - print('diff = ',zeta - true_zeta) + print('mean ratio = ',np.mean(zeta / true_zeta)) print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta))) np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor) np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)), @@ -3543,13 +3556,13 @@ def test_nnn_logsas(): cat.write(os.path.join('data','nnn_data_logsas.dat')) rand.write(os.path.join('data','nnn_rand_logsas.dat')) config = treecorr.config.read_config('configs/nnn_logsas.yaml') - config['verbose'] = 3 + 
config['verbose'] = 1 treecorr.corr3(config) corr3_output = np.genfromtxt(os.path.join('output','nnn_logsas.out'), names=True, skip_header=1) - print('zeta = ',zeta) - print('from corr3 output = ',corr3_output['zeta']) - print('ratio = ',corr3_output['zeta']/zeta.flatten()) - print('diff = ',corr3_output['zeta']-zeta.flatten()) + #print('zeta = ',zeta) + #print('from corr3 output = ',corr3_output['zeta']) + #print('ratio = ',corr3_output['zeta']/zeta.flatten()) + #print('diff = ',corr3_output['zeta']-zeta.flatten()) np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3) # Check the fits write option @@ -3600,7 +3613,7 @@ def test_nnn_logsas(): # The read function should reshape them to the right shape. ddd2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, - sep_units='arcmin', verbose=1, bin_type='LogSAS') + sep_units='arcmin', bin_type='LogSAS') ddd2.read(out_file_name1) np.testing.assert_almost_equal(ddd2.logd2, ddd.logd2) np.testing.assert_almost_equal(ddd2.logd3, ddd.logd3) @@ -3667,7 +3680,7 @@ def test_nnn_logsas(): ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, - sep_units='arcmin', verbose=1, bin_type='LogSAS') + sep_units='arcmin', bin_type='LogSAS') ddd3.read(out_file_name3) np.testing.assert_almost_equal(ddd3.logd2, ddd.logd2) np.testing.assert_almost_equal(ddd3.logd3, ddd.logd3) @@ -3700,16 +3713,16 @@ def test_nnn_logsas(): ddd.calculateZeta(rrr=rrr, rdd=rrr) with assert_raises(TypeError): ddd.calculateZeta(rrr=rrr, drr=rrr) - rrr2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, + rrr3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_phi=min_phi, max_phi=max_phi, nphi_bins=nphi_bins, sep_units='arcmin', bin_type='LogSAS') # Error if any of them haven't been run yet. 
with assert_raises(ValueError): - ddd.calculateZeta(rrr=rrr2, drr=rrr, rdd=rrr) + ddd.calculateZeta(rrr=rrr3, drr=rrr, rdd=rrr) with assert_raises(ValueError): - ddd.calculateZeta(rrr=rrr, drr=rrr2, rdd=rrr) + ddd.calculateZeta(rrr=rrr, drr=rrr3, rdd=rrr) with assert_raises(ValueError): - ddd.calculateZeta(rrr=rrr, drr=rrr, rdd=rrr2) + ddd.calculateZeta(rrr=rrr, drr=rrr, rdd=rrr3) out_file_name3 = os.path.join('output','nnn_out3_logsas.dat') with assert_raises(TypeError): @@ -3725,8 +3738,13 @@ def test_nnn_logsas(): drr = ddd.copy() rdd = ddd.copy() + t0 = time.time() drr.process(cat,rand, ordered=False) + t1 = time.time() rdd.process(rand,cat, ordered=False) + t2 = time.time() + print('time for drr: ',t1-t0) + print('time for rdd: ',t2-t1) zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) print('compensated zeta = ',zeta) @@ -3734,14 +3752,13 @@ def test_nnn_logsas(): xi1 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d1**2/(4.*s**2)) - 1. xi2 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d2**2/(4.*s**2)) - 1. xi3 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d3**2/(4.*s**2)) - 1. - print('xi1 = ',xi1) - print('xi2 = ',xi2) - print('xi3 = ',xi3) - print('true_zeta + xi1 + xi2 + xi3 = ',true_zeta) + #print('xi1 = ',xi1) + #print('xi2 = ',xi2) + #print('xi3 = ',xi3) + #print('true_zeta + xi1 + xi2 + xi3 = ',true_zeta) true_zeta -= xi1 + xi2 + xi3 - print('true_zeta => ',true_zeta) - print('ratio = ',zeta / true_zeta) - print('diff = ',zeta - true_zeta) + #print('true_zeta => ',true_zeta) + print('mean ratio = ',np.mean(zeta / true_zeta)) print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta))) np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor) np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)), atol=0.1*tol_factor) @@ -3815,6 +3832,7 @@ def test_nnn_logsas(): header = fitsio.read_header(out_file_name3, 1) np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.) 
+ if __name__ == '__main__': test_logruv_binning() test_logsas_binning() From 5b58be4d0dcbe3dc7430b9422a2e2c22bc48b62d Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:55:47 -0500 Subject: [PATCH 10/18] Write zeta to NNN output file if it has been computed --- tests/test_nnn.py | 6 ++++++ treecorr/nnncorrelation.py | 10 ++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/tests/test_nnn.py b/tests/test_nnn.py index 5ac5737d..f9b37f7a 100644 --- a/tests/test_nnn.py +++ b/tests/test_nnn.py @@ -973,6 +973,8 @@ def test_direct_logruv_auto(): np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3) + np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3) + np.testing.assert_allclose(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3) np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3) rrrf = ddd.tot / rrr.tot @@ -2338,6 +2340,8 @@ def test_nnn_logruv(): np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten()) np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten()) np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten()) + np.testing.assert_almost_equal(data['zeta'], ddd.zeta.flatten()) + np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(ddd.varzeta).flatten()) np.testing.assert_almost_equal(data['ntri'], ddd.ntri.flatten()) header = fitsio.read_header(out_file_name1, 1) np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.) 
@@ -3584,6 +3588,8 @@ def test_nnn_logsas(): np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten()) np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten()) np.testing.assert_almost_equal(data['meanphi'], ddd.meanphi.flatten()) + np.testing.assert_almost_equal(data['zeta'], ddd.zeta.flatten()) + np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(ddd.varzeta).flatten()) np.testing.assert_almost_equal(data['ntri'], ddd.ntri.flatten()) header = fitsio.read_header(out_file_name1, 1) np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.) diff --git a/treecorr/nnncorrelation.py b/treecorr/nnncorrelation.py index c797329b..f263991f 100644 --- a/treecorr/nnncorrelation.py +++ b/treecorr/nnncorrelation.py @@ -150,6 +150,7 @@ def __init__(self, config=None, *, logger=None, **kwargs): self._write_rrr = None self._write_drr = None self._write_rdd = None + self.zeta = None self.logger.debug('Finished building NNNCorr') @property @@ -584,7 +585,7 @@ def getStat(self): This raises a RuntimeError if calculateZeta has not been run yet. """ - if self._rrr_weight is None: + if self.zeta is None: raise RuntimeError("You need to call calculateZeta before calling estimate_cov.") return self.zeta.ravel() @@ -949,7 +950,10 @@ def _write_col_names(self): 'meand1', 'meanlogd1', 'meand2', 'meanlogd2', 'meand3', 'meanlogd3', 'meanphi'] if rrr is None: - col_names += [ 'DDD', 'ntri' ] + if self.zeta is not None: + col_names += [ 'zeta', 'sigma_zeta', 'DDD', 'ntri'] + else: + col_names += [ 'DDD', 'ntri' ] else: col_names += [ 'zeta','sigma_zeta','DDD','RRR' ] if drr is not None: @@ -973,6 +977,8 @@ def _write_data(self): if rrr is None: if drr is not None or rdd is not None: raise TypeError("rrr must be provided if other combinations are not None") + if self.zeta is not None: + data += [ self.zeta, np.sqrt(self.varzeta) ] data += [ self.weight, self.ntri ] else: # This will check for other invalid combinations of rrr, drr, etc. 
From 25485fb37be4c7217973714d95868682bf83d1f7 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 07:56:05 -0500 Subject: [PATCH 11/18] Various 3pt doc edits --- treecorr/corr3base.py | 18 +++--- treecorr/gggcorrelation.py | 109 ++++++++++++++++--------------------- treecorr/kkkcorrelation.py | 81 +++++++++++++-------------- treecorr/nnncorrelation.py | 102 ++++++++++++++++------------------ 4 files changed, 144 insertions(+), 166 deletions(-) diff --git a/treecorr/corr3base.py b/treecorr/corr3base.py index 3f7a0364..da7fef3f 100644 --- a/treecorr/corr3base.py +++ b/treecorr/corr3base.py @@ -30,7 +30,7 @@ class Namespace(object): pass class Corr3(object): - """This class stores the results of a 3-point correlation calculation, along with some + r"""This class stores the results of a 3-point correlation calculation, along with some ancillary data. This is a base class that is not intended to be constructed directly. But it has a few @@ -52,9 +52,9 @@ class Corr3(object): .. math:: - r &= d2 \\\\ - u &= \\frac{d3}{d2} \\\\ - v &= \\pm \\frac{(d1 - d2)}{d3} \\\\ + r &= d2 \\ + u &= \frac{d3}{d2} \\ + v &= \pm \frac{(d1 - d2)}{d3} \\ The orientation of the triangle is specified by the sign of v. Positive v triangles have the three sides d1,d2,d3 in counter-clockwise orientation. @@ -215,10 +215,10 @@ class Corr3(object): the range. min_top (int): The minimum number of top layers to use when setting up the field. - (default: :math:`\\max(3, \\log_2(N_{\\rm cpu}))`) + (default: :math:`\max(3, \log_2(N_{\rm cpu}))`) max_top (int): The maximum number of top layers to use when setting up the field. The top-level cells are where each calculation job starts. There will - typically be of order :math:`2^{\\rm max\\_top}` top-level cells. + typically be of order :math:`2^{\rm max\_top}` top-level cells. (default: 10) precision (int): The precision to use for the output values. This specifies how many digits to write. 
(default: 4) @@ -1332,11 +1332,11 @@ def estimate_cov(self, method, *, func=None, comm=None): return estimate_multi_cov([self], method, func=all_func, comm=comm) def build_cov_design_matrix(self, method, *, func=None, comm=None): - """Build the design matrix that is used for estimating the covariance matrix. + r"""Build the design matrix that is used for estimating the covariance matrix. The design matrix for patch-based covariance estimates is a matrix where each row - corresponds to a different estimate of the data vector, :math:`\\zeta_i` (or - :math:`f(\\zeta_i)` if using the optional ``func`` parameter). + corresponds to a different estimate of the data vector, :math:`\zeta_i` (or + :math:`f(\zeta_i)` if using the optional ``func`` parameter). The different of rows in the matrix for each valid ``method`` are: diff --git a/treecorr/gggcorrelation.py b/treecorr/gggcorrelation.py index b7d6418d..575e0e8d 100644 --- a/treecorr/gggcorrelation.py +++ b/treecorr/gggcorrelation.py @@ -62,15 +62,6 @@ class GGGCorrelation(Corr3): max_sep: The maximum separation being considered. logr1d: The nominal centers of the nbins bins in log(r). - If the bin_type is LogSAS, then it will have these attributes: - - Attributes: - nphi_bins: The number of bins in phi. - phi_bin_size: The size of the bins in phi. - min_phi: The minimum phi being considered. - max_phi: The maximum phi being considered. - phi1d: The nominal centers of the nphi_bins bins in phi. - If the bin_type is LogRUV, then it will have these attributes: Attributes: @@ -85,20 +76,19 @@ class GGGCorrelation(Corr3): u1d: The nominal centers of the nubins bins in u. v1d: The nominal centers of the nvbins bins in v. 
- In addition, the following attributes are numpy arrays whose shape is (nbins, nphi_bins, nbins) - if bin_type is LogSAS or (nbins, nubins, nvbins) if bin_type is LogRUV: - - If bin_type is LogSAS: + If the bin_type is LogSAS, then it will have these attributes: Attributes: - logd2: The nominal center of each d2 side bin in log(d2). - d2nom: The nominal center of each d2 side bin converted to regular distance. - i.e. d2 = exp(logd2). - logd3: The nominal center of each d3 side bin in log(d3). - d3nom: The nominal center of each d3 side bin converted to regular distance. - i.e. d3 = exp(logd3). - phi: The nominal center of each angular bin. - meanphi: The (weighted) mean value of phi for the triangles in each bin. + nphi_bins: The number of bins in phi. + phi_bin_size: The size of the bins in phi. + min_phi: The minimum phi being considered. + max_phi: The maximum phi being considered. + phi1d: The nominal centers of the nphi_bins bins in phi. + + In addition, the following attributes are numpy arrays whose shape is: + + * (nbins, nubins, nvbins) if bin_type is LogRUV + * (nbins, nbins, nphi_bins) if bin_type is LogSAS If bin_type is LogRUV: @@ -111,13 +101,25 @@ class GGGCorrelation(Corr3): meanu: The (weighted) mean value of u for the triangles in each bin. meanv: The (weighted) mean value of v for the triangles in each bin. + If bin_type is LogSAS: + + Attributes: + logd2: The nominal center of each bin in log(d2). + d2nom: The nominal center of each bin converted to regular d2 distance. + i.e. d2 = exp(logd2). + logd3: The nominal center of each bin in log(d3). + d3nom: The nominal center of each bin converted to regular d3 distance. + i.e. d3 = exp(logd3). + phi: The nominal center of each angular bin. + meanphi: The (weighted) mean value of phi for the triangles in each bin. + For any bin_type: Attributes: - gam0: The 0th "natural" correlation function, :math:`\Gamma_0(r,u,v)`. - gam1: The 1st "natural" correlation function, :math:`\Gamma_1(r,u,v)`. 
- gam2: The 2nd "natural" correlation function, :math:`\Gamma_2(r,u,v)`. - gam3: The 3rd "natural" correlation function, :math:`\Gamma_3(r,u,v)`. + gam0: The 0th "natural" correlation function, :math:`\Gamma_0`. + gam1: The 1st "natural" correlation function, :math:`\Gamma_1`. + gam2: The 2nd "natural" correlation function, :math:`\Gamma_2`. + gam3: The 3rd "natural" correlation function, :math:`\Gamma_3`. vargam0: The variance of :math:`\Gamma_0`, only including the shot noise propagated into the final correlation. This (and the related values for 1,2,3) does not include sample variance, so it is always an underestimate @@ -681,36 +683,10 @@ def write(self, file_name, *, file_type=None, precision=None, write_patch_result r_nom The nominal center of the bin in r = d2 where d1 > d2 > d3 u_nom The nominal center of the bin in u = d3/d2 v_nom The nominal center of the bin in v = +-(d1-d2)/d3 - meand1 The mean value :math:`\langle d1\rangle` of triangles that fell - into each bin - meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that - fell into each bin - meand2 The mean value :math:`\langle d2\rangle` of triangles that fell - into each bin - meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that - fell into each bin - meand3 The mean value :math:`\langle d3\rangle` of triangles that fell - into each bin - meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that - fell into each bin meanu The mean value :math:`\langle u\rangle` of triangles that fell into each bin meanv The mean value :math:`\langle v\rangle` of triangles that fell into each bin - gam0r The real part of the estimator of :math:`\Gamma_0(r,u,v)` - gam0i The imag part of the estimator of :math:`\Gamma_0(r,u,v)` - gam1r The real part of the estimator of :math:`\Gamma_1(r,u,v)` - gam1i The imag part of the estimator of :math:`\Gamma_1(r,u,v)` - gam2r The real part of the estimator of :math:`\Gamma_2(r,u,v)` - gam2i The imag part of the estimator 
of :math:`\Gamma_2(r,u,v)` - gam3r The real part of the estimator of :math:`\Gamma_3(r,u,v)` - gam3i The imag part of the estimator of :math:`\Gamma_3(r,u,v)` - sigma_gam0 The sqrt of the variance estimate of :math:`\Gamma_0` - sigma_gam1 The sqrt of the variance estimate of :math:`\Gamma_1` - sigma_gam2 The sqrt of the variance estimate of :math:`\Gamma_2` - sigma_gam3 The sqrt of the variance estimate of :math:`\Gamma_3` - weight The total weight of triangles contributing to each bin - ntri The number of triangles contributing to each bin ========== ================================================================ For bin_type = LogSAS, the output file will include the following columns: @@ -722,6 +698,19 @@ def write(self, file_name, *, file_type=None, precision=None, write_patch_result d3_nom The nominal center of the bin in d3 phi_nom The nominal center of the bin in phi, the opening angle between d2 and d3 in the counter-clockwise direction + meanphi The mean value :math:`\langle phi\rangle` of triangles that fell + into each bin + ========== ================================================================ + + In addition, all bin types include the following columns: + + ========== ================================================================ + Column Description + ========== ================================================================ + meand1 The mean value :math:`\langle d1\rangle` of triangles that fell + into each bin + meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that + fell into each bin meand2 The mean value :math:`\langle d2\rangle` of triangles that fell into each bin meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that @@ -730,16 +719,14 @@ def write(self, file_name, *, file_type=None, precision=None, write_patch_result into each bin meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that fell into each bin - meanphi The mean value :math:`\langle phi\rangle` of triangles that 
fell - into each bin - gam0r The real part of the estimator of :math:`\Gamma_0(r,u,v)` - gam0i The imag part of the estimator of :math:`\Gamma_0(r,u,v)` - gam1r The real part of the estimator of :math:`\Gamma_1(r,u,v)` - gam1i The imag part of the estimator of :math:`\Gamma_1(r,u,v)` - gam2r The real part of the estimator of :math:`\Gamma_2(r,u,v)` - gam2i The imag part of the estimator of :math:`\Gamma_2(r,u,v)` - gam3r The real part of the estimator of :math:`\Gamma_3(r,u,v)` - gam3i The imag part of the estimator of :math:`\Gamma_3(r,u,v)` + gam0r The real part of the estimator of :math:`\Gamma_0` + gam0i The imag part of the estimator of :math:`\Gamma_0` + gam1r The real part of the estimator of :math:`\Gamma_1` + gam1i The imag part of the estimator of :math:`\Gamma_1` + gam2r The real part of the estimator of :math:`\Gamma_2` + gam2i The imag part of the estimator of :math:`\Gamma_2` + gam3r The real part of the estimator of :math:`\Gamma_3` + gam3i The imag part of the estimator of :math:`\Gamma_3` sigma_gam0 The sqrt of the variance estimate of :math:`\Gamma_0` sigma_gam1 The sqrt of the variance estimate of :math:`\Gamma_1` sigma_gam2 The sqrt of the variance estimate of :math:`\Gamma_2` diff --git a/treecorr/kkkcorrelation.py b/treecorr/kkkcorrelation.py index f4fff86f..a40ac092 100644 --- a/treecorr/kkkcorrelation.py +++ b/treecorr/kkkcorrelation.py @@ -45,15 +45,6 @@ class KKKCorrelation(Corr3): max_sep: The maximum separation being considered. logr1d: The nominal centers of the nbins bins in log(r). - If the bin_type is LogSAS, then it will have these attributes: - - Attributes: - nphi_bins: The number of bins in phi. - phi_bin_size: The size of the bins in phi. - min_phi: The minimum phi being considered. - max_phi: The maximum phi being considered. - phi1d: The nominal centers of the nphi_bins bins in phi. 
- If the bin_type is LogRUV, then it will have these attributes: Attributes: @@ -68,20 +59,19 @@ class KKKCorrelation(Corr3): u1d: The nominal centers of the nubins bins in u. v1d: The nominal centers of the nvbins bins in v. - In addition, the following attributes are numpy arrays whose shape is (nbins, nphi_bins, nbins) - if bin_type is LogSAS or (nbins, nubins, nvbins) if bin_type is LogRUV: - - If bin_type is LogSAS: + If the bin_type is LogSAS, then it will have these attributes: Attributes: - logd2: The nominal center of each d2 side bin in log(d2). - d2nom: The nominal center of each d2 side bin converted to regular distance. - i.e. d2 = exp(logd2). - logd3: The nominal center of each d3 side bin in log(d3). - d3nom: The nominal center of each d3 side bin converted to regular distance. - i.e. d3 = exp(logd3). - phi: The nominal center of each angular bin. - meanphi: The (weighted) mean value of phi for the triangles in each bin. + nphi_bins: The number of bins in phi. + phi_bin_size: The size of the bins in phi. + min_phi: The minimum phi being considered. + max_phi: The maximum phi being considered. + phi1d: The nominal centers of the nphi_bins bins in phi. + + In addition, the following attributes are numpy arrays whose shape is: + + * (nbins, nubins, nvbins) if bin_type is LogRUV + * (nbins, nbins, nphi_bins) if bin_type is LogSAS If bin_type is LogRUV: @@ -94,10 +84,22 @@ class KKKCorrelation(Corr3): meanu: The (weighted) mean value of u for the triangles in each bin. meanv: The (weighted) mean value of v for the triangles in each bin. + If bin_type is LogSAS: + + Attributes: + logd2: The nominal center of each bin in log(d2). + d2nom: The nominal center of each bin converted to regular d2 distance. + i.e. d2 = exp(logd2). + logd3: The nominal center of each bin in log(d3). + d3nom: The nominal center of each bin converted to regular d3 distance. + i.e. d3 = exp(logd3). + phi: The nominal center of each angular bin. 
+ meanphi: The (weighted) mean value of phi for the triangles in each bin. + For any bin_type: Attributes: - zeta: The correlation function, :math:`\zeta(r,u,v)`. + zeta: The correlation function, :math:`\zeta`. varzeta: The variance of :math:`\zeta`, only including the shot noise propagated into the final correlation. This does not include sample variance, so it is always an underestimate of the actual variance. @@ -552,27 +554,10 @@ def write(self, file_name, *, file_type=None, precision=None, write_patch_result r_nom The nominal center of the bin in r = d2 where d1 > d2 > d3 u_nom The nominal center of the bin in u = d3/d2 v_nom The nominal center of the bin in v = +-(d1-d2)/d3 - meand1 The mean value :math:`\langle d1\rangle` of triangles that fell - into each bin - meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that - fell into each bin - meand2 The mean value :math:`\langle d2\rangle` of triangles that fell - into each bin - meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that - fell into each bin - meand3 The mean value :math:`\langle d3\rangle` of triangles that fell - into each bin - meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that - fell into each bin meanu The mean value :math:`\langle u\rangle` of triangles that fell into each bin meanv The mean value :math:`\langle v\rangle` of triangles that fell into each bin - zeta The estimator of :math:`\zeta(r,u,v)` - sigma_zeta The sqrt of the variance estimate of :math:`\zeta` - (if rrr is given) - weight The total weight of triangles contributing to each bin - ntri The number of triangles contributing to each bin ========== ================================================================ For bin_type = LogSAS, the output file will include the following columns: @@ -584,6 +569,19 @@ def write(self, file_name, *, file_type=None, precision=None, write_patch_result d3_nom The nominal center of the bin in d3 phi_nom The nominal center of the bin 
in phi, the opening angle between d2 and d3 in the counter-clockwise direction + meanphi The mean value :math:`\langle phi\rangle` of triangles that fell + into each bin + ========== ================================================================ + + In addition, all bin types include the following columns: + + ========== ================================================================ + Column Description + ========== ================================================================ + meand1 The mean value :math:`\langle d1\rangle` of triangles that fell + into each bin + meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that + fell into each bin meand2 The mean value :math:`\langle d2\rangle` of triangles that fell into each bin meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that @@ -592,10 +590,9 @@ def write(self, file_name, *, file_type=None, precision=None, write_patch_result into each bin meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that fell into each bin - meanphi The mean value :math:`\langle phi\rangle` of triangles that fell - into each bin - zeta The estimator of :math:`\zeta(r,u,v)` + zeta The estimator of :math:`\zeta` sigma_zeta The sqrt of the variance estimate of :math:`\zeta` + (if rrr is given) weight The total weight of triangles contributing to each bin ntri The number of triangles contributing to each bin ========== ================================================================ diff --git a/treecorr/nnncorrelation.py b/treecorr/nnncorrelation.py index f263991f..c260db58 100644 --- a/treecorr/nnncorrelation.py +++ b/treecorr/nnncorrelation.py @@ -39,15 +39,6 @@ class NNNCorrelation(Corr3): tot: The total number of triangles processed, which is used to normalize the randoms if they have a different number of triangles. - If the bin_type is LogSAS, then it will have these attributes: - - Attributes: - nphi_bins: The number of bins in phi where v = +-(d1-d2)/d3. 
- phi_bin_size: The size of the bins in phi. - min_phi: The minimum phi being considered. - max_phi: The maximum phi being considered. - phi1d: The nominal centers of the nphi_bins bins in phi. - If the bin_type is LogRUV, then it will have these attributes: Attributes: @@ -62,27 +53,19 @@ class NNNCorrelation(Corr3): u1d: The nominal centers of the nubins bins in u. v1d: The nominal centers of the nvbins bins in v. - In addition, the following attributes are numpy arrays whose shape is (nbins, nphi_bins, nbins) - if bin_type is LogSAS or (nbins, nubins, nvbins) if bin_type is LogRUV: - - If bin_type is LogSAS: + If the bin_type is LogSAS, then it will have these attributes: Attributes: - logd2: The nominal center of each d2 side bin in log(d2). - d2nom: The nominal center of each d2 side bin converted to regular distance. - i.e. d2 = exp(logd2). - logd3: The nominal center of each d3 side bin in log(d3). - d3nom: The nominal center of each d3 side bin converted to regular distance. - i.e. d3 = exp(logd3). - phi: The nominal center of each angular bin. - meand2: The (weighted) mean value of d2 for the triangles in each bin. - meanlogd2: The mean value of log(d2) for the triangles in each bin. - meand3: The (weighted) mean value of d3 for the triangles in each bin. - meanlogd3: The mean value of log(d3) for the triangles in each bin. - meanphi: The (weighted) mean value of phi for the triangles in each bin. - weight: The total weight in each bin. - ntri: The number of triangles going into each bin (including those where one or - more objects have w=0). + nphi_bins: The number of bins in phi where v = +-(d1-d2)/d3. + phi_bin_size: The size of the bins in phi. + min_phi: The minimum phi being considered. + max_phi: The maximum phi being considered. + phi1d: The nominal centers of the nphi_bins bins in phi. 
+ + In addition, the following attributes are numpy arrays whose shape is: + + * (nbins, nubins, nvbins) if bin_type is LogRUV + * (nbins, nbins, nphi_bins) if bin_type is LogSAS If bin_type is LogRUV: @@ -92,14 +75,33 @@ class NNNCorrelation(Corr3): i.e. r = exp(logr). u: The nominal center of each bin in u. v: The nominal center of each bin in v. + meanu: The mean value of u for the triangles in each bin. + meanv: The mean value of v for the triangles in each bin. + weight: The total weight in each bin. + ntri: The number of triangles going into each bin (including those where one or + more objects have w=0). + + If bin_type is LogSAS: + + Attributes: + logd2: The nominal center of each bin in log(d2). + d2nom: The nominal center of each bin converted to regular d2 distance. + i.e. d2 = exp(logd2). + logd3: The nominal center of each bin in log(d3). + d3nom: The nominal center of each bin converted to regular d3 distance. + i.e. d3 = exp(logd3). + phi: The nominal center of each angular bin. + meanphi: The (weighted) mean value of phi for the triangles in each bin. + + For any bin_type: + + Attributes: meand1: The (weighted) mean value of d1 for the triangles in each bin. meanlogd1: The mean value of log(d1) for the triangles in each bin. - meand2: The (weighted) mean value of d2 (aka r) for the triangles in each bin. + meand2: The (weighted) mean value of d2 for the triangles in each bin. meanlogd2: The mean value of log(d2) for the triangles in each bin. meand3: The (weighted) mean value of d3 for the triangles in each bin. meanlogd3: The mean value of log(d3) for the triangles in each bin. - meanu: The mean value of u for the triangles in each bin. - meanv: The mean value of v for the triangles in each bin. weight: The total weight in each bin. ntri: The number of triangles going into each bin (including those where one or more objects have w=0). 
@@ -851,30 +853,10 @@ def write(self, file_name, *, rrr=None, drr=None, rdd=None, file_type=None, prec r_nom The nominal center of the bin in r = d2 where d1 > d2 > d3 u_nom The nominal center of the bin in u = d3/d2 v_nom The nominal center of the bin in v = +-(d1-d2)/d3 - meand1 The mean value :math:`\langle d1\rangle` of triangles that fell - into each bin - meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that - fell into each bin - meand2 The mean value :math:`\langle d2\rangle` of triangles that fell - into each bin - meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that - fell into each bin - meand3 The mean value :math:`\langle d3\rangle` of triangles that fell - into each bin - meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that - fell into each bin meanu The mean value :math:`\langle u\rangle` of triangles that fell into each bin meanv The mean value :math:`\langle v\rangle` of triangles that fell into each bin - zeta The estimator :math:`\zeta(r,u,v)` (if rrr is given) - sigma_zeta The sqrt of the variance estimate of :math:`\zeta` - (if rrr is given) - DDD The total weight of DDD triangles in each bin - RRR The total weight of RRR triangles in each bin (if rrr is given) - DRR The total weight of DRR triangles in each bin (if drr is given) - RDD The total weight of RDD triangles in each bin (if rdd is given) - ntri The number of triangles contributing to each bin ========== ================================================================ For bin_type = LogSAS, the output file will include the following columns: @@ -886,6 +868,19 @@ def write(self, file_name, *, rrr=None, drr=None, rdd=None, file_type=None, prec d3_nom The nominal center of the bin in d3 phi_nom The nominal center of the bin in phi, the opening angle between d2 and d3 in the counter-clockwise direction + meanphi The mean value :math:`\langle phi\rangle` of triangles that fell + into each bin + ========== 
================================================================ + + In addition, all bin types include the following columns: + + ========== ================================================================ + Column Description + ========== ================================================================ + meand1 The mean value :math:`\langle d1\rangle` of triangles that fell + into each bin + meanlogd1 The mean value :math:`\langle \log(d1)\rangle` of triangles that + fell into each bin meand2 The mean value :math:`\langle d2\rangle` of triangles that fell into each bin meanlogd2 The mean value :math:`\langle \log(d2)\rangle` of triangles that @@ -894,9 +889,8 @@ def write(self, file_name, *, rrr=None, drr=None, rdd=None, file_type=None, prec into each bin meanlogd3 The mean value :math:`\langle \log(d3)\rangle` of triangles that fell into each bin - meanphi The mean value :math:`\langle phi\rangle` of triangles that fell - into each bin - zeta The estimator :math:`\zeta(d2,phi,d3)` (if rrr is given) + zeta The estimator :math:`\zeta` (if rrr is given, or zeta was + already computed) sigma_zeta The sqrt of the variance estimate of :math:`\zeta` (if rrr is given) DDD The total weight of DDD triangles in each bin From fc4baab37d9803e2861962a4626b91159fc7d388 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 08:17:06 -0500 Subject: [PATCH 12/18] Switch default bin_type to LogSAS --- tests/configs/ggg.yaml | 1 + tests/configs/ggg_direct.yaml | 1 + tests/configs/ggg_direct_cross12.yaml | 1 + tests/configs/ggg_direct_logsas.yaml | 2 +- tests/configs/ggg_direct_spherical.yaml | 1 + tests/configs/kkk.yaml | 1 + tests/configs/kkk_direct.yaml | 1 + tests/configs/kkk_direct_cross12.yaml | 1 + tests/configs/kkk_direct_logsas.yaml | 2 +- tests/configs/kkk_direct_spherical.yaml | 1 + .../configs/kkk_direct_spherical_logsas.yaml | 2 +- tests/configs/kkk_logsas.yaml | 2 +- tests/configs/nnn.yaml | 1 + tests/configs/nnn_3d.yaml | 1 + 
tests/configs/nnn_compensated.yaml | 1 + tests/configs/nnn_direct.yaml | 1 + tests/configs/nnn_direct_arc.yaml | 1 + tests/configs/nnn_direct_spherical.yaml | 1 + tests/configs/nnn_list1.yaml | 1 + tests/configs/nnn_list2.json | 1 + tests/configs/nnn_list3.params | 1 + tests/configs/nnn_list4.config | 1 + tests/configs/nnn_logsas.yaml | 2 +- tests/mpi_test3pt.py | 4 +- tests/test_ggg.py | 70 +++--- tests/test_kkk.py | 58 ++--- tests/test_nnn.py | 205 ++++++++++-------- tests/test_patch3pt.py | 68 +++--- tests/test_periodic.py | 12 +- treecorr/corr3base.py | 8 +- 30 files changed, 255 insertions(+), 198 deletions(-) diff --git a/tests/configs/ggg.yaml b/tests/configs/ggg.yaml index 89a60a5e..464a17d4 100644 --- a/tests/configs/ggg.yaml +++ b/tests/configs/ggg.yaml @@ -10,6 +10,7 @@ g2_col: 4 verbose: 1 +bin_type: LogRUV min_sep: 11. max_sep: 15. nbins: 3 diff --git a/tests/configs/ggg_direct.yaml b/tests/configs/ggg_direct.yaml index 61a506ca..8ecae3fb 100644 --- a/tests/configs/ggg_direct.yaml +++ b/tests/configs/ggg_direct.yaml @@ -11,6 +11,7 @@ w_col: w verbose: 1 +bin_type: LogRUV min_sep: 1. bin_size: 0.2 nbins: 10 diff --git a/tests/configs/ggg_direct_cross12.yaml b/tests/configs/ggg_direct_cross12.yaml index f3a3bae0..0bd80576 100644 --- a/tests/configs/ggg_direct_cross12.yaml +++ b/tests/configs/ggg_direct_cross12.yaml @@ -10,6 +10,7 @@ g2_col: 5 verbose: 0 +bin_type: LogRUV min_sep: 1. bin_size: 0.2 nbins: 10 diff --git a/tests/configs/ggg_direct_logsas.yaml b/tests/configs/ggg_direct_logsas.yaml index 16217e51..54cc3018 100644 --- a/tests/configs/ggg_direct_logsas.yaml +++ b/tests/configs/ggg_direct_logsas.yaml @@ -11,12 +11,12 @@ w_col: w verbose: 1 +bin_type: LogSAS min_sep: 1. max_sep: 10. 
nbins: 10 nphi_bins: 10 sep_units: arcmin bin_slop: 0 -bin_type: LogSAS ggg_file_name: output/ggg_direct_logsas.fits diff --git a/tests/configs/ggg_direct_spherical.yaml b/tests/configs/ggg_direct_spherical.yaml index 55332d90..7d99a170 100644 --- a/tests/configs/ggg_direct_spherical.yaml +++ b/tests/configs/ggg_direct_spherical.yaml @@ -11,6 +11,7 @@ w_col: w verbose: 1 +bin_type: LogRUV min_sep: 1. bin_size: 0.2 nbins: 10 diff --git a/tests/configs/kkk.yaml b/tests/configs/kkk.yaml index 977d8c58..5f5306f3 100644 --- a/tests/configs/kkk.yaml +++ b/tests/configs/kkk.yaml @@ -9,6 +9,7 @@ k_col: 3 verbose: 1 +bin_type: LogRUV min_sep: 11. max_sep: 15. nbins: 3 diff --git a/tests/configs/kkk_direct.yaml b/tests/configs/kkk_direct.yaml index 5077eb78..132b176d 100644 --- a/tests/configs/kkk_direct.yaml +++ b/tests/configs/kkk_direct.yaml @@ -10,6 +10,7 @@ w_col: w verbose: 1 +bin_type: LogRUV min_sep: 1. bin_size: 0.2 nbins: 10 diff --git a/tests/configs/kkk_direct_cross12.yaml b/tests/configs/kkk_direct_cross12.yaml index e0e66d5d..3871d1ec 100644 --- a/tests/configs/kkk_direct_cross12.yaml +++ b/tests/configs/kkk_direct_cross12.yaml @@ -9,6 +9,7 @@ k_col: 4 verbose: 0 +bin_type: LogRUV min_sep: 1. bin_size: 0.2 nbins: 10 diff --git a/tests/configs/kkk_direct_logsas.yaml b/tests/configs/kkk_direct_logsas.yaml index e9adf3da..bc4a161f 100644 --- a/tests/configs/kkk_direct_logsas.yaml +++ b/tests/configs/kkk_direct_logsas.yaml @@ -10,6 +10,7 @@ w_col: w verbose: 1 +bin_type: LogSAS min_sep: 1. max_sep: 10. nbins: 10 @@ -17,6 +18,5 @@ nphi_bins: 10 phi_units: 'radians' sep_units: arcmin bin_slop: 0 -bin_type: LogSAS kkk_file_name: output/kkk_direct_logsas.fits diff --git a/tests/configs/kkk_direct_spherical.yaml b/tests/configs/kkk_direct_spherical.yaml index 770213b2..b1e409e6 100644 --- a/tests/configs/kkk_direct_spherical.yaml +++ b/tests/configs/kkk_direct_spherical.yaml @@ -10,6 +10,7 @@ w_col: w verbose: 1 +bin_type: LogRUV min_sep: 1. 
bin_size: 0.2 nbins: 10 diff --git a/tests/configs/kkk_direct_spherical_logsas.yaml b/tests/configs/kkk_direct_spherical_logsas.yaml index d0f72bed..efd34c48 100644 --- a/tests/configs/kkk_direct_spherical_logsas.yaml +++ b/tests/configs/kkk_direct_spherical_logsas.yaml @@ -10,6 +10,7 @@ w_col: w verbose: 1 +bin_type: LogSAS min_sep: 5. max_sep: 100. nbins: 3 @@ -17,6 +18,5 @@ sep_units: degrees nphi_bins: 6 phi_units: degrees bin_slop: 0 -bin_type: LogSAS kkk_file_name: output/kkk_direct_sph_logsas.fits diff --git a/tests/configs/kkk_logsas.yaml b/tests/configs/kkk_logsas.yaml index e00b6a70..19916b34 100644 --- a/tests/configs/kkk_logsas.yaml +++ b/tests/configs/kkk_logsas.yaml @@ -9,6 +9,7 @@ k_col: 3 verbose: 1 +bin_type: LogSAS min_sep: 10. max_sep: 13. nbins: 3 @@ -17,6 +18,5 @@ min_phi: 45. max_phi: 90. phi_units: degree nphi_bins: 5 -bin_type: LogSAS kkk_file_name: output/kkk_logsas.out diff --git a/tests/configs/nnn.yaml b/tests/configs/nnn.yaml index f1ec1639..441affcc 100644 --- a/tests/configs/nnn.yaml +++ b/tests/configs/nnn.yaml @@ -9,6 +9,7 @@ y_units: arcmin verbose: 1 +bin_type: LogRUV min_sep: 11. max_sep: 13. nbins: 2 diff --git a/tests/configs/nnn_3d.yaml b/tests/configs/nnn_3d.yaml index 1103baed..7c2e3b1b 100644 --- a/tests/configs/nnn_3d.yaml +++ b/tests/configs/nnn_3d.yaml @@ -10,6 +10,7 @@ dec_units: deg verbose: 1 +bin_type: LogRUV min_sep: 10. max_sep: 20. nbins: 8 diff --git a/tests/configs/nnn_compensated.yaml b/tests/configs/nnn_compensated.yaml index 3fa5dc63..5200fff3 100644 --- a/tests/configs/nnn_compensated.yaml +++ b/tests/configs/nnn_compensated.yaml @@ -9,6 +9,7 @@ y_units: arcmin verbose: 1 +bin_type: LogRUV min_sep: 11. max_sep: 13. nbins: 2 diff --git a/tests/configs/nnn_direct.yaml b/tests/configs/nnn_direct.yaml index 1ead51cd..46d41cda 100644 --- a/tests/configs/nnn_direct.yaml +++ b/tests/configs/nnn_direct.yaml @@ -7,6 +7,7 @@ y_col: 2 verbose: 0 +bin_type: LogRUV min_sep: 1. max_sep: 50. 
nbins: 20 diff --git a/tests/configs/nnn_direct_arc.yaml b/tests/configs/nnn_direct_arc.yaml index bd5de89f..3b977937 100644 --- a/tests/configs/nnn_direct_arc.yaml +++ b/tests/configs/nnn_direct_arc.yaml @@ -9,6 +9,7 @@ w_col: w verbose: 1 +bin_type: LogRUV min_sep: 1. max_sep: 180. nbins: 10 diff --git a/tests/configs/nnn_direct_spherical.yaml b/tests/configs/nnn_direct_spherical.yaml index 4e3b1ec6..c14079f1 100644 --- a/tests/configs/nnn_direct_spherical.yaml +++ b/tests/configs/nnn_direct_spherical.yaml @@ -9,6 +9,7 @@ w_col: w verbose: 1 +bin_type: LogRUV min_sep: 1. bin_size: 0.2 nbins: 10 diff --git a/tests/configs/nnn_list1.yaml b/tests/configs/nnn_list1.yaml index 3cda6265..846ac07f 100644 --- a/tests/configs/nnn_list1.yaml +++ b/tests/configs/nnn_list1.yaml @@ -7,6 +7,7 @@ y_col: 2 verbose: 1 +bin_type: LogRUV min_sep: 30. max_sep: 50. nbins: 3 diff --git a/tests/configs/nnn_list2.json b/tests/configs/nnn_list2.json index ebe8b255..4b52494d 100644 --- a/tests/configs/nnn_list2.json +++ b/tests/configs/nnn_list2.json @@ -7,6 +7,7 @@ "verbose": 1, +"bin_type": "LogRUV", "min_sep": 30.0, "max_sep": 50.0, "nbins": 3, diff --git a/tests/configs/nnn_list3.params b/tests/configs/nnn_list3.params index 3a5da505..2eeec454 100644 --- a/tests/configs/nnn_list3.params +++ b/tests/configs/nnn_list3.params @@ -7,6 +7,7 @@ y_col = 2 verbose = 1 +bin_type = LogRUV min_sep = 30. max_sep = 50. nbins = 3 diff --git a/tests/configs/nnn_list4.config b/tests/configs/nnn_list4.config index c40d3d50..327e2a90 100644 --- a/tests/configs/nnn_list4.config +++ b/tests/configs/nnn_list4.config @@ -7,6 +7,7 @@ y_col = 2 verbose = 1 +bin_type = LogRUV min_sep = 30. max_sep = 50. nbins = 3 diff --git a/tests/configs/nnn_logsas.yaml b/tests/configs/nnn_logsas.yaml index 737b064d..ecc58c5b 100644 --- a/tests/configs/nnn_logsas.yaml +++ b/tests/configs/nnn_logsas.yaml @@ -9,6 +9,7 @@ y_units: arcmin verbose: 1 +bin_type: LogSAS min_sep: 11. max_sep: 13. 
nbins: 2 @@ -16,7 +17,6 @@ sep_units: arcmin min_phi: 0.8 max_phi: 2.3 nphi_bins: 15 -bin_type: LogSAS nnn_statistic: simple nnn_file_name: output/nnn_logsas.out diff --git a/tests/mpi_test3pt.py b/tests/mpi_test3pt.py index 1d0d04f3..49dd259c 100644 --- a/tests/mpi_test3pt.py +++ b/tests/mpi_test3pt.py @@ -63,7 +63,7 @@ def do_mpi_corr(comm, Correlation, cross, attr, output=True): config = dict(nbins=3, min_sep=100., max_sep=200., sep_units='arcmin', min_u=0.9, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1, bin_slop=0) + min_v=0.0, max_v=0.1, nvbins=1, bin_slop=0, bin_type='LogRUV') # First run on one process t0 = time.time() @@ -135,7 +135,7 @@ def do_mpi_corr2(comm, Correlation, cross, attr, output=True): config = dict(nbins=3, min_sep=100., max_sep=200., sep_units='arcmin', min_u=0.9, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1, bin_slop=0) + min_v=0.0, max_v=0.1, nvbins=1, bin_slop=0, bin_type='LogRUV') # First run on one process t0 = time.time() diff --git a/tests/test_ggg.py b/tests/test_ggg.py index 8142b0de..861c537a 100644 --- a/tests/test_ggg.py +++ b/tests/test_ggg.py @@ -40,7 +40,8 @@ def test_direct_logruv(): nrbins = 10 nubins = 5 nvbins = 5 - ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) + ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True, + bin_type='LogRUV') ggg.process(cat, num_threads=2) true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int) @@ -156,7 +157,8 @@ def test_direct_logruv(): np.testing.assert_allclose(data['gam3i'], ggg.gam3i.flatten(), rtol=1.e-3) # Also check the cross calculation. 
- ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) + ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True, + bin_type='LogRUV') ggg.process(cat, cat, cat, num_threads=2) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -195,7 +197,8 @@ def test_direct_logruv(): np.testing.assert_allclose(ggg.gam3i, true_gam3.imag, rtol=1.e-5, atol=1.e-8) # Or with 2 argument version, finds each triangle 3 times. - ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) + ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True, + bin_type='LogRUV') ggg.process(cat, cat, ordered=False) np.testing.assert_array_equal(ggg.ntri, 3*true_ntri) np.testing.assert_allclose(ggg.weight, 3*true_weight, rtol=1.e-5, atol=1.e-8) @@ -211,7 +214,7 @@ def test_direct_logruv(): # Repeat with binslop = 0, since the code flow is different from brute=True. # And don't do any top-level recursion so we actually test not going to the leaves. 
ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - bin_slop=0, max_top=0) + bin_slop=0, max_top=0, bin_type='LogRUV') ggg.process(cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -295,7 +298,8 @@ def test_direct_logruv(): ascii_name = 'output/ggg_ascii.txt' ggg.write(ascii_name, precision=16) - ggg3 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins) + ggg3 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, + bin_type='LogRUV') ggg3.read(ascii_name) np.testing.assert_allclose(ggg3.ntri, ggg.ntri) np.testing.assert_allclose(ggg3.weight, ggg.weight) @@ -323,7 +327,8 @@ def test_direct_logruv(): else: fits_name = 'output/ggg_fits.fits' ggg.write(fits_name) - ggg4 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins) + ggg4 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, + bin_type='LogRUV') ggg4.read(fits_name) np.testing.assert_allclose(ggg4.ntri, ggg.ntri) np.testing.assert_allclose(ggg4.weight, ggg.weight) @@ -348,37 +353,40 @@ def test_direct_logruv(): with assert_raises(TypeError): ggg2 += config - ggg5 = treecorr.GGGCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins) + ggg5 = treecorr.GGGCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins, + bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg5 - ggg6 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins) + ggg6 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins, + bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg6 - ggg7 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins*2) + ggg7 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins*2, + bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg7 ggg8 = treecorr.GGGCorrelation(min_sep=min_sep, 
bin_size=bin_size, nbins=nrbins, - min_u=0.1) + min_u=0.1, bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg8 ggg0 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - max_u=0.1) + max_u=0.1, bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg0 ggg10 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - nubins=nrbins*2) + nubins=nrbins*2, bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg10 ggg11 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - min_v=0.1) + min_v=0.1, bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg11 ggg12 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - max_v=0.1) + max_v=0.1, bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg12 ggg13 = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - nvbins=nrbins*2) + nvbins=nrbins*2, bin_type='LogRUV') with assert_raises(ValueError): ggg2 += ggg13 @@ -407,7 +415,7 @@ def test_direct_logruv_spherical(): nubins = 5 nvbins = 5 ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - sep_units='deg', brute=True) + sep_units='deg', brute=True, bin_type='LogRUV') ggg.process(cat) r = np.sqrt(x**2 + y**2 + z**2) @@ -528,7 +536,7 @@ def test_direct_logruv_spherical(): # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. 
ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - sep_units='deg', bin_slop=0, max_top=0) + sep_units='deg', bin_slop=0, max_top=0, bin_type='LogRUV') ggg.process(cat) np.testing.assert_array_equal(ggg.ntri, true_ntri) np.testing.assert_allclose(ggg.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -583,7 +591,7 @@ def test_direct_logruv_cross(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') ggg.process(cat1, cat2, cat3, num_threads=2) # Figure out the correct answer for each permutation @@ -835,7 +843,7 @@ def test_direct_logruv_cross(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') ggg.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_123) @@ -857,7 +865,7 @@ def test_direct_logruv_cross(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') ggg.process(cat1, cat2, cat3, ordered=True) #print('max_top = 0: ggg.ntri = ',ggg.ntri) @@ -920,7 +928,7 @@ def test_direct_logruv_cross12(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') ggg.process(cat1, cat2, num_threads=2) # Figure out the correct answer for each permutation @@ -1127,7 +1135,7 @@ def test_direct_logruv_cross12(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, 
nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ggg.process(cat1, cat2, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) @@ -1149,7 +1157,7 @@ def test_direct_logruv_cross12(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') ggg.process(cat1, cat2, ordered=True) np.testing.assert_array_equal(ggg.ntri, true_ntri_122) @@ -1250,14 +1258,14 @@ def test_ggg_logruv(): ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ggg.process(cat) # Using bin_size=None rather than omiting bin_size is equivalent. ggg2 = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_size=None, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ggg2.process(cat, num_threads=1) ggg.process(cat, num_threads=1) assert ggg2 == ggg @@ -1487,7 +1495,7 @@ def test_ggg_logruv(): ggg2 = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ggg2.read(out_file_name1) np.testing.assert_almost_equal(ggg2.logr, ggg.logr) np.testing.assert_almost_equal(ggg2.u, ggg.u) @@ -1528,7 +1536,7 @@ def test_map3_logruv(): r0 = 10. 
L = 20.*r0 out_name = os.path.join('data','ggg_map.out') - ggg = treecorr.GGGCorrelation(bin_size=0.1, min_sep=1, nbins=47, verbose=2) + ggg = treecorr.GGGCorrelation(bin_size=0.1, min_sep=1, nbins=47, verbose=2, bin_type='LogRUV') # This takes a few hours to run, so be careful about enabling this. if not os.path.isfile(out_name): @@ -1898,7 +1906,7 @@ def test_grid_logruv(): cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2) ggg = treecorr.GGGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, nubins=nubins, nvbins=nvbins, - verbose=1) + verbose=1, bin_type='LogRUV') ggg.process(cat) # log() != , but it should be close: @@ -2031,7 +2039,8 @@ def test_vargam_logruv(): cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin') ggg = treecorr.GGGCorrelation(bin_size=0.5, min_sep=30., max_sep=100., - sep_units='arcmin', nubins=3, nvbins=3, verbose=1) + sep_units='arcmin', nubins=3, nvbins=3, verbose=1, + bin_type='LogRUV') ggg.process(cat) all_gggs.append(ggg) @@ -2175,7 +2184,8 @@ def test_vargam_logruv(): cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin') ggg = treecorr.GGGCorrelation(bin_size=0.5, min_sep=30., max_sep=100., - sep_units='arcmin', nubins=3, nvbins=3, verbose=1) + sep_units='arcmin', nubins=3, nvbins=3, verbose=1, + bin_type='LogRUV') ggg.process(cat) print('single run:') print('max relerr for gam0r = ',np.max(np.abs((ggg.vargam0 - var_gam0r)/var_gam0r))) diff --git a/tests/test_kkk.py b/tests/test_kkk.py index d42ab19c..85347e7d 100644 --- a/tests/test_kkk.py +++ b/tests/test_kkk.py @@ -40,7 +40,8 @@ def test_direct_logruv(): nrbins = 10 nubins = 5 nvbins = 5 - kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True) + kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True, + bin_type='LogRUV') kkk.process(cat, num_threads=2) true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int) @@ -134,7 +135,7 @@ def 
test_direct_logruv(): # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - bin_slop=0, max_top=0) + bin_slop=0, max_top=0, bin_type='LogRUV') kkk.process(cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -183,7 +184,8 @@ def test_direct_logruv(): ascii_name = 'output/kkk_ascii.txt' kkk.write(ascii_name, precision=16) - kkk3 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins) + kkk3 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, + bin_type='LogRUV') kkk3.read(ascii_name) np.testing.assert_allclose(kkk3.ntri, kkk.ntri) np.testing.assert_allclose(kkk3.weight, kkk.weight) @@ -204,7 +206,8 @@ def test_direct_logruv(): else: fits_name = 'output/kkk_fits.fits' kkk.write(fits_name) - kkk4 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins) + kkk4 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, + bin_type='LogRUV') kkk4.read(fits_name) np.testing.assert_allclose(kkk4.ntri, kkk.ntri) np.testing.assert_allclose(kkk4.weight, kkk.weight) @@ -220,37 +223,40 @@ def test_direct_logruv(): with assert_raises(TypeError): kkk2 += config - kkk5 = treecorr.KKKCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins) + kkk5 = treecorr.KKKCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins, + bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk5 - kkk6 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins) + kkk6 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins, + bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk6 - kkk7 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins*2) + kkk7 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, 
nbins=nrbins*2, + bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk7 kkk8 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - min_u=0.1) + min_u=0.1, bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk8 kkk0 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - max_u=0.1) + max_u=0.1, bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk0 kkk10 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - nubins=nrbins*2) + nubins=nrbins*2, bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk10 kkk11 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - min_v=0.1) + min_v=0.1, bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk11 kkk12 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - max_v=0.1) + max_v=0.1, bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk12 kkk13 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - nvbins=nrbins*2) + nvbins=nrbins*2, bin_type='LogRUV') with assert_raises(ValueError): kkk2 += kkk13 @@ -279,7 +285,7 @@ def test_direct_logruv_spherical(): nubins = 5 nvbins = 5 kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - sep_units='deg', brute=True) + sep_units='deg', brute=True, bin_type='LogRUV') kkk.process(cat) r = np.sqrt(x**2 + y**2 + z**2) @@ -355,7 +361,7 @@ def test_direct_logruv_spherical(): # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. 
kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - sep_units='deg', bin_slop=0, max_top=0) + sep_units='deg', bin_slop=0, max_top=0, bin_type='LogRUV') kkk.process(cat) np.testing.assert_array_equal(kkk.ntri, true_ntri) np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -399,7 +405,7 @@ def test_direct_logruv_cross(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') kkk.process(cat1, cat2, cat3, num_threads=2) # Figure out the correct answer for each permutation @@ -549,7 +555,7 @@ def test_direct_logruv_cross(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) @@ -565,7 +571,7 @@ def test_direct_logruv_cross(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) @@ -614,7 +620,7 @@ def test_direct_logruv_cross12(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') kkk.process(cat1, cat2, num_threads=2) # Figure out the correct answer for each permutation @@ -799,7 +805,7 @@ def test_direct_logruv_cross_3d(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, 
min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') kkk.process(cat1, cat2, cat3, num_threads=2) # Figure out the correct answer for each permutation @@ -929,7 +935,7 @@ def test_direct_logruv_cross_3d(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) @@ -945,7 +951,7 @@ def test_direct_logruv_cross_3d(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') kkk.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(kkk.ntri, true_ntri_123) @@ -984,7 +990,7 @@ def test_constant(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') kkk.process(cat) print('kkk.zeta = ',kkk.zeta.flatten()) np.testing.assert_allclose(kkk.zeta, A**3, rtol=1.e-5) @@ -1070,7 +1076,7 @@ def test_kkk_logruv(): kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') kkk.process(cat, num_threads=1) # Using bin_size=None rather than omiting bin_size is equivalent. 
@@ -1078,7 +1084,7 @@ def test_kkk_logruv(): kkk2 = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_size=None, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') kkk2.process(cat, num_threads=1) assert kkk2 == kkk @@ -1171,7 +1177,7 @@ def test_kkk_logruv(): kkk2 = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') kkk2.read(out_file_name) np.testing.assert_almost_equal(kkk2.logr, kkk.logr) np.testing.assert_almost_equal(kkk2.u, kkk.u) diff --git a/tests/test_nnn.py b/tests/test_nnn.py index f9b37f7a..a3dd805d 100644 --- a/tests/test_nnn.py +++ b/tests/test_nnn.py @@ -64,13 +64,14 @@ def check_defaultuv(nnn): assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.nbins == 20 + assert nnn.bin_type == 'LogRUV' check_defaultuv(nnn) check_arrays(nnn) # Specify min, max, n for u,v too. nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, min_u=0.2, max_u=0.9, nubins=12, - min_v=0., max_v=0.2, nvbins=2) + min_v=0., max_v=0.2, nvbins=2, bin_type='LogRUV') assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.nbins == 20 @@ -83,7 +84,7 @@ def check_defaultuv(nnn): check_arrays(nnn) # Omit min_sep - nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1) + nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1, bin_type='LogRUV') assert nnn.bin_size == 0.1 assert nnn.max_sep == 20. assert nnn.nbins == 20 @@ -93,7 +94,7 @@ def check_defaultuv(nnn): # Specify max, n, bs for u,v too. 
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1, max_u=0.9, nubins=3, ubin_size=0.05, - max_v=0.4, nvbins=4, vbin_size=0.05) + max_v=0.4, nvbins=4, vbin_size=0.05, bin_type='LogRUV') assert nnn.bin_size == 0.1 assert nnn.max_sep == 20. assert nnn.nbins == 20 @@ -108,7 +109,7 @@ def check_defaultuv(nnn): check_arrays(nnn) # Omit max_sep - nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1) + nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1, bin_type='LogRUV') assert nnn.bin_size == 0.1 assert nnn.min_sep == 5. assert nnn.nbins == 20 @@ -117,7 +118,7 @@ def check_defaultuv(nnn): # Specify min, n, bs for u,v too. nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1, min_u=0.7, nubins=4, ubin_size=0.05, - min_v=0.2, nvbins=4, vbin_size=0.05) + min_v=0.2, nvbins=4, vbin_size=0.05, bin_type='LogRUV') assert nnn.min_sep == 5. assert nnn.bin_size == 0.1 assert nnn.nbins == 20 @@ -131,7 +132,7 @@ def check_defaultuv(nnn): check_arrays(nnn) # Omit nbins - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1) + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_type='LogRUV') assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. @@ -140,7 +141,7 @@ def check_defaultuv(nnn): # Specify min, max, bs for u,v too. nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, min_u=0.2, max_u=0.9, ubin_size=0.03, - min_v=0.1, max_v=0.3, vbin_size=0.07) + min_v=0.1, max_v=0.3, vbin_size=0.07, bin_type='LogRUV') assert nnn.min_sep == 5. assert nnn.max_sep == 20. assert nnn.bin_size <= 0.1 @@ -157,7 +158,7 @@ def check_defaultuv(nnn): # If only one of min/max v are set, respect that nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, min_u=0.2, ubin_size=0.03, - min_v=0.2, vbin_size=0.07) + min_v=0.2, vbin_size=0.07, bin_type='LogRUV') assert nnn.min_u == 0.2 assert nnn.max_u == 1. 
assert nnn.nubins == 27 @@ -169,7 +170,7 @@ def check_defaultuv(nnn): check_arrays(nnn) nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, max_u=0.2, ubin_size=0.03, - max_v=0.2, vbin_size=0.07) + max_v=0.2, vbin_size=0.07, bin_type='LogRUV') assert nnn.min_u == 0. assert nnn.max_u == 0.2 assert nnn.nubins == 7 @@ -183,7 +184,7 @@ def check_defaultuv(nnn): # If only vbin_size is set for v, automatically figure out others. # (And if necessary adjust the bin_size down a bit.) nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, - ubin_size=0.3, vbin_size=0.3) + ubin_size=0.3, vbin_size=0.3, bin_type='LogRUV') assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. @@ -199,7 +200,7 @@ def check_defaultuv(nnn): # If only nvbins is set for v, automatically figure out others. nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, - nubins=5, nvbins=5) + nubins=5, nvbins=5, bin_type='LogRUV') assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. @@ -216,7 +217,7 @@ def check_defaultuv(nnn): # If both nvbins and vbin_size are set, set min/max automatically nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, ubin_size=0.1, nubins=5, - vbin_size=0.1, nvbins=5) + vbin_size=0.1, nvbins=5, bin_type='LogRUV') assert nnn.bin_size <= 0.1 assert nnn.min_sep == 5. assert nnn.max_sep == 20. 
@@ -253,35 +254,36 @@ def check_defaultuv(nnn): assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, bin_type='Invalid') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6) + min_u=0.3, max_u=0.9, ubin_size=0.1, nubins=6, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_u=0.9, max_u=0.3) + min_u=0.9, max_u=0.3, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_u=-0.1, max_u=0.3) + min_u=-0.1, max_u=0.3, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_u=0.1, max_u=1.3) + min_u=0.1, max_u=1.3, bin_type='LogRUV') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9) + min_v=0.1, max_v=0.9, vbin_size=0.1, nvbins=9, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_v=0.9, max_v=0.3) + min_v=0.9, max_v=0.3, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_v=-0.1, max_v=0.3) + min_v=-0.1, max_v=0.3, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - min_v=0.1, max_v=1.3) + min_v=0.1, max_v=1.3, bin_type='LogRUV') assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20, - split_method='invalid') + split_method='invalid', bin_type='LogRUV') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - phi_bin_size=0.3) + phi_bin_size=0.3, bin_type='LogRUV') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - nphi_bins=3) + nphi_bins=3, bin_type='LogRUV') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, 
max_sep=20, bin_size=0.1, - min_phi=0.3) + min_phi=0.3, bin_type='LogRUV') assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, - max_phi=0.3) + max_phi=0.3, bin_type='LogRUV') # Check the use of sep_units # radians - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians') + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians', + bin_type='LogRUV') np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5.) @@ -293,7 +295,8 @@ def check_defaultuv(nnn): check_arrays(nnn) # arcsec - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec') + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec', + bin_type='LogRUV') np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600) @@ -307,7 +310,8 @@ def check_defaultuv(nnn): check_defaultuv(nnn) # arcmin - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin') + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin', + bin_type='LogRUV') np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60) @@ -320,7 +324,8 @@ def check_defaultuv(nnn): check_defaultuv(nnn) # degrees - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees') + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees', + bin_type='LogRUV') np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. 
* math.pi/180) @@ -333,7 +338,8 @@ def check_defaultuv(nnn): check_defaultuv(nnn) # hours - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours') + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours', + bin_type='LogRUV') np.testing.assert_almost_equal(nnn.min_sep, 5.) np.testing.assert_almost_equal(nnn.max_sep, 20.) np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12) @@ -349,7 +355,8 @@ def check_defaultuv(nnn): # Start with default behavior nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07) + min_v=0., max_v=0.21, vbin_size=0.07, + bin_type='LogRUV') assert nnn.bin_slop == 1.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) @@ -361,7 +368,8 @@ def check_defaultuv(nnn): # Explicitly set bin_slop=1.0 does the same thing. nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07) + min_v=0., max_v=0.21, vbin_size=0.07, + bin_type='LogRUV') assert nnn.bin_slop == 1.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) @@ -373,7 +381,8 @@ def check_defaultuv(nnn): # Use a smaller bin_slop nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07) + min_v=0., max_v=0.21, vbin_size=0.07, + bin_type='LogRUV') assert nnn.bin_slop == 0.2 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) @@ -385,7 +394,8 @@ def check_defaultuv(nnn): # Use bin_slop == 0 nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07) + min_v=0., max_v=0.21, vbin_size=0.07, + bin_type='LogRUV') assert nnn.bin_slop == 0.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) @@ -397,7 +407,8 @@ def 
check_defaultuv(nnn): # Bigger bin_slop nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07, verbose=0) + min_v=0., max_v=0.21, vbin_size=0.07, verbose=0, + bin_type='LogRUV') assert nnn.bin_slop == 2.0 assert nnn.bin_size == 0.1 assert np.isclose(nnn.ubin_size, 0.03) @@ -409,7 +420,8 @@ def check_defaultuv(nnn): # With bin_size > 0.1, explicit bin_slop=1.0 is accepted. nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07, verbose=0) + min_v=0., max_v=0.21, vbin_size=0.07, verbose=0, + bin_type='LogRUV') assert nnn.bin_slop == 1.0 assert nnn.bin_size == 0.4 assert np.isclose(nnn.ubin_size, 0.03) @@ -421,7 +433,8 @@ def check_defaultuv(nnn): # But implicit bin_slop is reduced so that b = 0.1 nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, min_u=0., max_u=0.9, ubin_size=0.03, - min_v=0., max_v=0.21, vbin_size=0.07) + min_v=0., max_v=0.21, vbin_size=0.07, + bin_type='LogRUV') assert nnn.bin_size == 0.4 assert np.isclose(nnn.ubin_size, 0.03) assert np.isclose(nnn.vbin_size, 0.07) @@ -433,7 +446,8 @@ def check_defaultuv(nnn): # Separately for each of the three parameters nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05, min_u=0., max_u=0.9, ubin_size=0.3, - min_v=0., max_v=0.17, vbin_size=0.17) + min_v=0., max_v=0.17, vbin_size=0.17, + bin_type='LogRUV') assert nnn.bin_size == 0.05 assert np.isclose(nnn.ubin_size, 0.3) assert np.isclose(nnn.vbin_size, 0.17) @@ -473,10 +487,12 @@ def check_default_phi(nnn): # Check the different ways to set up the binning: # Omit bin_size - nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogSAS') + # Default is LogSAS + nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20) assert nnn.min_sep == 5. assert nnn.max_sep == 20. 
assert nnn.nbins == 20 + assert nnn.bin_type == 'LogSAS' check_default_phi(nnn) check_arrays(nnn) @@ -843,7 +859,7 @@ def test_direct_logruv_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=1) + brute=True, verbose=1, bin_type='LogRUV') ddd.process(cat) log_min_sep = np.log(min_sep) @@ -920,7 +936,7 @@ def test_direct_logruv_auto(): rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=0, rng=rng) + brute=True, verbose=0, rng=rng, bin_type='LogRUV') rrr.process(rcat) zeta, varzeta = ddd.calculateZeta(rrr=rrr) @@ -958,11 +974,11 @@ def test_direct_logruv_auto(): drr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=0) + brute=True, verbose=0, bin_type='LogRUV') rdd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=0) + brute=True, verbose=0, bin_type='LogRUV') drr.process(cat, rcat, ordered=False) rdd.process(rcat, cat, ordered=False) zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) @@ -990,7 +1006,7 @@ def test_direct_logruv_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -998,7 +1014,7 @@ def test_direct_logruv_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - 
bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -1006,7 +1022,7 @@ def test_direct_logruv_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ddd.process(cat,cat,cat, num_threads=2) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -1090,7 +1106,7 @@ def test_direct_logruv_auto(): ddd.write(ascii_name, precision=16) ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') ddd3.read(ascii_name) np.testing.assert_allclose(ddd3.ntri, ddd.ntri) np.testing.assert_allclose(ddd3.weight, ddd.weight) @@ -1107,47 +1123,47 @@ def test_direct_logruv_auto(): ddd2 += config ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd4 ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd5 ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd6 ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u-0.1, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, 
nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd7 ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u+0.1, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd8 ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins*2, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd9 ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v-0.1, max_v=max_v, nvbins=nvbins) + min_v=min_v-0.1, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd10 ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v+0.1, nvbins=nvbins) + min_v=min_v, max_v=max_v+0.1, nvbins=nvbins, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd11 ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins*2) + min_v=min_v, max_v=max_v, nvbins=nvbins*2, bin_type='LogRUV') with assert_raises(ValueError): ddd2 += ddd12 @@ -1157,7 +1173,7 @@ def test_direct_logruv_auto(): ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - logger=cl.logger) + logger=cl.logger, bin_type='LogRUV') ddd13.process_auto(cat2) ddd13 += ddd2 assert "Detected a change in catalog coordinate systems" in cl.output @@ -1166,7 +1182,7 @@ def test_direct_logruv_auto(): ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, 
nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - logger=cl.logger) + logger=cl.logger, bin_type='LogRUV') ddd14.process_auto(cat2, metric='Arc') ddd14 += ddd2 assert "Detected a change in metric" in cl.output @@ -1180,7 +1196,7 @@ def test_direct_logruv_auto(): ddd.write(fits_name) ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, bin_type='LogRUV') ddd15.read(fits_name) np.testing.assert_allclose(ddd15.ntri, ddd.ntri) np.testing.assert_allclose(ddd15.weight, ddd.weight) @@ -1227,7 +1243,7 @@ def test_direct_logruv_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=1) + brute=True, verbose=1, bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2, cat3) t1 = time.time() @@ -1339,7 +1355,7 @@ def test_direct_logruv_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2, cat3) @@ -1382,7 +1398,7 @@ def test_direct_logruv_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2, cat3) t1 = time.time() @@ -1424,7 +1440,7 @@ def test_direct_logruv_cross12(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=1) + brute=True, verbose=1, 
bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() @@ -1517,7 +1533,7 @@ def test_direct_logruv_cross12(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() @@ -1544,7 +1560,7 @@ def test_direct_logruv_cross12(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() @@ -1558,7 +1574,7 @@ def test_direct_logruv_cross12(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') t0 = time.time() ddd.process(cat1, cat2) t1 = time.time() @@ -1596,7 +1612,7 @@ def test_direct_logruv_spherical(): nubins = 5 nvbins = 5 ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - sep_units='deg', brute=True) + sep_units='deg', brute=True, bin_type='LogRUV') ddd.process(cat, num_threads=2) r = np.sqrt(x**2 + y**2 + z**2) @@ -1665,7 +1681,7 @@ def test_direct_logruv_spherical(): # Repeat with binslop = 0 # And don't do any top-level recursion so we actually test not going to the leaves. 
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, - sep_units='deg', bin_slop=0, max_top=0) + sep_units='deg', bin_slop=0, max_top=0, bin_type='LogRUV') ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -1701,7 +1717,7 @@ def test_direct_logruv_arc(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, nubins=nubins, ubin_size=ubin_size, nvbins=nvbins, vbin_size=vbin_size, - sep_units='deg', brute=True) + sep_units='deg', brute=True, bin_type='LogRUV') ddd.process(cat, metric='Arc') r = np.sqrt(x**2 + y**2 + z**2) @@ -1772,7 +1788,7 @@ def test_direct_logruv_arc(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins, nubins=nubins, ubin_size=ubin_size, nvbins=nvbins, vbin_size=vbin_size, - sep_units='deg', bin_slop=0, max_top=0) + sep_units='deg', bin_slop=0, max_top=0, bin_type='LogRUV') ddd.process(cat, metric='Arc') np.testing.assert_array_equal(ddd.ntri, true_ntri) np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8) @@ -1811,7 +1827,7 @@ def test_direct_logruv_partial(): ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') ddda.process(cat1a, cat2a, cat3a) log_min_sep = np.log(min_sep) @@ -1902,7 +1918,7 @@ def test_direct_logruv_partial(): dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True) + brute=True, bin_type='LogRUV') dddb.process(cat1b, cat2b, cat3b) np.testing.assert_array_equal(dddb.ntri, true_ntri_123) @@ -1940,7 +1956,7 @@ def test_direct_logruv_3d_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, 
min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=1) + brute=True, verbose=1, bin_type='LogRUV') ddd.process(cat) log_min_sep = np.log(min_sep) @@ -2004,7 +2020,7 @@ def test_direct_logruv_3d_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -2012,7 +2028,7 @@ def test_direct_logruv_3d_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') ddd.process(cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -2020,7 +2036,7 @@ def test_direct_logruv_3d_auto(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ddd.process(cat,cat,cat) np.testing.assert_array_equal(ddd.ntri, true_ntri) @@ -2080,7 +2096,7 @@ def test_direct_logruv_3d_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - brute=True, verbose=1) + brute=True, verbose=1, bin_type='LogRUV') ddd.process(cat1, cat2, cat3) log_min_sep = np.log(min_sep) @@ -2160,7 +2176,7 @@ def test_direct_logruv_3d_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ddd.process(cat1, cat2, cat3) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) ddd.process(cat1, cat2, cat3, 
ordered=False) @@ -2170,7 +2186,7 @@ def test_direct_logruv_3d_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1, max_top=0) + bin_slop=0, verbose=1, max_top=0, bin_type='LogRUV') ddd.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) ddd.process(cat1, cat2, cat3, ordered=False) @@ -2184,7 +2200,7 @@ def test_direct_logruv_3d_cross(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, verbose=1) + bin_slop=0, verbose=1, bin_type='LogRUV') ddd.process(cat1, cat2, cat3, ordered=True) np.testing.assert_array_equal(ddd.ntri, true_ntri_123) @@ -2247,14 +2263,14 @@ def test_nnn_logruv(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ddd.process(cat) # Using bin_size=None rather than omitting bin_size is equivalent. 
ddd2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_size=None, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ddd2.process(cat, num_threads=1) ddd.process(cat, num_threads=1) assert ddd2 == ddd @@ -2289,7 +2305,7 @@ def test_nnn_logruv(): rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') rrr.process(rand) d1 = ddd.meand1 @@ -2373,7 +2389,7 @@ def test_nnn_logruv(): ddd2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ddd2.read(out_file_name1) np.testing.assert_almost_equal(ddd2.logr, ddd.logr) np.testing.assert_almost_equal(ddd2.u, ddd.u) @@ -2444,7 +2460,7 @@ def test_nnn_logruv(): ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, - sep_units='arcmin', verbose=1) + sep_units='arcmin', verbose=1, bin_type='LogRUV') ddd3.read(out_file_name3) np.testing.assert_almost_equal(ddd3.logr, ddd.logr) np.testing.assert_almost_equal(ddd3.u, ddd.u) @@ -2480,7 +2496,8 @@ def test_nnn_logruv(): ddd.calculateZeta(rrr=rrr, drr=rrr) rrr2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, sep_units='arcmin') + nubins=nubins, nvbins=nvbins, sep_units='arcmin', + bin_type='LogRUV') # Error if any of them haven't been run yet. 
with assert_raises(ValueError): ddd.calculateZeta(rrr=rrr2, drr=rrr, rdd=rrr) @@ -2665,7 +2682,7 @@ def test_3d_logruv(): cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='deg', dec_units='deg') ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, verbose=1) + nubins=nubins, nvbins=nvbins, verbose=1, bin_type='LogRUV') ddd.process(cat) rx = (rng.random_sample(nrand)-0.5) * L + xcen @@ -2678,7 +2695,7 @@ def test_3d_logruv(): rand = treecorr.Catalog(ra=rra, dec=rdec, r=rr, ra_units='deg', dec_units='deg') rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, verbose=1) + nubins=nubins, nvbins=nvbins, verbose=1, bin_type='LogRUV') rrr.process(rand) d1 = ddd.meand1 @@ -2744,13 +2761,15 @@ def test_list_logruv(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1) + nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1, + bin_type='LogRUV') ddd.process(data_cats) # Now do the same thing with one big catalog dddx = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1) + nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1, + bin_type='LogRUV') data_catx = treecorr.Catalog(x=x.reshape( (ngal*ncats,) ), y=y.reshape( (ngal*ncats,) )) dddx.process(data_catx) # Only test to rtol=0.1, since there are now differences between the auto and cross related @@ -2760,12 +2779,14 @@ def test_list_logruv(): rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1) + nubins=nubins, 
nvbins=nvbins, bin_slop=0.1, verbose=1, + bin_type='LogRUV') rrr.process(rand_cats) rrrx = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, - nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1) + nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1, + bin_type='LogRUV') rand_catx = treecorr.Catalog(x=rx.reshape( (nrand*ncats,) ), y=ry.reshape( (nrand*ncats,) )) rrrx.process(rand_catx) np.testing.assert_allclose(rrr.ntri, rrrx.ntri, rtol=0.1) diff --git a/tests/test_patch3pt.py b/tests/test_patch3pt.py index 68273d38..4abbd9b5 100644 --- a/tests/test_patch3pt.py +++ b/tests/test_patch3pt.py @@ -124,8 +124,8 @@ def test_kkk_jk(): print(run,': ',np.mean(k),np.std(k)) cat = treecorr.Catalog(x=x, y=y, k=k) kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., - min_u=0.9, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1) + min_u=0.9, max_u=1.0, nubins=1, + min_v=0.0, max_v=0.1, nvbins=1, bin_type='LogRUV') kkk.process(cat) print(kkk.ntri.ravel().tolist()) print(kkk.zeta.ravel().tolist()) @@ -147,7 +147,7 @@ def test_kkk_jk(): cat = treecorr.Catalog(x=x, y=y, k=k) kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., min_u=0.9, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1, rng=rng) + min_v=0.0, max_v=0.1, nvbins=1, rng=rng, bin_type='LogRUV') kkk.process(cat) print(kkk.ntri.ravel()) print(kkk.zeta.ravel()) @@ -251,7 +251,7 @@ def test_kkk_jk(): kkkp.write(file_name, write_patch_results=True) kkk3 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., min_u=0.9, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1, rng=rng) + min_v=0.0, max_v=0.1, nvbins=1, rng=rng, bin_type='LogRUV') kkk3.read(file_name) cov3 = kkk3.estimate_cov('jackknife') np.testing.assert_allclose(cov3, cov1) @@ -261,7 +261,7 @@ def test_kkk_jk(): kkkp.write(file_name, write_patch_results=True) kkk4 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., min_u=0.9, 
max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1, rng=rng) + min_v=0.0, max_v=0.1, nvbins=1, rng=rng, bin_type='LogRUV') kkk4.read(file_name) cov4 = kkk4.estimate_cov('jackknife') np.testing.assert_allclose(cov4, cov1) @@ -279,7 +279,7 @@ def test_kkk_jk(): kkkp.write(file_name, write_patch_results=True) kkk5 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., min_u=0.9, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.1, nvbins=1, rng=rng) + min_v=0.0, max_v=0.1, nvbins=1, rng=rng, bin_type='LogRUV') kkk5.read(file_name) cov5 = kkk5.estimate_cov('jackknife') np.testing.assert_allclose(cov5, cov1) @@ -580,7 +580,7 @@ def test_ggg_jk(): cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2) ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40., min_u=0.6, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.6, nvbins=1) + min_v=0.0, max_v=0.6, nvbins=1, bin_type='LogRUV') ggg.process(cat) print(ggg.ntri.ravel()) print(f(ggg)) @@ -601,7 +601,7 @@ def test_ggg_jk(): cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2) ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40., min_u=0.6, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.6, nvbins=1, rng=rng) + min_v=0.0, max_v=0.6, nvbins=1, rng=rng, bin_type='LogRUV') ggg.process(cat) print(ggg.ntri.ravel()) print(ggg.gam0.ravel()) @@ -687,7 +687,7 @@ def test_ggg_jk(): gggp.write(file_name, write_patch_results=True) ggg3 = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40., min_u=0.6, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.6, nvbins=1) + min_v=0.0, max_v=0.6, nvbins=1, bin_type='LogRUV') ggg3.read(file_name) cov3 = ggg3.estimate_cov('jackknife', func=f) print('cov3 = ',cov3) @@ -698,7 +698,7 @@ def test_ggg_jk(): gggp.write(file_name, write_patch_results=True) ggg4 = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40., min_u=0.6, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.6, nvbins=1) + min_v=0.0, max_v=0.6, nvbins=1, bin_type='LogRUV') ggg4.read(file_name) cov4 = ggg4.estimate_cov('jackknife', func=f) 
np.testing.assert_allclose(cov4, cov1) @@ -716,7 +716,7 @@ def test_ggg_jk(): gggp.write(file_name, write_patch_results=True) ggg5 = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40., min_u=0.6, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.6, nvbins=1) + min_v=0.0, max_v=0.6, nvbins=1, bin_type='LogRUV') ggg5.read(file_name) cov5 = ggg5.estimate_cov('jackknife', func=f) np.testing.assert_allclose(cov5, cov1) @@ -987,14 +987,14 @@ def test_nnn_jk(): cat = treecorr.Catalog(x=x[select], y=y[select]) ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1) + min_v=0.0, max_v=0.2, nvbins=1, bin_type='LogRUV') rx = rng.uniform(0,1000, rand_factor*nsource) ry = rng.uniform(0,1000, rand_factor*nsource) rand_cat = treecorr.Catalog(x=rx, y=ry) rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1) + min_v=0.0, max_v=0.2, nvbins=1, bin_type='LogRUV') rrr.process(rand_cat) rdd = ddd.copy() drr = ddd.copy() @@ -1033,7 +1033,7 @@ def test_nnn_jk(): rand_cat = treecorr.Catalog(x=rx, y=ry) rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1) + min_v=0.0, max_v=0.2, nvbins=1, bin_type='LogRUV') t0 = time.time() rrr.process(rand_cat) t1 = time.time() @@ -1051,7 +1051,7 @@ def test_nnn_jk(): cat = treecorr.Catalog(x=x[select], y=y[select]) ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1, rng=rng) + min_v=0.0, max_v=0.2, nvbins=1, rng=rng, bin_type='LogRUV') rdd = ddd.copy() drr = ddd.copy() ddd.process(cat) @@ -1081,7 +1081,7 @@ def test_nnn_jk(): # Do the same thing with patches on D, but not yet on R. 
dddp = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1, rng=rng) + min_v=0.0, max_v=0.2, nvbins=1, rng=rng, bin_type='LogRUV') rddp = dddp.copy() drrp = dddp.copy() catp = treecorr.Catalog(x=x[select], y=y[select], patch_centers=patch_centers) @@ -1305,7 +1305,7 @@ def test_nnn_jk(): rddp.write(rdd_file_name, write_patch_results=True) ddd3 = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1) + min_v=0.0, max_v=0.2, nvbins=1, bin_type='LogRUV') rrr3 = ddd3.copy() drr3 = ddd3.copy() rdd3 = ddd3.copy() @@ -1328,7 +1328,7 @@ def test_nnn_jk(): rddp.write(rdd_file_name, write_patch_results=True) ddd4 = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1) + min_v=0.0, max_v=0.2, nvbins=1, bin_type='LogRUV') rrr4 = ddd4.copy() drr4 = ddd4.copy() rdd4 = ddd4.copy() @@ -1359,7 +1359,7 @@ def test_nnn_jk(): rddp.write(rdd_file_name, write_patch_results=True) ddd5 = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2, min_u=0.8, max_u=1.0, nubins=1, - min_v=0.0, max_v=0.2, nvbins=1) + min_v=0.0, max_v=0.2, nvbins=1, bin_type='LogRUV') rrr5 = ddd5.copy() drr5 = ddd5.copy() rdd5 = ddd5.copy() @@ -1422,13 +1422,13 @@ def test_brute_jk(): # Start with KKK, since relatively simple. 
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True, min_u=0., max_u=1.0, nubins=1, - min_v=0., max_v=1.0, nvbins=1) + min_v=0., max_v=1.0, nvbins=1, bin_type='LogRUV') kkk1.process(cat_nopatch) kkk = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True, min_u=0., max_u=1.0, nubins=1, min_v=0., max_v=1.0, nvbins=1, - var_method='jackknife') + var_method='jackknife', bin_type='LogRUV') kkk.process(cat) np.testing.assert_allclose(kkk.zeta, kkk1.zeta) @@ -1441,7 +1441,7 @@ def test_brute_jk(): g2=cat.g2[cat.patch != i]) kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True, min_u=0., max_u=1.0, nubins=1, - min_v=0., max_v=1.0, nvbins=1) + min_v=0., max_v=1.0, nvbins=1, bin_type='LogRUV') kkk1.process(cat1) print('zeta = ',kkk1.zeta.ravel()) kkk_zeta_list.append(kkk1.zeta.ravel()) @@ -1456,13 +1456,13 @@ def test_brute_jk(): # Now GGG ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True, min_u=0., max_u=1.0, nubins=1, - min_v=0., max_v=1.0, nvbins=1) + min_v=0., max_v=1.0, nvbins=1, bin_type='LogRUV') ggg1.process(cat_nopatch) ggg = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True, min_u=0., max_u=1.0, nubins=1, min_v=0., max_v=1.0, nvbins=1, - var_method='jackknife') + var_method='jackknife', bin_type='LogRUV') ggg.process(cat) np.testing.assert_allclose(ggg.gam0, ggg1.gam0) np.testing.assert_allclose(ggg.gam1, ggg1.gam1) @@ -1482,7 +1482,7 @@ def test_brute_jk(): g2=cat.g2[cat.patch != i]) ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True, min_u=0., max_u=1.0, nubins=1, - min_v=0., max_v=1.0, nvbins=1) + min_v=0., max_v=1.0, nvbins=1, bin_type='LogRUV') ggg1.process(cat1) ggg_gam0_list.append(ggg1.gam0.ravel()) ggg_gam1_list.append(ggg1.gam1.ravel()) @@ -1532,7 +1532,7 @@ def test_brute_jk(): ddd = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0, min_u=0., max_u=1.0, nubins=1, min_v=0., max_v=1.0, 
nvbins=1, - var_method='jackknife') + var_method='jackknife', bin_type='LogRUV') drr = ddd.copy() rdd = ddd.copy() rrr = ddd.copy() @@ -1553,7 +1553,7 @@ def test_brute_jk(): y=rand_cat.y[rand_cat.patch != i]) ddd1 = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0, min_u=0., max_u=1.0, nubins=1, - min_v=0., max_v=1.0, nvbins=1) + min_v=0., max_v=1.0, nvbins=1, bin_type='LogRUV') drr1 = ddd1.copy() rdd1 = ddd1.copy() rrr1 = ddd1.copy() @@ -1640,12 +1640,12 @@ def test_finalize_false(): # KKK auto kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True, min_u=0.8, max_u=1.0, nubins=1, - min_v=0., max_v=0.2, nvbins=1) + min_v=0., max_v=0.2, nvbins=1, bin_type='LogRUV') kkk1.process(cat) kkk2 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True, min_u=0.8, max_u=1.0, nubins=1, - min_v=0., max_v=0.2, nvbins=1) + min_v=0., max_v=0.2, nvbins=1, bin_type='LogRUV') kkk2.process(cat1, initialize=True, finalize=False) kkk2.process(cat2, initialize=False, finalize=False) kkk2.process(cat3, initialize=False, finalize=False) @@ -1728,12 +1728,12 @@ def test_finalize_false(): # GGG auto ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True, min_u=0.8, max_u=1.0, nubins=1, - min_v=0., max_v=0.2, nvbins=1) + min_v=0., max_v=0.2, nvbins=1, bin_type='LogRUV') ggg1.process(cat) ggg2 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True, min_u=0.8, max_u=1.0, nubins=1, - min_v=0., max_v=0.2, nvbins=1) + min_v=0., max_v=0.2, nvbins=1, bin_type='LogRUV') ggg2.process(cat1, initialize=True, finalize=False) ggg2.process(cat2, initialize=False, finalize=False) ggg2.process(cat3, initialize=False, finalize=False) @@ -1823,12 +1823,12 @@ def test_finalize_false(): # NNN auto nnn1 = treecorr.NNNCorrelation(nbins=3, min_sep=10., max_sep=200., bin_slop=0, min_u=0.8, max_u=1.0, nubins=1, - min_v=0., max_v=0.2, nvbins=1) + min_v=0., max_v=0.2, nvbins=1, bin_type='LogRUV') nnn1.process(cat) 
nnn2 = treecorr.NNNCorrelation(nbins=3, min_sep=10., max_sep=200., bin_slop=0, min_u=0.8, max_u=1.0, nubins=1, - min_v=0., max_v=0.2, nvbins=1) + min_v=0., max_v=0.2, nvbins=1, bin_type='LogRUV') nnn2.process(cat1, initialize=True, finalize=False) nnn2.process(cat2, initialize=False, finalize=False) nnn2.process(cat3, initialize=False, finalize=False) @@ -1940,7 +1940,7 @@ def test_lowmem(): kkk = treecorr.KKKCorrelation(nbins=1, min_sep=280., max_sep=300., min_u=0.95, max_u=1.0, nubins=1, - min_v=0., max_v=0.05, nvbins=1) + min_v=0., max_v=0.05, nvbins=1, bin_type='LogRUV') t0 = time.time() s0 = hp.heap().size if hp else 0 diff --git a/tests/test_periodic.py b/tests/test_periodic.py index f9b8676d..6dd50d2c 100644 --- a/tests/test_periodic.py +++ b/tests/test_periodic.py @@ -363,7 +363,8 @@ def test_3pt(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - bin_slop=0, xperiod=Lx, yperiod=Ly, brute=True) + bin_slop=0, xperiod=Lx, yperiod=Ly, brute=True, + bin_type='LogRUV') ddd.process(cat, metric='Periodic', num_threads=1) #print('ddd.ntri = ',ddd.ntri) @@ -452,7 +453,8 @@ def test_3pt(): # If don't give a period, then an error. 
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, - min_v=min_v, max_v=max_v, nvbins=nvbins) + min_v=min_v, max_v=max_v, nvbins=nvbins, + bin_type='LogRUV') with assert_raises(ValueError): ddd.process(cat, metric='Periodic') @@ -460,13 +462,13 @@ def test_3pt(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - xperiod=3) + xperiod=3, bin_type='LogRUV') with assert_raises(ValueError): ddd.process(cat, metric='Periodic') ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - yperiod=3) + yperiod=3, bin_type='LogRUV') with assert_raises(ValueError): ddd.process(cat, metric='Periodic') @@ -474,7 +476,7 @@ def test_3pt(): ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v, nvbins=nvbins, - period=3) + period=3, bin_type='LogRUV') with assert_raises(ValueError): ddd.process(cat) diff --git a/treecorr/corr3base.py b/treecorr/corr3base.py index da7fef3f..21d74432 100644 --- a/treecorr/corr3base.py +++ b/treecorr/corr3base.py @@ -111,7 +111,7 @@ class Corr3(object): linearly from min_u .. max_u and min_v .. max_v. - 'LogSAS' uses the SAS description given above. The bin steps will be uniform in log(d) for both d2 and d3 from log(min_sep) .. log(max_sep). The phi values are binned - linearly from min_phi .. max_phi. + linearly from min_phi .. max_phi. This is the default. Parameters: config (dict): A configuration dict that can be used to pass in the below kwargs if @@ -226,7 +226,7 @@ class Corr3(object): metric (str): Which metric to use for distance measurements. Options are listed above. (default: 'Euclidean') bin_type (str): What type of binning should be used. Only one option currently. 
- (default: 'LogRUV') + (default: 'LogSAS') period (float): For the 'Periodic' metric, the period to use in all directions. (default: None) xperiod (float): For the 'Periodic' metric, the period to use in the x direction. @@ -316,7 +316,7 @@ class Corr3(object): 'The number of digits after the decimal in the output.'), 'metric': (str, False, 'Euclidean', ['Euclidean', 'Arc', 'Periodic'], 'Which metric to use for the distance measurements'), - 'bin_type': (str, False, 'LogRUV', ['LogRUV', 'LogSAS'], + 'bin_type': (str, False, 'LogSAS', ['LogRUV', 'LogSAS'], 'Which type of binning should be used'), 'period': (float, False, None, None, 'The period to use for all directions for the Periodic metric'), @@ -361,7 +361,7 @@ def __init__(self, config=None, *, logger=None, rng=None, **kwargs): else: self._ro.output_dots = get(self.config,'verbose',int,1) >= 2 - self._ro.bin_type = self.config.get('bin_type', 'LogRUV') + self._ro.bin_type = self.config.get('bin_type', 'LogSAS') self._ro.sep_units = self.config.get('sep_units','') self._ro._sep_units = get(self.config,'sep_units',str,'radians') From 6c77a73ddace2ac19f7caa05de4c22d6206624e1 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 08:18:56 -0500 Subject: [PATCH 13/18] Add to CHANGELOG --- CHANGELOG.rst | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index dcd0bf8c..c507b85b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -25,9 +25,15 @@ API Changes normally used directly by users, so it shouldn't be noticeable in user code. (#155) - Removed all deprecations from the 4.x series. (#156) - Removed support for reading back in output files from the 3.x series. (#165) -- Removed the 3pt CrossCorrelation classes. See the new ``ordered=True`` option to the 3pt - ``process`` method instead to get correlations where the order of the three catalogs is fixed. - This is simpler and more intuitive, and for many use cases it is more efficient. 
(#165) +- Removed the 3pt CrossCorrelation classes, which used to be the way to get ordered three-point + correlations. But they were rather unwieldy and not very intuitive. The new ``ordered`` + option to the three-point ``process`` methods is much simpler and more efficient for the common + case of only wanting a single order for the catalogs. (#165) +- Switched the default behavior of 3pt cross-correlations to respect the order of the catalogs + in the triangle definitions. That is, points from cat1 will be at P1 in the triangle, + points from cat2 at P2, and points from cat3 at P3. To recover the old behavior, you may + use the new ``ordered=False`` option. +- Switched the default binning for three-point correlations to LogSAS, rather than LogRUV. Performance improvements From b5d6d74913e24b2ab1f9e9e0fbc3b3161c2005b0 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 08:40:57 -0500 Subject: [PATCH 14/18] Fix comment error --- tests/test_kkk.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_kkk.py b/tests/test_kkk.py index 85347e7d..4c770257 100644 --- a/tests/test_kkk.py +++ b/tests/test_kkk.py @@ -545,7 +545,7 @@ def test_direct_logruv_cross(): np.testing.assert_allclose(kkk.weight, true_weight_321, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_321, rtol=1.e-5) - # With the default ordered=False, we end up with the sum of all permutations. + # With ordered=False, we end up with the sum of all permutations. kkk.process(cat1, cat2, cat3, ordered=False) np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) @@ -1915,7 +1915,6 @@ def test_direct_logsas_cross12(): w_list = [true_weight_122, true_weight_212, true_weight_221] z_list = [true_zeta_122, true_zeta_212, true_zeta_221] - # With the default ordered=False, we end up with the sum of all permutations. 
true_ntri_sum = sum(n_list) true_weight_sum = sum(w_list) true_zeta_sum = sum(z_list) @@ -1940,6 +1939,7 @@ def test_direct_logsas_cross12(): np.testing.assert_allclose(kkk.weight, true_weight_221, rtol=1.e-5) np.testing.assert_allclose(kkk.zeta, true_zeta_221, rtol=1.e-4, atol=1.e-6) + # With ordered=False, we end up with the sum of all permutations. kkk.process(cat1, cat2, ordered=False) np.testing.assert_array_equal(kkk.ntri, true_ntri_sum) np.testing.assert_allclose(kkk.weight, true_weight_sum, rtol=1.e-5) From e7dd89fb5523744a8cfe92ba6830a5778f8f5796 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 09:17:35 -0500 Subject: [PATCH 15/18] Fix erroneous assert when O=True --- include/BinType.h | 5 +++-- tests/test_patch3pt.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/include/BinType.h b/include/BinType.h index fa9bec49..5e3cbe37 100644 --- a/include/BinType.h +++ b/include/BinType.h @@ -692,13 +692,14 @@ struct BinTypeHelper Assert(d1 > 0.); Assert(d3 > 0.); Assert(u > 0.); - Assert(v >= 0.); // v can potentially == 0. if (O && !(d1 >= d2 && d2 >= d3)) { xdbg<<"Sides are not in correct size ordering d1 >= d2 >= d3\n"; return false; } + Assert(v >= 0.); // v can potentially == 0. + if (d2 < minsep || d2 >= maxsep) { xdbg<<"d2 not in minsep .. maxsep\n"; return false; @@ -723,7 +724,7 @@ struct BinTypeHelper Assert(kr >= 0); Assert(kr <= nbins); if (kr == nbins) --kr; // This is rare, but can happen with numerical differences - // between the math for log and for non-log checks. + // between the math for log and for non-log checks. 
Assert(kr < nbins); int ku = int(floor((u-minu)/ubinsize)); diff --git a/tests/test_patch3pt.py b/tests/test_patch3pt.py index 4abbd9b5..8f4c3419 100644 --- a/tests/test_patch3pt.py +++ b/tests/test_patch3pt.py @@ -2010,4 +2010,4 @@ def test_lowmem(): test_nnn_jk() test_brute_jk() test_finalize_false() - test_lowmem + test_lowmem() From a5cfacf481c612555004fc0ee89897fc4b23af65 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 09:27:54 -0500 Subject: [PATCH 16/18] Reference PR in CHANGELOG --- CHANGELOG.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c507b85b..c3ac8104 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -32,8 +32,8 @@ API Changes - Switched the default behavior of 3pt cross-correlations to respect the order of the catalogs in the triangle definitions. That is, points from cat1 will be at P1 in the triangle, points from cat2 at P2, and points from cat3 at P3. To recover the old behavior, you may - use the new ``ordered=False`` option. -- Switched the default binning for three-point correlations to LogSAS, rather than LogRUV. + use the new ``ordered=False`` option. (#166) +- Switched the default binning for three-point correlations to LogSAS, rather than LogRUV. 
(#166) Performance improvements From cbe42523f478df34debff7bf6778184c76f1e428 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 09:47:08 -0500 Subject: [PATCH 17/18] Increase lomem slightly patch3pt lowmem test --- tests/test_patch3pt.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_patch3pt.py b/tests/test_patch3pt.py index 8f4c3419..f9ef7fd6 100644 --- a/tests/test_patch3pt.py +++ b/tests/test_patch3pt.py @@ -1910,13 +1910,13 @@ def test_lowmem(): nhalo = 100 npatch = 4 himem = 7.e5 - lomem = 8.e4 + lomem = 9.e4 else: nsource = 1000 nhalo = 100 npatch = 4 - himem = 1.3e5 - lomem = 8.e4 + himem = 1.4e5 + lomem = 1.e5 rng = np.random.RandomState(8675309) x, y, g1, g2, k = generate_shear_field(nsource, nhalo, rng) From 4091c4d9519a0edf9dccfc3e8b53042833e0dbc8 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sun, 14 Jan 2024 10:46:59 -0500 Subject: [PATCH 18/18] coverage --- treecorr/catalog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/treecorr/catalog.py b/treecorr/catalog.py index c40d3bbf..4d0970fd 100644 --- a/treecorr/catalog.py +++ b/treecorr/catalog.py @@ -2741,7 +2741,7 @@ def __getstate__(self): def __setstate__(self, d): self.__dict__ = d - if self._logger_name is not None: + if self._logger_name is not None: # pragma: no branch self.logger = setup_logger(get(self.config,'verbose',int,1), self.config.get('log_file',None), self._logger_name) self._field = lambda : None