orpheus-npcf 0.2.1__cp310-cp310-musllinux_1_2_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- orpheus/__init__.py +9 -0
- orpheus/catalog.py +1216 -0
- orpheus/covariance.py +153 -0
- orpheus/direct.py +1091 -0
- orpheus/flat2dgrid.py +68 -0
- orpheus/npcf_base.py +766 -0
- orpheus/npcf_fourth.py +1716 -0
- orpheus/npcf_second.py +620 -0
- orpheus/npcf_third.py +1684 -0
- orpheus/orpheus_clib.cpython-310-x86_64-linux-gnu.so +0 -0
- orpheus/patchutils.py +369 -0
- orpheus/utils.py +198 -0
- orpheus_npcf-0.2.1.dist-info/METADATA +67 -0
- orpheus_npcf-0.2.1.dist-info/RECORD +19 -0
- orpheus_npcf-0.2.1.dist-info/WHEEL +5 -0
- orpheus_npcf-0.2.1.dist-info/licenses/LICENSE +674 -0
- orpheus_npcf-0.2.1.dist-info/sboms/auditwheel.cdx.json +1 -0
- orpheus_npcf-0.2.1.dist-info/top_level.txt +1 -0
- orpheus_npcf.libs/libgomp-8949ffbe.so.1.0.0 +0 -0
orpheus/patchutils.py
ADDED
@@ -0,0 +1,369 @@
# Here we collect some utils for mapping a full-sky survey to a set of overlapping patches
# In the medium term much of this functionality should be included in the orpheus code

from astropy.coordinates import SkyCoord
from healpy import ang2pix, pix2vec, nside2pixarea, nside2resol, query_disc, Rotator, nside2npix
import numpy as np
from pathlib import Path
import pickle
import os
import sys
from time import time
from threadpoolctl import threadpool_limits

from sklearn.cluster import KMeans

def pickle_save(data, filename):

    file_path = Path(filename)
    file_path.parent.mkdir(parents=True, exist_ok=True)

    try:
        with open(filename, 'wb') as file:
            pickle.dump(data, file)
    except Exception as e:
        print(f"An error occurred while saving the dictionary: {e}")

def pickle_load(filename):

    try:
        with open(filename, 'rb') as file:
            data = pickle.load(file)
        return data
    except Exception as e:
        print(f"An error occurred while loading the file: {e}")
        return None

def frompatchindices_preparerot(index, patchindices, ra, dec, rotsignflip):

    inds_inner = patchindices["patches"][index]["inner"]
    inds_outer = patchindices["patches"][index]["outer"]
    inds_extpatch = np.append(inds_inner, inds_outer)
    ngal_patch = len(inds_extpatch)
    patch_isinner = np.zeros(ngal_patch, dtype=bool)
    patch_isinner[:len(inds_inner)] = True
    patch_isinner[len(inds_inner):] = False
    # Note that we fix the rotangle at this point as this is required when computing patches
    # across multiple catalogs. In that case the patchcenters are by definition the center of
    # mass of the joint catalog. For a single catalog this does not matter. The signs match
    # the (theta,phi) conventions in healpy -- see the toorigin function for details.
    rotangle = [+patchindices['info']['patchcenters'][index][0]*np.pi/180.,
                -patchindices['info']['patchcenters'][index][1]*np.pi/180.]
    nextrotres = toorigin(ra[inds_extpatch],
                          dec[inds_extpatch],
                          isinner=patch_isinner,
                          rotangle=rotangle,
                          inv=False,
                          rotsignflip=rotsignflip,
                          radec_units="deg")
    rotangle, ra_rot, dec_rot, rotangle_polars = nextrotres

    return inds_extpatch, patch_isinner, rotangle, ra_rot, dec_rot, rotangle_polars
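
For orientation, here is a minimal sketch of how this helper is consumed, using a hand-built single-patch `patchindices` dictionary with the same layout as the one produced by `gen_cat_patchindices` below (all values are illustrative):

```python
import numpy as np
from orpheus.patchutils import frompatchindices_preparerot

# Hand-built patchindices dict for a single patch (illustrative values only)
ra  = np.array([10.0, 10.2,  9.8, 12.0])
dec = np.array([-5.0, -5.1, -4.9, -6.0])
patchindices = {
    "info":    {"patchcenters": np.array([[10.0, -5.0]])},
    "patches": {0: {"inner": np.array([0, 1, 2]), "outer": np.array([3])}},
}
inds, isinner, rotangle, ra_rot, dec_rot, polrot = frompatchindices_preparerot(
    0, patchindices, ra, dec, rotsignflip=False)
print(ra_rot, dec_rot)  # the extended patch, rotated towards the origin
```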

def gen_cat_patchindices(ra_deg, dec_deg, npatches, patchextend_arcmin, nside_hash=128, verbose=False, method='kmeans_healpix',
                         kmeanshp_maxiter=1000, kmeanshp_tol=1e-10, kmeanshp_randomstate=42, healpix_nside=8):
    """ Decomposes a spherical catalog into ~equal-area patches with a buffer region

    Parameters
    ----------
    ra_deg: numpy.ndarray
        The ra of the catalog, given in units of degree.
    dec_deg: numpy.ndarray
        The dec of the catalog, given in units of degree.
    npatches: int
        The number of patches into which the catalog shall be decomposed.
    patchextend_arcmin: float
        The buffer region that extends around each patch, given in units of arcmin.
    nside_hash: int
        The healpix resolution used for hashing subareas of the patches.
    verbose: bool
        Whether progress output is printed to the console.
    method: str
        Patch-creation method; one of 'kmeans_treecorr', 'kmeans_healpix' or 'healpix'.
    kmeanshp_maxiter: int
        Maximum number of iterations for the 'kmeans_healpix' method.
    kmeanshp_tol: float
        Convergence tolerance for the 'kmeans_healpix' method.
    kmeanshp_randomstate: int
        Random seed for the 'kmeans_healpix' method.
    healpix_nside: int
        The healpix resolution used for the 'healpix' method.

    Returns
    -------
    cat_patchindices: dict
        A dictionary containing information about the individual patches,
        as well as the galaxy indices that are assigned to the inner region
        and to the buffer region of each individual patch

    Notes
    -----
    Choosing a small value of nside_hash will result in a larger extension of
    the patches than necessary, while choosing a large value increases the
    runtime. A good compromise is to choose nside_hash such that its resolution
    is a few times smaller than the buffer region of the patches.
    """

    def build_indexhash(arr):
        """Returns a hash for indices of repeated values in a 1D array"""
        sort_indices = np.argsort(arr)
        arr = np.asarray(arr)[sort_indices]
        vals, first_indices = np.unique(arr, return_index=True)
        indices = np.split(sort_indices, first_indices[1:])
        indhash = {}
        for elval, val in enumerate(vals):
            indhash[val] = indices[elval]
        return indhash
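
To make the hashing step concrete, here is the argsort/unique/split trick used by `build_indexhash`, applied to a toy array (a standalone sketch, not part of the package):

```python
import numpy as np

arr = np.array([7, 3, 7, 1, 3, 7])
sort_indices = np.argsort(arr)            # indices that sort arr, e.g. [3, 1, 4, 0, 2, 5]
vals, first = np.unique(arr[sort_indices], return_index=True)
groups = np.split(sort_indices, first[1:])
print(dict(zip(vals, groups)))
# {1: array([3]), 3: array([1, 4]), 7: array([0, 2, 5])}  (order within groups may vary)
```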

    if verbose:
        print("Computing inner region of patches")
        t1 = time()

    # Run treecorr's k-means implementation
    if method=='kmeans_treecorr':
        try:
            import treecorr
            cat = treecorr.Catalog(ra=ra_deg, dec=dec_deg,
                                   ra_units="deg", dec_units="deg",
                                   npatch=npatches)
            patchinds = cat.patch
        except ImportError:
            print('Treecorr not available...switching to patch creation via KMeans')
            method = 'kmeans_healpix'

    # Run standard k-means on the catalog reduced to healpix pixels
    # (plain if, not elif, so that this branch also serves as the fallback
    # in case treecorr is not installed)
    if method=='kmeans_healpix':
        # Step 1: Reduce discrete ra/dec to unique healpix pixels and transform those to 3D positions
        nside_kmeans = 2048  # I keep this fixed for now as it will most likely work well for all reasonable cases.
        eq = SkyCoord(ra_deg, dec_deg, frame='galactic', unit='deg')
        l, b = eq.galactic.l.value, eq.galactic.b.value
        theta = np.radians(90. - b)
        phi = np.radians(l)
        hpx_inds = ang2pix(nside_kmeans, theta, phi)
        hpx_uinds = np.unique(hpx_inds)
        # Step 2: Run the standard kmeans algorithm on the healpix pixels.
        # Note that each pixel carries the same (unity) weight. This implies
        # that we make the patches have approximately equal area, but neglect
        # depth variations on a patch-sized scale. To me this seems to be a
        # sensible choice as the flat-sky approximation only cares about the
        # extent of the patches. If one wants to use the patches as Jackknife
        # samples for an internal covariance matrix estimate this choice might
        # need to be revisited (but as of now I do not see a clear point against
        # continuing to use the current setup as long as the patchsize is in a
        # domain where the contributions to the covariance that contain
        # shapenoise are expected to be subdominant).
        clust = KMeans(n_clusters=npatches,
                       init='k-means++',
                       n_init='auto',
                       max_iter=kmeanshp_maxiter,
                       tol=kmeanshp_tol,
                       verbose=0,
                       random_state=kmeanshp_randomstate,
                       copy_x=True,
                       algorithm='lloyd')
        X = np.array(pix2vec(nside=nside_kmeans, ipix=hpx_uinds, nest=False)).T
        # Temporarily limit the max number of OMP threads here as KMeans by default chooses
        # all available cores and might crash in case scipy has not been compiled to handle
        # this. Also, KMeans becomes fairly inefficient for this many cores anyway.
        with threadpool_limits(limits=32, user_api="openmp"):
            clustinds = clust.fit_predict(X, y=None, sample_weight=None)
        # Step 3: Map the pixel centers back to the galaxy indices
        hashmap = np.vectorize({upix: center for upix, center in zip(hpx_uinds, clustinds)}.get)
        patchinds = hashmap(hpx_inds)
    # Simply assign to healpix pixels. Fast and stable, but patch areas might strongly vary in size.
    elif method == "healpix":
        eq = SkyCoord(ra_deg, dec_deg, frame='galactic', unit='deg')
        l, b = eq.galactic.l.value, eq.galactic.b.value
        theta = np.radians(90. - b)
        phi = np.radians(l)
        patchinds = ang2pix(healpix_nside, theta, phi).astype(int)
        npatches = len(np.unique(patchinds).flatten())
    elif method != 'kmeans_treecorr':
        raise NotImplementedError

    if verbose:
        t2 = time()
        print("Took %.3f seconds"%(t2-t1))

    # Assign galaxy positions to healpix pixels
    if verbose:
        print("Mapping catalog to healpix grid")
        t1 = time()
    eq = SkyCoord(ra_deg, dec_deg, frame='galactic', unit='deg')
    l, b = eq.galactic.l.value, eq.galactic.b.value
    theta = np.radians(90. - b)
    phi = np.radians(l)
    cat_indices = ang2pix(nside_hash, theta, phi)
    if verbose:
        t2 = time()
        print("Took %.3f seconds"%(t2-t1))

    # Build a hash connecting the galaxies residing in each healpix pixel
    if verbose:
        t1 = time()
        print("Building index hash")
    cat_indhash = build_indexhash(cat_indices)
    if verbose:
        t2 = time()
        print("Took %.3f seconds"%(t2-t1))

    # Construct buffer region around patches
    if verbose:
        print("Building buffer around patches")
        t1 = time()
    _pixarea = nside2pixarea(nside_hash, degrees=True)
    _pixreso = nside2resol(nside_hash, arcmin=True)
    if method == 'kmeans_treecorr':
        _patchcenters = cat.patch_centers
    elif method == 'kmeans_healpix' or method == 'healpix':
        _patchcenters = np.array([[np.mean(ra_deg[patchinds==patchind]), np.mean(dec_deg[patchinds==patchind])] for patchind in range(npatches)])
    else:
        raise NotImplementedError

    cat_patchindices = {}
    cat_patchindices["info"] = {}
    cat_patchindices["info"]["patchextend_deg"] = patchextend_arcmin/60.
    cat_patchindices["info"]["nside_hash"] = nside_hash
    cat_patchindices["info"]["method"] = method
    cat_patchindices["info"]["kmeanshp_maxiter"] = kmeanshp_maxiter
    cat_patchindices["info"]["kmeanshp_tol"] = kmeanshp_tol
    cat_patchindices["info"]["kmeanshp_randomstate"] = kmeanshp_randomstate
    cat_patchindices["info"]["healpix_nside"] = healpix_nside
    cat_patchindices["info"]["patchcenters"] = _patchcenters
    cat_patchindices["info"]["patchareas"] = np.zeros(npatches, dtype=float)
    cat_patchindices["info"]["patch_ngalsinner"] = np.zeros(npatches, dtype=int)
    cat_patchindices["info"]["patch_ngalsouter"] = np.zeros(npatches, dtype=int)
    cat_patchindices["patches"] = {}
    ext_buffer = (patchextend_arcmin+_pixreso)*np.pi/180./60.
    for elpatch in range(npatches):
        if verbose:
            sys.stdout.write("\r%i/%i"%(elpatch+1, npatches))
        patchsel = patchinds==elpatch
        cat_patchindices["patches"][elpatch] = {}

        # Get indices of gals within the inner patch
        galinds_inner = np.argwhere(patchsel).flatten().astype(int)

        # Find healpix pixels in the extended patch
        patch_indices = np.unique(ang2pix(nside_hash, theta[patchsel], phi[patchsel]))
        extpatch_indices = set()
        for pix in patch_indices:
            nextset = set(query_disc(nside=nside_hash,
                                     vec=pix2vec(nside_hash, pix),
                                     radius=ext_buffer))
            extpatch_indices.update(nextset)

        # Assign galaxies to the extended patch
        galinds_ext = set()
        for pix in extpatch_indices:
            try:
                galinds_ext.update(set(cat_indhash[pix]))
            except KeyError:
                pass
        galinds_outer = np.array(list(galinds_ext-set(galinds_inner)), dtype=int)
        cat_patchindices["info"]["patchareas"][elpatch] = _pixarea*len(patch_indices)
        cat_patchindices["info"]["patch_ngalsinner"][elpatch] = len(galinds_inner)
        cat_patchindices["info"]["patch_ngalsouter"][elpatch] = len(galinds_outer)
        cat_patchindices["patches"][elpatch]["inner"] = galinds_inner
        cat_patchindices["patches"][elpatch]["outer"] = galinds_outer
    if verbose:
        t2 = time()
        print("Took %.3f seconds"%(t2-t1))

    return cat_patchindices
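
A usage sketch on a mock catalog (all numbers are illustrative; this assumes orpheus and its dependencies are installed):

```python
import numpy as np
from orpheus.patchutils import gen_cat_patchindices

# Mock catalog: 10^4 points in a ~20x20 deg field
rng = np.random.default_rng(42)
ra  = rng.uniform(0., 20., 10_000)
dec = rng.uniform(-10., 10., 10_000)

patchindices = gen_cat_patchindices(ra, dec, npatches=8,
                                    patchextend_arcmin=60.,
                                    nside_hash=128, verbose=True)
print(patchindices["info"]["patch_ngalsinner"])  # galaxies per inner patch
print(patchindices["info"]["patchareas"])        # approximate patch areas [deg^2]
```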

def toorigin(ras, decs, isinner=None, rotangle=None, inv=False, rotsignflip=False, radec_units="deg"):
    """ Rotates a survey patch such that its center of mass lies at the origin. """
    assert(radec_units in ["rad", "deg"])

    if isinner is None:
        isinner = np.ones(len(ras), dtype=bool)

    # Map (ra, dec) --> (theta, phi)
    if radec_units=="deg":
        decs_rad = decs*np.pi/180.
        ras_rad = ras*np.pi/180.
    else:
        decs_rad = decs
        ras_rad = ras
    thetas = np.pi/2. + decs_rad
    phis = ras_rad

    # Compute rotation angle
    if rotangle is None:
        rotangle = [np.mean(phis[isinner]), np.pi/2.-np.mean(thetas[isinner])]
    thisrot = Rotator(rot=rotangle, deg=False, inv=inv)
    rotatedthetas, rotatedphis = thisrot(thetas, phis, inv=False)
    rotangle_polars = np.exp((-1)**rotsignflip*1J * 2 * thisrot.angle_ref(rotatedthetas, rotatedphis, inv=True))

    # Transform back to (ra, dec)
    ra_rot = rotatedphis
    dec_rot = rotatedthetas - np.pi/2.
    if radec_units=="deg":
        dec_rot *= 180./np.pi
        ra_rot *= 180./np.pi

    return rotangle, ra_rot, dec_rot, rotangle_polars
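
A minimal usage sketch of the rotation helper (illustrative values):

```python
import numpy as np
from orpheus.patchutils import toorigin

# A small mock patch centered at (ra, dec) ~ (40, 30) deg
ra  = np.array([40.0, 40.5, 39.5])
dec = np.array([30.0, 30.2, 29.8])
rotangle, ra_rot, dec_rot, polrot = toorigin(ra, dec, radec_units="deg")
print(rotangle)          # rotation angles fixed by the patch center of mass
print(ra_rot, dec_rot)   # the rotated coordinates
print(np.abs(polrot))    # unit-modulus phase factors for rotating polar (spin-2) fields
```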

def cat2hpx(lon, lat, nside, radec=True, do_counts=False, return_idx=False, return_indices=False, weights=None):
    """
    Convert a catalogue to a HEALPix map of number counts per resolution
    element.

    Parameters
    ----------
    lon, lat : (ndarray, ndarray)
        Coordinates of the sources in degree. If radec=True, assume input is in the icrs
        coordinate system. Otherwise assume input is glon, glat.
    nside : int
        HEALPix nside of the target map
    radec : bool
        Switch between R.A./Dec and glon/glat as input coordinate system.
    do_counts : bool
        Return the number of counts per HEALPix pixel
    return_idx : bool
        Return the set of non-empty HEALPix pixel indices
    return_indices : bool
        Return the per-object HEALPix pixel indices
    weights: None or ndarray
        Needs to be given if each point carries an individual weight

    Returns
    -------
    hpx_map : ndarray
        HEALPix map of the catalogue number counts in Galactic coordinates

    Notes
    -----
    This function is a generalised version of https://stackoverflow.com/a/50495134
    """

    npix = nside2npix(nside)

    if radec:
        # Interpret the input as (ra, dec) and transform to Galactic coordinates
        eq = SkyCoord(lon, lat, frame='icrs', unit='deg')
        l, b = eq.galactic.l.value, eq.galactic.b.value
    else:
        l, b = lon, lat

    # Convert to (theta, phi)
    theta = np.radians(90. - b)
    phi = np.radians(l)

    # Convert to HEALPix indices
    indices = ang2pix(nside, theta, phi)

    if do_counts:
        idx, counts = np.unique(indices, return_counts=True)
    else:
        idx = np.unique(indices).astype(int)
    if weights is not None:
        idx, inv = np.unique(indices, return_inverse=True)
        weights_pix = np.bincount(inv, weights.reshape(-1))

    # Fill the full-sky map
    hpx_map = np.zeros(npix, dtype=int)
    if do_counts:
        hpx_map[idx] = counts
    else:
        hpx_map[idx] = np.ones(len(idx), dtype=int)

    res = ()
    if return_idx:
        res += (idx,)
    res += (hpx_map.astype(int),)
    if weights is not None:
        res += (weights_pix,)
    if return_indices:
        res += (indices,)

    return res
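
A usage sketch for the counts map; note that with the default flags the function returns a 1-tuple (illustrative values, assuming healpy is installed):

```python
import numpy as np
from orpheus.patchutils import cat2hpx

rng = np.random.default_rng(0)
lon = rng.uniform(0., 360., 5000)
lat = np.degrees(np.arcsin(rng.uniform(-1., 1., 5000)))  # uniform on the sphere

(hpx_map,) = cat2hpx(lon, lat, nside=32, radec=True, do_counts=True)
print(hpx_map.sum())   # 5000: every source lands in exactly one pixel
print(hpx_map.size)    # 12288 pixels at nside=32
```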
orpheus/utils.py
ADDED
@@ -0,0 +1,198 @@
import numpy as np
from itertools import combinations_with_replacement, product
import os
import site

def convertunits(unit_in, unit_target):
    '''Returns the factor that converts unit_in into unit_target;
    the supported units are 'rad', 'deg', 'arcmin' and 'arcsec'.'''
    vals = {'rad': 180./np.pi,
            'deg': 1.,
            'arcmin': 1./60.,
            'arcsec': 1./60./60.}
    assert((unit_in in vals.keys()) and (unit_target in vals.keys()))
    return vals[unit_in]/vals[unit_target]
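
A quick check of the conversion factors:

```python
from orpheus.utils import convertunits

print(convertunits('arcmin', 'deg'))   # 0.01666... = 1/60
print(convertunits('rad', 'arcsec'))   # 206264.8...: arcsec per radian
```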

def flatlist(A):
    rt = []
    for i in A:
        if isinstance(i, list):
            rt.extend(flatlist(i))
        else:
            rt.append(i)
    return rt

def get_site_packages_dir():
    return [p for p in site.getsitepackages()
            if p.endswith(("site-packages", "dist-packages"))][0]

def search_file_in_site_package(directory, package):
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.startswith(package):
                return os.path.join(root, file)
    return None

def gen_thetacombis_fourthorder(nbinsr, nthreads, batchsize, batchsize_max, ordered=True, custom=None, verbose=False):

    # Allocate selector for custom bins
    if custom is None:
        customsel = np.ones(nbinsr*nbinsr*nbinsr, dtype=bool)
    else:
        custom = custom.astype(int)
        assert(np.max(custom) < nbinsr*nbinsr*nbinsr)
        assert(np.min(custom) >= 0)
        customsel = np.zeros(nbinsr*nbinsr*nbinsr, dtype=bool)
        customsel[custom] = True

    # Build the bins
    allelbs = []
    thetacombis_batches = []
    nbinsr3 = 0
    tmpind = 0
    for elb1 in range(nbinsr):
        for elb2 in range(nbinsr):
            for elb3 in range(nbinsr):
                valid = True
                if ordered:
                    if elb1>elb2 or elb1>elb3 or elb2>elb3:
                        valid = False
                if valid and customsel[tmpind]:
                    thetacombis_batches.append([tmpind])
                    allelbs.append([elb1, elb2, elb3])
                    nbinsr3 += 1
                tmpind += 1
    thetacombis_batches = np.asarray(thetacombis_batches, dtype=np.int32)
    allelbs = np.asarray(allelbs, dtype=np.int32)
    if batchsize is None:
        batchsize = min(nbinsr3, batchsize_max, nbinsr3/nthreads)
    if verbose:
        print("Using batchsize of %i for radial bins"%batchsize)
    if batchsize==batchsize_max:
        nbatches = np.int32(np.ceil(nbinsr3/batchsize))
    else:
        nbatches = np.int32(nbinsr3/batchsize)
    cumnthetacombis_batches = (np.arange(nbatches+1)*nbinsr3/(nbatches)).astype(np.int32)
    nthetacombis_batches = (cumnthetacombis_batches[1:]-cumnthetacombis_batches[:-1]).astype(np.int32)
    cumnthetacombis_batches[-1] = nbinsr3
    nthetacombis_batches[-1] = nbinsr3-cumnthetacombis_batches[-2]
    thetacombis_batches = thetacombis_batches.flatten().astype(np.int32)
    nbatches = len(nthetacombis_batches)

    return nbinsr3, allelbs, thetacombis_batches, cumnthetacombis_batches, nthetacombis_batches, nbatches
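
A usage sketch for the batching helper; for nbinsr=3 there are 10 ordered triples with elb1 <= elb2 <= elb3 (parameter values are illustrative):

```python
from orpheus.utils import gen_thetacombis_fourthorder

out = gen_thetacombis_fourthorder(nbinsr=3, nthreads=2, batchsize=None,
                                  batchsize_max=16, ordered=True, verbose=True)
nbinsr3, allelbs, combis, cumcombis, ncombis, nbatches = out
print(nbinsr3)            # 10 ordered radial-bin triples
print(allelbs[:3])        # first triples: [0 0 0], [0 0 1], [0 0 2]
print(nbatches, ncombis)  # how the triples are distributed over batches
```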

def gen_n2n3indices_Upsfourth(nmax):
    """ List of flattened indices corresponding to the selected (n2, n3) pairs """
    nmax_alloc = 2*nmax+1
    reconstructed = np.zeros((2*nmax_alloc+1, 2*nmax_alloc+1), dtype=int)
    for _n2 in range(-nmax-1, nmax+2):
        for _n3 in range(-nmax-1, nmax+2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n3 in range(-2*nmax, -nmax-1):
        for _n2 in range(-nmax-1-_n3, nmax+2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n2 in range(-2*nmax-1, -nmax-1):
        for _n3 in range(-nmax-1-_n2, nmax+2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n3 in range(nmax+2, 2*nmax+1):
        for _n2 in range(-nmax-1, nmax+2-_n3):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n2 in range(nmax+2, 2*nmax+2):
        for _n3 in range(-nmax-1, nmax+2-_n2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    _shape = reconstructed.shape
    _inds = np.argwhere((reconstructed > 0).flatten())[:, 0].astype(np.int32)
    _n2s = np.argwhere(reconstructed > 0)[:, 0].astype(np.int32) - nmax_alloc
    _n3s = np.argwhere(reconstructed > 0)[:, 1].astype(np.int32) - nmax_alloc
    return _shape, _inds, _n2s, _n3s

def gen_n2n3indices_Gtildefourth(nmax):
    """ List of flattened indices corresponding to the selected (n2, n3) pairs """
    # Note: the index construction is currently identical to gen_n2n3indices_Upsfourth
    nmax_alloc = 2*nmax+1
    reconstructed = np.zeros((2*nmax_alloc+1, 2*nmax_alloc+1), dtype=int)
    for _n2 in range(-nmax-1, nmax+2):
        for _n3 in range(-nmax-1, nmax+2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n3 in range(-2*nmax, -nmax-1):
        for _n2 in range(-nmax-1-_n3, nmax+2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n2 in range(-2*nmax-1, -nmax-1):
        for _n3 in range(-nmax-1-_n2, nmax+2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n3 in range(nmax+2, 2*nmax+1):
        for _n2 in range(-nmax-1, nmax+2-_n3):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    for _n2 in range(nmax+2, 2*nmax+2):
        for _n3 in range(-nmax-1, nmax+2-_n2):
            reconstructed[nmax_alloc+_n2, nmax_alloc+_n3] += 1
    _shape = reconstructed.shape
    _inds = np.argwhere((reconstructed > 0).flatten())[:, 0].astype(np.int32)
    _n2s = np.argwhere(reconstructed > 0)[:, 0].astype(np.int32) - nmax_alloc
    _n3s = np.argwhere(reconstructed > 0)[:, 1].astype(np.int32) - nmax_alloc
    return _shape, _inds, _n2s, _n3s

def symmetrize_map3_multiscale(map3, return_list=False):
    """
    Symmetrizes the third-order aperture mass over tomographic bin combinations
    and radial bin combinations.
    Assumes map3 to be of shape (8, nbinsz**3, nbinsr**3).
    """

    nbinsz = int(round((map3.shape[1])**(1/3)))
    nbinsr = int(round((map3.shape[2])**(1/3)))

    # Get unique combinations of indices (e.g., r1 <= r2 <= r3)
    r_combs = np.array(list(combinations_with_replacement(range(nbinsr), 3)))
    z_combs = np.array(list(combinations_with_replacement(range(nbinsz), 3)))
    indr3, indz3 = r_combs.shape[0], z_combs.shape[0]

    # Get permutations for each unique combination
    perm_map = np.array([[0, 1, 2], [1, 2, 0], [2, 0, 1],
                         [1, 0, 2], [2, 1, 0], [0, 2, 1]])

    # Create arrays of permuted 3D indices
    # Shape is (6, n_combinations, 3)
    r_perms_3d = r_combs[:, perm_map].transpose(1, 0, 2)
    z_perms_3d = z_combs[:, perm_map].transpose(1, 0, 2)

    # Convert 3D permuted indices to flat indices
    r_powers = np.array([nbinsr**2, nbinsr, 1])
    sel_foots = np.dot(r_perms_3d, r_powers)  # Shape: (6, indr3)
    z_powers = np.array([nbinsz**2, nbinsz, 1])
    zcombis = np.dot(z_perms_3d, z_powers)  # Shape: (6, indz3)

    # Do the averaging
    # Shape is (8, 6_z_perms, indz3, 6_r_perms, indr3)
    all_perms_data = map3[:, zcombis][:, :, :, sel_foots]
    map3_symm = np.mean(all_perms_data, axis=(1, 3))

    # Allocate final result
    res = map3_symm
    if return_list:
        res = (map3_symm, )
        # Rearrange and split the data to match the original list format
        list_data = all_perms_data.transpose(3, 0, 1, 4, 2)
        map3_list = [arr.squeeze(axis=-1) for arr in np.split(list_data, indz3, axis=-1)]
        res += (map3_list,)

    return res
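
A shape-level sketch of the symmetrization (random input, illustrative bin numbers): with nbinsz=2 and nbinsr=4 there are 4 unique z-triples and 20 unique r-triples, so the symmetrized array has shape (8, 4, 20):

```python
import numpy as np
from orpheus.utils import symmetrize_map3_multiscale

nbinsz, nbinsr = 2, 4
map3 = np.random.default_rng(1).normal(size=(8, nbinsz**3, nbinsr**3))
map3_symm = symmetrize_map3_multiscale(map3)
print(map3_symm.shape)  # (8, 4, 20)
```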

def map_ztuples(ntomobins, order):
    """
    Maps indices of the tomographic bin list with (z1,z2,...,zm): zi<n to indices with z1<=z2 etc.
    Example: for ntomobins=3, order=2 we have
    Sorted tuples = [00 01 02 11 12 22], unsorted tuples = [00 01 02 10 11 12 20 21 22]
    --> index_mapper = [0, 1, 2, 1, 3, 4, 2, 4, 5]
    """

    # Build and index the sorted tuples
    sorted_tuples = list(combinations_with_replacement(range(ntomobins), order))
    sorted_tuples_indices = {t: r for r, t in enumerate(sorted_tuples)}

    # Map each unsorted tuple to the index of its sorted counterpart
    index_mapper = np.zeros(ntomobins ** order, dtype=int)
    for idx, t in enumerate(product(range(ntomobins), repeat=order)):
        sorted_tuple = tuple(sorted(t))
        index_mapper[idx] = sorted_tuples_indices[sorted_tuple]

    return len(sorted_tuples), len(index_mapper), index_mapper
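
The docstring example can be reproduced directly:

```python
from orpheus.utils import map_ztuples

nsorted, nflat, index_mapper = map_ztuples(3, 2)
print(nsorted, nflat)  # 6 9
print(index_mapper)    # [0 1 2 1 3 4 2 4 5]
```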

orpheus_npcf-0.2.1.dist-info/METADATA
ADDED
@@ -0,0 +1,67 @@
Metadata-Version: 2.4
Name: orpheus-npcf
Version: 0.2.1
Summary: Compute N-point correlation functions of spin-s fields.
Home-page: https://github.com/lporth93/orpheus
Author: Lucas Porth
License: MIT
Classifier: Development Status :: 4 - Beta
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: astropy>=6
Requires-Dist: healpy>=1.17
Requires-Dist: coverage>=7.6.1
Requires-Dist: numba<=0.62.1,>=0.58
Requires-Dist: numpy<1.27,>=1.22
Requires-Dist: scipy>=1.15
Requires-Dist: scikit-learn
Dynamic: author
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: license
Dynamic: license-file
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

<p align="center">
  <img src="docs/orpheus_logov1.png" alt="Orpheus logo" width="500"/>
</p>

Orpheus is a Python package for the calculation of second-, third- and fourth-order correlation functions of scalar and polar fields such as weak lensing shear. To perform the calculations efficiently, orpheus makes use of a multipole decomposition of the N>2 correlation functions and uses parallelized C code for the heavy lifting.

## Installation, Documentation and Examples
Installation steps, documentation and examples are provided at [orpheus.readthedocs.io](https://orpheus.readthedocs.io/).

### Installation
First clone the repository via:
```shell
git clone git@github.com:lporth93/orpheus.git
```
or
```shell
git clone https://github.com/lporth93/orpheus.git
```
Then navigate to the cloned directory, set up the environment, and install the package:
```shell
cd orpheus
conda env create -f orpheus_env.yaml
conda activate orpheus_env
pip install .
```

### Documentation
In the [documentation](https://orpheus.readthedocs.io/) you can find more information about the algorithms and approximation schemes employed in orpheus, as well as a series of Jupyter notebooks that give examples of how to use the different estimators implemented in orpheus.

## Using the code
As there is no dedicated orpheus paper at the moment, please cite the papers that introduced the functionality implemented in orpheus:
* If you use the three-point functionality, please cite [Porth+2024](https://doi.org/10.1051/0004-6361/202347987)
* If you use the four-point functionality, please cite [Porth+2025](https://arxiv.org/abs/2509.07974)
* If you use the direct estimator functionality, please cite [Porth & Smith 2022](https://doi.org/10.1093/mnras/stab2819)

In each of the papers, you can find the main equations implemented in orpheus.
orpheus_npcf-0.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,19 @@
orpheus/__init__.py,sha256=BQkHVn8Z8s0zxm9rTw-wMgYl1kl27F--16cvXgky_s4,222
orpheus/catalog.py,sha256=AllY23dVt1d4C7Vs3vn45QxEYp_TBxMbH7i7u4-dvPo,57187
orpheus/covariance.py,sha256=iyjQ7fgPSNLbEMn400T02sGG0lML1yzTUkqOL6PoqeM,5660
orpheus/direct.py,sha256=6m04idNLovNnlXaTiiLR01xV5-hJfvDSHx52zFC5DKQ,48057
orpheus/flat2dgrid.py,sha256=JzkmvMfRcQ7N667A6plXP1-ME8vVKxXLW7igVOMSXb4,3089
orpheus/npcf_base.py,sha256=CiSjvSapv_0RMEhIOAsEKXIas7pTsnylrOed1Ni_aBs,44085
orpheus/npcf_fourth.py,sha256=5zj7-4468mOCTw_cOyer3-qjvHYgU56FnbSnBqM7p9s,91354
orpheus/npcf_second.py,sha256=X24QFTfS8YCTgrPpNYQtO5LWDVwPcGDMiJH3gBXWoLg,30679
orpheus/npcf_third.py,sha256=GuMNFRxoTULBfqS_wdVg0CoS7CPnz1vyvyFZXegZudk,95528
orpheus/orpheus_clib.cpython-310-x86_64-linux-gnu.so,sha256=rWlvlJaFKupW2EFVG0_MSJSw7-AAGo9tupz1p1r-uHs,551761
orpheus/patchutils.py,sha256=IEuTLSm84uIdRB_JcM7B36xvaV9NI31Ij5rDGkaYX8Q,15138
orpheus/utils.py,sha256=AU8VPdTh_PB4LdgZxex5gCTP1ERXthyuwWwNBHCTw4E,8598
orpheus_npcf.libs/libgomp-8949ffbe.so.1.0.0,sha256=QmRceMRE0kKk51YfPxg65utFTBYay_HEet-h-X65kMQ,349217
orpheus_npcf-0.2.1.dist-info/METADATA,sha256=5VoHn-bHgZcBjmbgO2vZtz743crEPt2itnKhEvCiXSo,2625
orpheus_npcf-0.2.1.dist-info/WHEEL,sha256=uEjkgWoqFUqGUEWmvjbn2GyypyJC7tX5jyOGRid8u80,113
orpheus_npcf-0.2.1.dist-info/top_level.txt,sha256=EGQN7bZMbZZGcbqsLDoUVjVl6glgjt9qmT6FZt5N-Yo,8
orpheus_npcf-0.2.1.dist-info/RECORD,,
orpheus_npcf-0.2.1.dist-info/licenses/LICENSE,sha256=ixuiBLtpoK3iv89l7ylKkg9rs2GzF9ukPH7ynZYzK5s,35148
orpheus_npcf-0.2.1.dist-info/sboms/auditwheel.cdx.json,sha256=ElfymN-vGCppGaF10n74SgWyGbaztVRc41PcRwystKk,1286