ler-0.3.8-py3-none-any.whl → ler-0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



ler/utils/plots.py CHANGED
@@ -63,13 +63,14 @@ def param_plot(
  Examples
  ----------
  >>> import matplotlib.pyplot as plt
+ >>> from ler.utils import param_plot
  >>> from ler.rates import LeR
- >>> ler = LeR()
+ >>> ler = LeR(verbose=False)
  >>> param = ler.unlensed_cbc_statistics();
  >>> rate, param_detectable = ler.unlensed_rate()
  >>> plt.figure(figsize=(6, 4))
- >>> ler.param_plot(param_name='zs', param_dict=param, plot_label='all events')
- >>> ler.param_plot(param_name='zs', param_dict=param_detectable, plot_label='detectable events')
+ >>> param_plot(param_name='zs', param_dict=param, plot_label='all events')
+ >>> param_plot(param_name='zs', param_dict=param_detectable, plot_label='detectable events')
  >>> plt.xlabel('source redshift')
  >>> plt.ylabel('probability density')
  >>> plt.title('source redshift distribution')
@@ -102,7 +103,7 @@ def param_plot(
  plt.plot(x, kde(x), label=plot_label+" kde")
  plt.legend()

- def relative_mu_dt_unlensed(param, size=100):
+ def relative_mu_dt_unlensed(param, size=100, randomize=True):
  """
  Function to generate relative magnification vs time delay difference for unlensed samples.

@@ -111,249 +112,291 @@ def relative_mu_dt_unlensed(param, size=100):
  param : `dict`
  dictionary of unlensed GW source parameters.
  unlensed_param.keys() = ['m1', 'm2', 'z', 'snr', 'theta_jn', 'ra', 'dec', 'psi', 'phase', 'geocent_time']
+ size : `int`
+ number of samples.
+ default size = 100.
+ randomize : `bool`
+ if True, it will randomize the samples.
+ default randomize = True.

  Returns
  ----------
  dmu : `float.array`
- relative magnification.
+ relative magnification: abs(mu2/mu1) or abs(dl1/dl2)**2.
  dt : `float.array`
- relative time delay.
-
+ relative time delay: abs(t1-t2) in days.
  """

  t = param["geocent_time"]
  mu = param["luminosity_distance"]
-
  len_ = len(t)
- t_ = []
- mu_ = []
- while len(t_) < size:
- idx1 = np.random.choice(np.arange(0,len_), size, replace=False)
- idx2 = np.random.choice(np.arange(0,len_), size, replace=False)
- t_.append(t[idx2] - t[idx1])
- mu_.append(mu[idx2] / mu[idx1])
-
- dt = np.abs(np.array(t_)) / (60 * 60 * 24) # in days
- dmu = np.sqrt(np.abs(np.array(mu_)))
-
- return (dmu, dt)
-
- def relative_mu_dt_lensed(lensed_param, snr_threshold=[8.0, 8.0]):
+ # randomize it
+ if randomize:
+ idx_ = np.random.permutation(len_)
+ t = t[idx_]
+ mu = mu[idx_]
+
+ # Ensure enough unique pairs can be formed
+ if size > (len(t) * (len(t) - 1)) // 2:
+ raise ValueError(f"size should be less than the number of unique pairs {len(t) * (len(t) - 1) // 2}")
+
+ # Generate unique pairs
+ # find idx1 and idx2
+ idx1 = np.array([])
+ idx2 = np.array([])
+ while len(idx1) < size:
+ idx1_ = np.random.choice(len_, size=size, replace=True)
+ idx2_ = np.random.choice(len_, size=size, replace=True)
+ idx1 = np.concatenate((idx1, idx1_))
+ idx2 = np.concatenate((idx2, idx2_))
+ idx = np.where(idx1 != idx2)[0]
+ idx1 = idx1[idx]
+ idx2 = idx2[idx]
+ idx1 = idx1[:size].astype(int)
+ idx2 = idx2[:size].astype(int)
+
+ dt = abs(t[idx1] - t[idx2]) / (60 * 60 * 24) # in days
+ dmu = abs(mu[idx1]/mu[idx2])**2
+
+ return dmu, dt
+
+ def relative_mu_dt_lensed(
+ lensed_param,
+ snr_threshold=[8.0, 8.0],
+ classification_type='morse_phase'
+ ):
  """
  Function to classify the lensed images wrt to the morse phase difference.

- Parameters
- ----------
- lensed_param : `dict`
- dictionary of lensed GW source parameters, lens galaxy parameters and image paramters.
- lensed_param.keys() = ['zl', 'zs', 'sigma', 'q', 'e1', 'e2', 'gamma1', 'gamma2', 'Dl',
- 'Ds', 'Dls', 'theta_E', 'gamma', 'mass_1', 'mass_2', 'mass_1_source', 'mass_2_source',
- 'luminosity_distance', 'theta_jn', 'psi', 'phase', 'geocent_time', 'ra', 'dec', 'n_images',
- 'x0_image_positions', 'x1_image_positions', 'magnifications', 'time_delays', 'traces',
- 'determinants', 'image_type', 'weights', 'optimal_snr_net', 'L1', 'H1', 'V1']
- snr_threshold : `float`
- threshold for detection signal to noise ratio.
- e.g. snr_threshold = [8.,8.] or [8.,6.] for subthreshold
-
- Returns
- ----------
- mu_rel0 : `float.array`
- relative magnification for 0 degree phase difference.
- dt_rel0 : `float.array`
- relative time delay for 0 degree phase difference.
- mu_rel90 : `float.array`
- relative magnification for 90 degree phase difference.
- dt_rel90 : `float.array`
- relative time delay for 90 degree phase difference.
+
  """

  # get magnifications, time_delays and snr
- mu = np.nan_to_num(lensed_param["magnifications"])
- dt = np.nan_to_num(lensed_param["time_delays"])
- snr = np.nan_to_num(lensed_param["optimal_snr_net"])
-
- # for 0 degree phase difference
- # get the index of the image which cross the threshold
- # get snr_threshold sorted first in descending order
- snr_threshold = -np.sort(-np.array(snr_threshold))
- # for type I
- snr1 = -np.sort(-snr[:, [0, 1]], axis=1)
- # for type II
- snr2 = -np.sort(-snr[:, [2, 3]], axis=1)
-
- # checking for zero values
- # check for threshold condition
- idx1, idx2 = [], []
- for i in range(len(snr)):
- if (
- any(x != 0.0 for x in snr1[i])
- and snr1[i][0] > snr_threshold[0]
- and snr1[i][1] > snr_threshold[1]
- ):
- idx1.append(i)
- if (
- any(x != 0.0 for x in snr2[i])
- and snr2[i][0] > snr_threshold[0]
- and snr2[i][1] > snr_threshold[1]
- ):
- idx2.append(i)
-
- # combine magnifications and time_delays
- mu_ = np.concatenate((mu[idx1][:, [0, 1]], mu[idx2][:, [2, 3]]), axis=0)
- dt_ = np.concatenate((dt[idx1][:, [0, 1]], dt[idx2][:, [2, 3]]), axis=0) / (
- 60 * 60 * 24
- ) # to days
-
- # relative magnification
- mu_rel0 = np.abs(mu_[:, 1] / mu_[:, 0])
- # relative time delay
- dt_rel0 = np.abs(dt_[:, 1] - dt_[:, 0])
-
- # for 90 degree phase difference
- # for type I
- snr1 = -np.sort(-snr[:, [0, 2]], axis=1)
- # for type II
- snr2 = -np.sort(-snr[:, [1, 3]], axis=1)
-
- # checking for zero values
- # check for threshold condition
- idx1, idx2 = [], []
- for i in range(len(snr)):
- if (
- any(x != 0.0 for x in snr1[i])
- and snr1[i][0] > snr_threshold[0]
- and snr1[i][1] > snr_threshold[1]
- ):
- idx1.append(i)
- if (
- any(x != 0.0 for x in snr2[i])
- and snr2[i][0] > snr_threshold[0]
- and snr2[i][1] > snr_threshold[1]
- ):
- idx2.append(i)
-
- # combine magnifications and time_delays
- mu_ = np.concatenate((mu[idx1][:, [0, 2]], mu[idx2][:, [1, 3]]), axis=0)
- dt_ = np.concatenate((dt[idx1][:, [0, 2]], dt[idx2][:, [1, 3]]), axis=0) / (
- 60 * 60 * 24
- ) # in days
-
- # relative magnification
- mu_rel90 = np.abs(mu_[:, 1] / mu_[:, 0])
- # relative time delay
- dt_rel90 = np.abs(dt_[:, 1] - dt_[:, 0])
-
- return (mu_rel0, dt_rel0, mu_rel90, dt_rel90)
+ mu = lensed_param["magnifications"]
+ dt = lensed_param["time_delays"]
+ snr = lensed_param["optimal_snr_net"]
+ image_type = lensed_param["image_type"]
+
+ # pair images wrt to image_type
+ if classification_type == 'morse_phase':
+ dt_rel0 = []
+ mu_rel0 = []
+ dt_rel90 = []
+ mu_rel90 = []
+ for i in range(len(image_type)):
+ if image_type[i,0]==image_type[i,1]:
+ # snr check
+ # below will also take care of the nan values
+ if snr[i,0]>snr_threshold[0] and snr[i,1]>snr_threshold[1]:
+ dt_rel0.append(abs(dt[i,1]-dt[i,0])/ (60 * 60 * 24))
+ mu_rel0.append(abs(mu[i,1]/mu[i,0]))
+ else:
+ if snr[i,0]>snr_threshold[0] and snr[i,1]>snr_threshold[1]:
+ dt_rel90.append(abs(dt[i,1]-dt[i,0])/ (60 * 60 * 24))
+ mu_rel90.append(abs(mu[i,1]/mu[i,0]))
+ if image_type[i,0]==image_type[i,2]:
+ # snr check
+ # below will also take care of the nan values
+ if snr[i,0]>snr_threshold[0] and snr[i,2]>snr_threshold[1]:
+ dt_rel0.append(abs(dt[i,2]-dt[i,0])/ (60 * 60 * 24))
+ mu_rel0.append(abs(mu[i,2]/mu[i,0]))
+ else:
+ if snr[i,0]>snr_threshold[0] and snr[i,2]>snr_threshold[1]:
+ dt_rel90.append(abs(dt[i,2]-dt[i,0])/ (60 * 60 * 24))
+ mu_rel90.append(abs(mu[i,2]/mu[i,0]))
+ if image_type[i,0]==image_type[i,3]:
+ # snr check
+ # below will also take care of the nan values
+ if snr[i,0]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_rel0.append(abs(dt[i,3]-dt[i,0])/ (60 * 60 * 24))
+ mu_rel0.append(abs(mu[i,3]/mu[i,0]))
+ else:
+ if snr[i,0]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_rel90.append(abs(dt[i,3]-dt[i,0])/ (60 * 60 * 24))
+ mu_rel90.append(abs(mu[i,3]/mu[i,0]))
+ if image_type[i,1]==image_type[i,2]:
+ # snr check
+ # below will also take care of the nan values
+ if snr[i,1]>snr_threshold[0] and snr[i,2]>snr_threshold[1]:
+ dt_rel0.append(abs(dt[i,2]-dt[i,1])/ (60 * 60 * 24))
+ mu_rel0.append(abs(mu[i,2]/mu[i,1]))
+ else:
+ if snr[i,1]>snr_threshold[0] and snr[i,2]>snr_threshold[1]:
+ dt_rel90.append(abs(dt[i,2]-dt[i,1])/ (60 * 60 * 24))
+ mu_rel90.append(abs(mu[i,2]/mu[i,1]))
+ if image_type[i,1]==image_type[i,3]:
+ # snr check
+ # below will also take care of the nan values
+ if snr[i,1]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_rel0.append(abs(dt[i,3]-dt[i,1])/ (60 * 60 * 24))
+ mu_rel0.append(abs(mu[i,3]/mu[i,1]))
+ else:
+ if snr[i,1]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_rel90.append(abs(dt[i,3]-dt[i,1])/ (60 * 60 * 24))
+ mu_rel90.append(abs(mu[i,3]/mu[i,1]))
+ if image_type[i,2]==image_type[i,3]:
+ # snr check
+ # below will also take care of the nan values
+ if snr[i,2]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_rel0.append(abs(dt[i,3]-dt[i,2])/ (60 * 60 * 24))
+ mu_rel0.append(abs(mu[i,3]/mu[i,2]))
+ else:
+ if snr[i,2]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_rel90.append(abs(dt[i,3]-dt[i,2])/ (60 * 60 * 24))
+ mu_rel90.append(abs(mu[i,3]/mu[i,2]))
+
+ return {
+ "dt_rel0": np.array(dt_rel0), "mu_rel0": np.array(mu_rel0),
+ "dt_rel90": np.array(dt_rel90), "mu_rel90": np.array(mu_rel90),
+ }
+
+ if classification_type == 'arrival_time':
+ print('classification_type = arrival_time')
+ print('make sure that the images are sorted wrt to arrival time')
+ print('direct output from "ler" should be sorted')
+ dt_12, dt_13, dt_14, dt_23, dt_24, dt_34 = [], [], [], [], [], []
+ mu_12, mu_13, mu_14, mu_23, mu_24, mu_34 = [], [], [], [], [], []
+
+ for i in range(len(image_type)):
+ if snr[i,0]>snr_threshold[0] and snr[i,1]>snr_threshold[1]:
+ dt_12.append(abs(dt[i,1]-dt[i,0])/ (60 * 60 * 24))
+ mu_12.append(abs(mu[i,1]/mu[i,0]))
+ if snr[i,0]>snr_threshold[0] and snr[i,2]>snr_threshold[1]:
+ dt_13.append(abs(dt[i,2]-dt[i,0])/ (60 * 60 * 24))
+ mu_13.append(abs(mu[i,2]/mu[i,0]))
+ if snr[i,0]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_14.append(abs(dt[i,3]-dt[i,0])/ (60 * 60 * 24))
+ mu_14.append(abs(mu[i,3]/mu[i,0]))
+ if snr[i,1]>snr_threshold[0] and snr[i,2]>snr_threshold[1]:
+ dt_23.append(abs(dt[i,2]-dt[i,1])/ (60 * 60 * 24))
+ mu_23.append(abs(mu[i,2]/mu[i,1]))
+ if snr[i,1]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_24.append(abs(dt[i,3]-dt[i,1])/ (60 * 60 * 24))
+ mu_24.append(abs(mu[i,3]/mu[i,1]))
+ if snr[i,2]>snr_threshold[0] and snr[i,3]>snr_threshold[1]:
+ dt_34.append(abs(dt[i,3]-dt[i,2])/ (60 * 60 * 24))
+ mu_34.append(abs(mu[i,3]/mu[i,2]))
+
+ return {
+ "dt_12": np.array(dt_12), "mu_12": np.array(mu_12),
+ "dt_13": np.array(dt_13), "mu_13": np.array(mu_13),
+ "dt_14": np.array(dt_14), "mu_14": np.array(mu_14),
+ "dt_23": np.array(dt_23), "mu_23": np.array(mu_23),
+ "dt_24": np.array(dt_24), "mu_24": np.array(mu_24),
+ "dt_34": np.array(dt_34), "mu_34": np.array(mu_34),
+ }

  def mu_vs_dt_plot(
  x_array,
  y_array,
- savefig=False,
- ax=None,
- colors="blue",
- linestyles="-",
- origin="upper",
+ xscale = 'log10',
+ yscale = 'log10',
  alpha=0.6,
- extent=[1e-2, 5e2, 1e-2, 1e2],
- contour_levels=[0.10, 0.40, 0.68, 0.95],
+ extent=None,
+ contour_levels=[10, 40, 68, 95],
+ colors=['blue', 'blue', 'blue', 'blue', 'blue'],
  ):
  """
- Function to generate 2D KDE and plot the relative magnification vs time delay difference for lensed samples.
-
- Parameters
- ----------
- x_array : `float.array`
- x array.
- y_array : `float.array`
- y array.
- xlabel : `str`
- x label.
- ylabel : `str`
- y label.
- title : `str`
- title.
- savefig : `bool`
- if True, it will save the figure.
- default savefig = False.
- ax : `matplotlib.axes`
- matplotlib axes.
- default ax = None.
- colors : `str`
- color of the plot.
- default colors = 'blue'.
- linestyles : `str`
- linestyle of the plot.
- default linestyles = '-'.
- origin : `str`
- origin of the plot.
- default origin = 'upper'.
- alpha : `float`
- alpha of the plot.
- default alpha = 0.6.
- extent : `list`
- extent of the plot.
- default extent = [1e-2,5e2,1e-2,1e2].
- contour_levels : `list`
- contour levels of the plot.
- default contour_levels = [0.10,0.40,0.68,0.95] which corresponds to 1,2,3,4 sigma.
+ Function to generate 2D KDE and plot the relative magnification vs time delay difference for lensed samples.
+
+ Parameters
+ ----------
+ x_array : `float.array`
+ x array.
+ y_array : `float.array`
+ y array.
+ xscale : `str`
+ x-axis scale.
+ default xscale = 'log10'. other options: 'log', None.
+ yscale : `str`
+ y-axis scale.
+ default yscale = 'log10'. other options: 'log', None.
+ alpha : `float`
+ transparency of the contour plot.
+ default alpha = 0.6.
+ extent : `list`
+ extent of the plot.
+ default extent = None. It will consider the full range of x_array and y_array.
+ contour_levels : `list`
+ levels for contour plot.
+ default contour_levels = [10, 40, 68, 95].
+ colors : `str`
+ colors for contour plot.
+ default colors = ['blue', 'blue', 'blue', 'blue', 'blue'].
+
+ Examples
+ ----------
+ >>> import numpy as np
+ >>> import matplotlib.pyplot as plt
+ >>> from ler.utils import param_plot, mu_vs_dt_plot, get_param_from_json, relative_mu_dt_unlensed, relative_mu_dt_lensed
+ >>> # get the parameters. For data generation, refer to the 'LeR complete example' in the documentation.
+ >>> unlensed_param = get_param_from_json('ler_data/unlensed_param.json')
+ >>> unlensed_param_detectable = get_param_from_json('ler_data/unlensed_param_detectable.json')
+ >>> lensed_param = get_param_from_json('ler_data/lensed_param.json')
+ >>> lensed_param_detectable = get_param_from_json('ler_data/lensed_param_detectable.json')
+ >>> # get the relative mu and dt
+ >>> ans = relative_mu_dt_lensed(lensed_param_detectable)
+ >>> dmu, dt = relative_mu_dt_unlensed(unlensed_param_detectable, size=1000, randomize=True)
+ >>> # plot
+ >>> plt.figure(figsize=(4, 4))
+ >>> mu_vs_dt_plot(ans['dt_rel90'], ans['mu_rel90'], colors='b')
+ >>> mu_vs_dt_plot(ans['dt_rel0'], ans['mu_rel0'], colors='g')
+ >>> mu_vs_dt_plot(dt, dmu, colors='r')
+ >>> # Create proxy artists for legend
+ >>> proxy1 = plt.Line2D([0], [0], linestyle='-', color='b', label=r'Lensed ($\Delta \phi=90$)')
+ >>> proxy2 = plt.Line2D([0], [0], linestyle='-', color='g', label=r'Lensed ($\Delta \phi=0$)')
+ >>> proxy3 = plt.Line2D([0], [0], linestyle='-', color='r', label=r'Unlensed')
+ >>> plt.legend(handles=[proxy1, proxy2, proxy3], loc='upper left')
+ >>> plt.xlim(-5, 2.5)
+ >>> plt.ylim(-2.5, 2.5)
+ >>> plt.grid(alpha=0.4)
+ >>> plt.show()
+ """

- Returns
- ----------
- None
+ x_min = min(x_array)
+ x_max = max(x_array)
+ y_min = min(y_array)
+ y_max = max(y_array)

- """
  # applying cutt-off
- idx = (
- (x_array > extent[0])
- & (x_array < extent[1])
- & (y_array > extent[2])
- & (y_array < extent[3])
- )
- x_array = x_array[idx]
- y_array = y_array[idx]
-
- xu = np.log10(x_array)
- yu = np.log10(y_array)
-
- xmin = np.log10(1e-2)
- xmax = np.log10(5e2)
- ymin = np.log10(1e-2)
- ymax = np.log10(1e2)
-
- xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
- positions = np.vstack([xx.ravel(), yy.ravel()])
- values = np.vstack([xu, yu])
- kernel = gaussian_kde(values)
- ff = np.reshape(kernel(positions).T, xx.shape)
-
- zsort = -np.sort(-ff.flatten())
-
- cumz = np.cumsum(zsort) / np.sum(zsort)
- spl = interp1d(cumz, zsort, kind="cubic", fill_value="extrapolate")
-
- levels = []
- for i in contour_levels:
- levels.append(spl(i))
- levels = np.array(levels)[::-1]
-
- ax.contour(
- np.rot90(ff),
- levels,
- colors=colors,
- linestyles=linestyles,
- origin=origin,
- alpha=alpha,
- extent=np.log10(extent),
- )
-
- # labels
- ax.xlabel(r"$log_{10}\Delta t$ (days)")
- ax.ylabel(r"$\Delta log_{10}\mu$")
- ax.title(r"relative magnification vs relative time delay")
-
- # save figure
- if savefig:
- ax.savefig("mu_vs_dt.png", dpi=300, bbox_inches="tight")
-
- return None
+ if extent:
+ x_min, x_max, y_min, y_max = extent
+ x_array = x_array[(x_array >= x_min) & (x_array <= x_max)]
+ y_array = y_array[(y_array >= y_min) & (y_array <= y_max)]
+
+ # convert to log scale
+ if xscale == 'log10':
+ x_array = np.log10(x_array)
+ x_min = np.log10(x_min)
+ x_max = np.log10(x_max)
+ if yscale == 'log10':
+ y_array = np.log10(y_array)
+ y_min = np.log10(y_min)
+ y_max = np.log10(y_max)
+ if xscale == 'log':
+ x_array = np.log(x_array)
+ x_min = np.log(x_min)
+ x_max = np.log(x_max)
+ if yscale == 'log':
+ y_array = np.log(y_array)
+ y_min = np.log(y_min)
+ y_max = np.log(y_max)
+
+ # Perform a kernel density estimation (KDE)
+ xy = np.vstack([x_array, y_array])
+ kde = gaussian_kde(xy)(xy)
+
+ # Define the levels for contour as percentiles of the density
+ levels = np.percentile(kde, [10, 40, 68, 95])
+
+ # Create a grid for contour plot
+ xgrid = np.linspace(x_min, x_max, 1000)
+ ygrid = np.linspace(y_min, y_max, 1000)
+ X1, Y1 = np.meshgrid(xgrid, ygrid)
+ Z1 = gaussian_kde(xy)(np.vstack([X1.ravel(), Y1.ravel()])).reshape(X1.shape)
+
+ if isinstance(colors, str):
+ colors = [colors] * len(contour_levels)
+
+ plt.contour(X1, Y1, Z1, levels=levels, colors=colors, alpha=alpha)

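In this release, relative_mu_dt_unlensed draws random non-identical index pairs (with a ValueError guard on the number of unique pairs), relative_mu_dt_lensed pairs images by their image_type and returns a dictionary instead of a tuple, and mu_vs_dt_plot draws percentile contours of a 2D KDE on the current matplotlib axes. Below is a minimal synthetic-data sketch of that new call pattern; the event count, parameter values and image_type codes are placeholders chosen only to exercise the code paths shown above, and only the dictionary keys actually read by these functions are populated.

import numpy as np
import matplotlib.pyplot as plt
from ler.utils import relative_mu_dt_unlensed, relative_mu_dt_lensed, mu_vs_dt_plot

rng = np.random.default_rng(1)
n_events = 200

# Unlensed samples: only 'geocent_time' (s) and 'luminosity_distance' are read here.
unlensed_param = {
    "geocent_time": rng.uniform(0.0, 3.15e7, n_events),
    "luminosity_distance": rng.uniform(400.0, 4000.0, n_events),
}
# size must stay below n_events*(n_events-1)/2, otherwise the new ValueError is raised.
dmu, dt = relative_mu_dt_unlensed(unlensed_param, size=300, randomize=True)

# Lensed samples: per-event arrays with four image columns, as indexed in the new code.
# The image_type codes are placeholders; pairs with equal codes end up in the *_rel0
# arrays, pairs with unequal codes in the *_rel90 arrays.
lensed_param = {
    "magnifications": rng.uniform(0.5, 10.0, (n_events, 4)),
    "time_delays": rng.uniform(0.0, 8.6e6, (n_events, 4)),
    "optimal_snr_net": rng.uniform(9.0, 30.0, (n_events, 4)),  # all above [8, 8]
    "image_type": np.tile([1, 1, 2, 2], (n_events, 1)),
}
ans = relative_mu_dt_lensed(lensed_param, snr_threshold=[8.0, 8.0],
                            classification_type='morse_phase')
print(sorted(ans))  # ['dt_rel0', 'dt_rel90', 'mu_rel0', 'mu_rel90']

# mu_vs_dt_plot draws onto the current axes, so the figure is set up outside the call.
plt.figure(figsize=(4, 4))
mu_vs_dt_plot(ans['dt_rel0'], ans['mu_rel0'], colors='g')
mu_vs_dt_plot(dt, dmu, colors='r')
plt.xlabel(r'$\log_{10} \Delta t$ [days]')
plt.ylabel(r'$\log_{10} \mu_{\rm rel}$')
plt.show()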
ler/utils/utils.py CHANGED
@@ -69,15 +69,37 @@ def load_json(file_name):

  return param

- def append_json(file_name, new_dictionary, old_dictionary=None, replace=False):
- """Append and update a json file with a dictionary.
+ def save_json(file_name, param):
+ """Save a dictionary as a json file.

  Parameters
  ----------
  file_name : `str`
  json file name for storing the parameters.
+ param : `dict`
+ dictionary to be saved as a json file.
+ """
+ with open(file_name, "w", encoding="utf-8") as write_file:
+ json.dump(param, write_file)
+
+ def append_json(file_name, new_dictionary, old_dictionary=None, replace=False):
+ """
+ Append (values with corresponding keys) and update a json file with a dictionary. There are four options:
+
+ 1. If old_dictionary is provided, the values of the new dictionary will be appended to the old dictionary and save in the 'file_name' json file.
+ 2. If replace is True, replace the json file (with the 'file_name') content with the new_dictionary.
+ 3. If the file does not exist, create a new one with the new_dictionary.
+ 4. If none of the above, append the new dictionary to the content of the json file.
+
+ Parameters
+ ----------
+ file_name : `str`
+ json file name for storing the parameters.
  new_dictionary : `dict`
  dictionary to be appended to the json file.
+ old_dictionary : `dict`, optional
+ If provided the values of the new dictionary will be appended to the old dictionary and save in the 'file_name' json file.
+ Default is None.
  replace : `bool`, optional
  If True, replace the json file with the dictionary. Default is False.

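save_json is new in this release and append_json's docstring now spells out four behaviours. The following is a small usage sketch of those documented modes; the file name and the toy parameter dictionaries are made up for illustration, and plain Python lists are used so that the plain json.dump call in save_json can serialize them.

from ler.utils import save_json, append_json, load_json

batch1 = {"zs": [0.4, 1.1, 2.3], "optimal_snr_net": [12.0, 7.5, 9.1]}
batch2 = {"zs": [0.8, 1.9], "optimal_snr_net": [10.2, 8.4]}

# Plain save of a parameter dictionary (new in 0.4.0).
save_json("toy_param.json", batch1)

# Option 4 of the docstring: file exists, no old_dictionary, replace=False ->
# batch2's values are appended key-by-key to the stored content.
append_json("toy_param.json", batch2)

# Option 1: merge against an explicitly supplied old dictionary and save to the file.
append_json("toy_param.json", batch2, old_dictionary=batch1)

# Option 2: replace=True makes the file hold batch2 only.
append_json("toy_param.json", batch2, replace=True)

print(load_json("toy_param.json"))  # per option 2, only batch2 remains on disk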
ler-0.3.8.dist-info/METADATA → ler-0.4.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ler
- Version: 0.3.8
+ Version: 0.4.0
  Summary: Gravitational waves Lensing Rates
  Home-page: https://github.com/hemantaph/ler
  Author: Hemantakumar
@@ -11,11 +11,11 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: setuptools >=65.5.0
  Requires-Dist: matplotlib >=3.4.2
- Requires-Dist: pycbc >=1.18.0
+ Requires-Dist: pycbc >=2.0.4
  Requires-Dist: numpy >=1.18
  Requires-Dist: numba >=0.57.1
  Requires-Dist: bilby >=1.0.2
- Requires-Dist: gwsnr >=0.2.0
+ Requires-Dist: gwsnr >=0.3.2
  Requires-Dist: scipy <1.14.0
  Requires-Dist: lenstronomy >=1.10.4
  Requires-Dist: astropy >=5.1
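The dependency floors for pycbc and gwsnr move up in this release. A quick way to check an existing environment against the new minimums before upgrading is sketched below; it assumes the third-party packaging module is available, which is not one of ler's declared dependencies.

from importlib.metadata import version, PackageNotFoundError
from packaging.version import Version

# Minimum versions raised in ler 0.4.0's metadata.
new_minimums = {"pycbc": "2.0.4", "gwsnr": "0.3.2"}

for name, floor in new_minimums.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (needs >= {floor})")
        continue
    status = "ok" if Version(installed) >= Version(floor) else f"needs >= {floor}"
    print(f"{name}: {installed} ({status})")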
ler-0.3.8.dist-info/RECORD → ler-0.4.0.dist-info/RECORD
@@ -1,24 +1,25 @@
- ler/__init__.py,sha256=mTa4tgjjmfWuadm6yhsGqo-92BYy_Ad0lbBcvRZnJn4,804
+ ler/__init__.py,sha256=Ip4IJByGm_ESuIsV_ICkM50WwUIjnk_G1DE-S1TjDPA,804
  ler/gw_source_population/__init__.py,sha256=HG0ve5wTpBDN2fNMxHLnoOqTz-S0jXM_DsWEJ5PEHAw,126
- ler/gw_source_population/cbc_source_parameter_distribution.py,sha256=jSqAE4tm-4ZmQiKRpxZ7-LGAP_uGpEMGX197Eh2Iuyg,67862
+ ler/gw_source_population/cbc_source_parameter_distribution.py,sha256=e0-Sqcx7WblWkqX1WQg6sCzrpMbkQ5tUcMu9sbBcp9Y,67455
  ler/gw_source_population/cbc_source_redshift_distribution.py,sha256=o2qAM_-9SeLxxfGwqXrdVWTCeEAaXVan_OPDd4jrplg,28559
  ler/gw_source_population/jit_functions.py,sha256=aQV9mv3IY5b3OLiPeXmoLWJ_TbFUS9M1OgnIyIY3eX4,8668
  ler/image_properties/__init__.py,sha256=XfJFlyZuOrKODT-z9WxjR9mI8eT399YJV-jzcJKTqGo,71
- ler/image_properties/image_properties.py,sha256=as-6ZKoA7bvOTDNsCE0o9LyoKzfdHip-nRRq86UkjX0,25166
+ ler/image_properties/image_properties.py,sha256=QmZ27y4CFR-DvzBxJewgaH3kEAXW6UDPxbyI7zwjdP4,25302
  ler/image_properties/multiprocessing_routine.py,sha256=hYnQTM7PSIj3X-5YNDqMxH9UgeXHUPPdLG70h_r6sEY,18333
  ler/lens_galaxy_population/__init__.py,sha256=TXk1nwiYy0tvTpKs35aYK0-ZK63g2JLPyGG_yfxD0YU,126
  ler/lens_galaxy_population/jit_functions.py,sha256=tCTcr4FWyQXH7SQlHsUWeZBpv4jnG00DsBIljdWFs5M,8472
- ler/lens_galaxy_population/lens_galaxy_parameter_distribution.py,sha256=074RNd1EPilObL0dh7NLW1bxl9xRqv0TqdFkrAx4Ebw,48246
+ ler/lens_galaxy_population/lens_galaxy_parameter_distribution.py,sha256=_o_kaIIqps9i-5RYQ8PIaofblAwTBSIokjYPEbo1Rh4,48278
  ler/lens_galaxy_population/mp.py,sha256=TPnFDEzojEqJzE3b0g39emZasHeeaeXN2q7JtMcgihk,6387
- ler/lens_galaxy_population/optical_depth.py,sha256=fcZSXaIjx7v8WDZuiIIMEqR1gtKJc4V4gTnQpVlTpas,42437
+ ler/lens_galaxy_population/optical_depth.py,sha256=rZ_Inpw7ChpFdDLp3kJrCmA0PL3RxN6T_W_NTFhj_ko,42542
  ler/rates/__init__.py,sha256=N4li9NouSVjZl5HIhyuiKKRyrpUgQkBZaUeDgL1m4ic,43
- ler/rates/gwrates.py,sha256=2svyxdEzChiK4YDN4rRVrm0Yhh0TXrRXxobg9d1sF-4,40865
- ler/rates/ler.py,sha256=QySWLxrjP9XJGH423lHlwL006znde-lNvxTTV3O7CFI,84164
+ ler/rates/gwrates.py,sha256=akw6rKAkETr_ERmymVJx3APRXs0XqqFccZ-LIzAV4jM,58465
+ ler/rates/ler copy.py,sha256=BlnGlRISUwiWUhUNwp32_lvh7tHdT-d1VDhFelwKO_c,101873
+ ler/rates/ler.py,sha256=pBFTeYlranu5A81yENC3lUC5ebalUmr-g8neyWJUhgk,106112
  ler/utils/__init__.py,sha256=JWF9SKoqj1BThpV_ynfoyUeU06NQQ45DHCUGaaMSp_8,42
- ler/utils/plots.py,sha256=D8MjTrfyE4cc0D6KBu1Mw4VMllp9Yp73bSi2cqPmNOM,10742
- ler/utils/utils.py,sha256=FI_o6klrkZGzDlKYxSV3S5ovGQFfCNuNoc_m2qBD7pg,27945
- ler-0.3.8.dist-info/LICENSE,sha256=9LeXXC3WaHBpiUGhLVgOVnz0F12olPma1RX5zgpfp8Q,1081
- ler-0.3.8.dist-info/METADATA,sha256=wsmsniEhQ7kv8zjiRqZSXvGiJaVwSkEMMulE9aFkpfQ,6521
- ler-0.3.8.dist-info/WHEEL,sha256=y4mX-SOX4fYIkonsAGA5N0Oy-8_gI4FXw5HNI1xqvWg,91
- ler-0.3.8.dist-info/top_level.txt,sha256=VWeWLF_gNMjzquGmqrLXqp2J5WegY86apTUimMTh68I,4
- ler-0.3.8.dist-info/RECORD,,
+ ler/utils/plots.py,sha256=uq-usKRnEymtOSAPeHFOfMQW1XX76_WP2aBkT40RvLo,15664
+ ler/utils/utils.py,sha256=HzRgpDjxXqaZ0jUjYU79IRzaFFK66rAhNAoqXdUHJJo,28976
+ ler-0.4.0.dist-info/LICENSE,sha256=9LeXXC3WaHBpiUGhLVgOVnz0F12olPma1RX5zgpfp8Q,1081
+ ler-0.4.0.dist-info/METADATA,sha256=T9JDeo2k-a0XmLpHG5U1gKy3Wlhake6ebe-Xy6EYGO0,6520
+ ler-0.4.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ ler-0.4.0.dist-info/top_level.txt,sha256=VWeWLF_gNMjzquGmqrLXqp2J5WegY86apTUimMTh68I,4
+ ler-0.4.0.dist-info/RECORD,,