ler 0.1.3.tar.gz → 0.1.6.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ler might be problematic.
- {ler-0.1.3 → ler-0.1.6}/PKG-INFO +1 -1
- {ler-0.1.3 → ler-0.1.6}/ler/__init__.py +2 -1
- ler-0.1.6/ler/helperroutines.py +177 -0
- ler-0.1.6/ler/lens_galaxy_population.py +948 -0
- ler-0.1.6/ler/ler.py +1536 -0
- ler-0.1.6/ler/multiprocessing_routine.py +393 -0
- ler-0.1.6/ler/source_population.py +873 -0
- {ler-0.1.3 → ler-0.1.6}/ler.egg-info/PKG-INFO +1 -1
- ler-0.1.6/ler.egg-info/requires.txt +12 -0
- ler-0.1.6/setup.py +27 -0
- ler-0.1.3/ler/helperroutines.py +0 -159
- ler-0.1.3/ler/lens_galaxy_population.py +0 -545
- ler-0.1.3/ler/ler.py +0 -726
- ler-0.1.3/ler/multiprocessing_routine.py +0 -250
- ler-0.1.3/ler/source_population.py +0 -257
- ler-0.1.3/ler.egg-info/requires.txt +0 -9
- ler-0.1.3/setup.py +0 -22
- {ler-0.1.3 → ler-0.1.6}/README.md +0 -0
- {ler-0.1.3 → ler-0.1.6}/ler.egg-info/SOURCES.txt +0 -0
- {ler-0.1.3 → ler-0.1.6}/ler.egg-info/dependency_links.txt +0 -0
- {ler-0.1.3 → ler-0.1.6}/ler.egg-info/top_level.txt +0 -0
- {ler-0.1.3 → ler-0.1.6}/setup.cfg +0 -0
{ler-0.1.3 → ler-0.1.6}/PKG-INFO RENAMED

{ler-0.1.3 → ler-0.1.6}/ler/__init__.py RENAMED
@@ -1,5 +1,6 @@
 from .ler import LeR
 from .lens_galaxy_population import LensGalaxyPopulation
 from .source_population import SourceGalaxyPopulationModel, CompactBinaryPopulation
-from .helperroutines import add_dictionaries_together, rejection_sample
 from .multiprocessing_routine import solve_lens_equation1, solve_lens_equation2
+from .helperroutines import add_dictionaries_together, rejection_sample
+
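The ler/__init__.py change only moves the helperroutines import after the multiprocessing_routine import and adds a trailing blank line, so the names re-exported at the package top level are unchanged. A minimal sketch of the resulting public imports, assuming ler 0.1.6 and its dependencies are installed:

    # all names below are re-exported by ler/__init__.py as shown in the hunk above
    from ler import LeR
    from ler import LensGalaxyPopulation
    from ler import SourceGalaxyPopulationModel, CompactBinaryPopulation
    from ler import add_dictionaries_together, rejection_sample
    from ler import solve_lens_equation1, solve_lens_equation2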
ler-0.1.6/ler/helperroutines.py ADDED
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains helper routines for other modules in the ler package.
+"""
+
+import numpy as np
+import json
+
+chunk_size = 10000
+
+
+class NumpyEncoder(json.JSONEncoder):
+    """
+    class for storing a numpy.ndarray or any nested-list composition as JSON file
+
+    Parameters
+    ----------
+    json.JSONEncoder : `class`
+        class for encoding JSON file
+
+    Returns
+    ----------
+    json.JSONEncoder.default : `function`
+        function for encoding JSON file
+
+    Example
+    ----------
+    >>> import numpy as np
+    >>> import json
+    >>> from ler import helperroutines as hr
+    >>> # create a dictionary
+    >>> param = {'a': np.array([1,2,3]), 'b': np.array([4,5,6])}
+    >>> # save the dictionary as json file
+    >>> with open('param.json', 'w') as f:
+    >>>     json.dump(param, f, cls=hr.NumpyEncoder)
+    >>> # load the dictionary from json file
+    >>> with open('param.json', 'r') as f:
+    >>>     param = json.load(f)
+    >>> # print the dictionary
+    >>> print(param)
+    {'a': [1, 2, 3], 'b': [4, 5, 6]}
+
+    """
+
+    def default(self, obj):
+        if isinstance(obj, np.ndarray):
+            return obj.tolist()
+        return json.JSONEncoder.default(self, obj)
+
+
+def append_json(file_name, dictionary, replace=False):
+    """Append and update a json file with a dictionary.
+
+    Parameters
+    ----------
+    file_name : `str`
+        json file name for storing the parameters.
+    dictionary : `dict`
+        dictionary to be appended to the json file.
+    replace : `bool`, optional
+        If True, replace the json file with the dictionary. Default is False.
+
+    """
+
+    # check if the file exists
+    try:
+        with open(file_name, "r", encoding="utf-8") as f:
+            data = json.load(f)
+    except:
+        "File does not exist. Creating a new one..."
+        replace = True
+
+    if replace:
+        data = dictionary
+    else:
+        data_key = data.keys()
+        for key, value in dictionary.items():
+            if key in data_key:
+                data[key] = np.concatenate((data[key], value))
+
+    json_dump = json.dumps(data, cls=NumpyEncoder)
+    with open(file_name, "w", encoding="utf-8") as write_file:
+        json.dump(json.loads(json_dump), write_file, indent=4)
+
+
+def get_param_from_json(json_file):
+    """
+    Function to get the parameters from json file.
+
+    Parameters
+    ----------
+    json_file : `str`
+        json file name for storing the parameters.
+
+    Returns
+    ----------
+    param : `dict`
+    """
+    with open(json_file, "r", encoding="utf-8") as f:
+        param = json.load(f)
+
+    for key, value in param.items():
+        param[key] = np.array(value)
+    return param
+
+
+def rejection_sample(pdf, xmin, xmax, size=100):
+    """
+    Helper function for rejection sampling from a pdf with maximum and minimum arguments.
+    Input parameters:
+        pdf: the pdf to sample from
+        xmin: the minimum argument of the pdf
+        xmax: the maximum argument of the pdf
+        size: the number of samples to draw
+    Output:
+        samples: the samples drawn from the pdf
+    """
+    x = np.linspace(xmin, xmax, 1000)
+    y = pdf(x)
+    ymax = np.max(y)
+    # Rejection sample in chunks
+    x_sample = []
+    while len(x_sample) < size:
+        x_try = np.random.uniform(xmin, xmax, size=chunk_size)
+        y_try = np.random.uniform(0, ymax, size=chunk_size)
+        ymax = max(ymax, np.max(y_try))
+        # Add while retaining 1D shape of the list
+        x_sample += list(x_try[y_try < pdf(x_try)])
+    # Transform the samples to a 1D numpy array
+    x_sample = np.array(x_sample).flatten()
+    # Return the correct number of samples
+    return x_sample[:size]
+
+
+def add_dictionaries_together(dictionary1, dictionary2):
+    """Adds two dictionaries with the same keys together."""
+    dictionary = {}
+    # Check if either dictionary empty, in which case only return the dictionary with values
+    if len(dictionary1) == 0:
+        return dictionary2
+    elif len(dictionary2) == 0:
+        return dictionary1
+    # Check if the keys are the same
+    if dictionary1.keys() != dictionary2.keys():
+        raise ValueError("The dictionaries have different keys.")
+    for key in dictionary1.keys():
+        # Check if the item is an ndarray
+        if isinstance(dictionary1[key], np.ndarray):
+            dictionary[key] = np.concatenate((dictionary1[key], dictionary2[key]))
+        # Check if the item is a nested dictionary
+        elif isinstance(dictionary1[key], dict):
+            dictionary[key] = add_dictionaries_together(
+                dictionary1[key], dictionary2[key]
+            )
+        else:
+            raise ValueError(
+                "The dictionary contains an item which is neither an ndarray nor a dictionary."
+            )
+    return dictionary
+
+
+def trim_dictionary(dictionary, size):
+    """Filters an event dictionary to only contain the size."""
+    for key in dictionary.keys():
+        # Check if the item is an ndarray
+        if isinstance(dictionary[key], np.ndarray):
+            dictionary[key] = dictionary[key][:size]  # Trim the array
+        # Check if the item is a nested dictionary
+        elif isinstance(dictionary[key], dict):
+            dictionary[key] = trim_dictionary(
+                dictionary[key], size
+            )  # Trim the nested dictionary
+        else:
+            raise ValueError(
+                "The dictionary contains an item which is neither an ndarray nor a dictionary."
+            )
+    return dictionary
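The append_json and get_param_from_json helpers added above write and read parameter dictionaries of numpy arrays via the NumpyEncoder class. A minimal round-trip sketch, assuming ler 0.1.6 is installed; the file name ler_params.json and the mass_1/mass_2 keys are illustrative, not part of the package:

    import numpy as np
    from ler.helperroutines import append_json, get_param_from_json

    # illustrative parameter batches keyed by hypothetical names
    batch1 = {"mass_1": np.array([10.0, 20.0]), "mass_2": np.array([8.0, 15.0])}
    batch2 = {"mass_1": np.array([30.0]), "mass_2": np.array([25.0])}

    append_json("ler_params.json", batch1, replace=True)   # start a fresh file
    append_json("ler_params.json", batch2, replace=False)  # concatenate arrays key by key

    params = get_param_from_json("ler_params.json")        # values come back as numpy arrays
    print(params["mass_1"])                                # [10. 20. 30.]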
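rejection_sample draws from an arbitrary pdf by proposing uniform points in chunks of chunk_size and keeping those that fall under the pdf curve. A short sketch, assuming ler 0.1.6 is installed; the exponential pdf is purely illustrative:

    import numpy as np
    from ler.helperroutines import rejection_sample

    # sample a truncated exponential on [0, 5]; any callable pdf works
    samples = rejection_sample(pdf=lambda x: np.exp(-x), xmin=0.0, xmax=5.0, size=1000)
    print(samples.size, samples.min() >= 0.0, samples.max() <= 5.0)  # 1000 True True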
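add_dictionaries_together and trim_dictionary work recursively on dictionaries whose values are numpy arrays or nested dictionaries of arrays. A sketch with illustrative keys, assuming ler 0.1.6 is installed:

    import numpy as np
    from ler.helperroutines import add_dictionaries_together, trim_dictionary

    d1 = {"zs": np.array([0.1, 0.2]), "snr": {"L1": np.array([9.0, 12.0])}}
    d2 = {"zs": np.array([0.3]), "snr": {"L1": np.array([15.0])}}

    merged = add_dictionaries_together(d1, d2)  # concatenates arrays key by key, recursing into nested dicts
    trimmed = trim_dictionary(merged, 2)        # keeps only the first 2 entries of every array
    print(trimmed["zs"], trimmed["snr"]["L1"])  # [0.1 0.2] [ 9. 12.]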