chanter 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chanter-0.0.1/PKG-INFO ADDED
@@ -0,0 +1,14 @@
1
+ Metadata-Version: 2.2
2
+ Name: chanter
3
+ Version: 0.0.1
4
+ Summary: A very simple galaxy spectrum modeller
5
+ Author-email: Struan Stevenson <struan.stevenson@ed.ac.uk>
6
+ Requires-Python: >=3.12
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: numpy
9
+ Requires-Dist: astropy
10
+ Requires-Dist: nautilus-sampler
11
+ Requires-Dist: spectres
12
+ Requires-Dist: matplotlib
13
+
14
+ # Chanter
@@ -0,0 +1 @@
1
+ # Chanter
from .model_galaxy import modelgalaxy
from .galaxy_fitter import galaxyfitter

from . import utils

# Explicitly declare the public API of the chanter package.
__all__ = ["modelgalaxy", "galaxyfitter", "utils"]
@@ -0,0 +1,118 @@
1
+ import numpy as np
2
+ from nautilus import Sampler
3
+ from .model_galaxy import modelgalaxy
4
+ from .utils import filter_set
5
+ import time
6
+
7
+
8
class galaxyfitter(object):
    """ Fits observed photometry with modelgalaxy spectra using the
    nautilus nested sampler. """

    # Generative function for the likelihood (calls model galaxy)
    def gen_phot(self, fit_instruc, filt_files):
        """ Generate model photometry for a point in parameter space.

        Parameters
        ----------

        fit_instruc : dict
            Model instructions ("redshift", an "exponential" SFH dict
            and a "dust" dict) passed straight to modelgalaxy.

        filt_files : list
            Paths to the filter curve files to integrate over.
        """
        model = modelgalaxy(fit_instruc)
        wavs, flux = model.get_spectrum()
        effwavs, phot = model.get_photometry(wavs, flux, filt_files)
        return phot

    # Likelihood function for a certain point in parameter space
    def log_likelihood(self, data, gen_func, filt_files, param_dict):
        """ Return the natural logarithm of the likelihood, constructed
        from one Gaussian per photometric point.

        Parameters
        ----------

        data : numpy.ndarray
            Two rows: fluxes and their 1-sigma uncertainties.

        gen_func : function
            Generative function used to model the data; called as
            gen_func(fit_instruc, filt_files).

        filt_files : list
            Filter curve file paths, forwarded to gen_func.

        param_dict : dict
            Free parameters of the generative model (age, tau,
            massformed, Av, redshift).
        """

        # Given data
        yi, yisig = data[0], data[1]

        # Repackage the flat parameter dictionary into the nested
        # structure that modelgalaxy expects.
        exp = {}
        exp["age"] = param_dict["age"]
        exp["tau"] = param_dict["tau"]
        exp["massformed"] = param_dict["massformed"]
        #exp["metallicity"] = param_dict["metallicity"] # Z/Z_oldsolar
        dust = {}
        dust["type"] = "Calzetti"
        dust["Av"] = param_dict["Av"]
        fit_instruc = {}
        fit_instruc["redshift"] = param_dict["redshift"]
        fit_instruc["exponential"] = exp
        fit_instruc["dust"] = dust

        # Per-point Gaussian log-likelihood
        log_li = (np.log(1 / (np.sqrt(2 * np.pi * (yisig**2))))
                  - ((yi - gen_func(fit_instruc, filt_files))**2
                     / (2 * (yisig**2))))

        # Sum the per-point terms (note: may underflow for very small
        # likelihoods)
        log_l_total = np.sum(log_li)

        return log_l_total

    # Fit an array of photometry, output the best galaxy parameters
    def fit(self, photdata, prior, filt_files):
        """ Fit photometry and return the median posterior parameters
        repackaged as modelgalaxy fit instructions.

        Parameters
        ----------

        photdata : numpy.ndarray
            One row per band: flux and uncertainty (microJanskys;
            converted to f_lambda cgs below).

        prior : nautilus prior object
            Defines the free parameters (must expose .keys).

        filt_files : list
            Paths to the filter curve files.
        """

        filt = filter_set(filt_files)
        effwavs = filt.eff_wavs

        # Run fitter
        t0 = time.time()

        # BUGFIX: previously printed the builtin `id` function; no
        # object identifier is available in this scope.
        print('Computing...')

        # Load data. Copy so the in-place unit conversion below does
        # not mutate the caller's array (photdata.T is a view).
        dat = photdata.T.copy()

        # Convert f_nu in microJanskys to f_lambda in erg/s/cm^2/A
        # (f_lambda = f_nu * c / lambda^2; matches the inverse factor
        # used in filter_set.get_photometry's "cgs_to_mujy").
        dat[0] = 2.99792458E-05 * ((1e-6 * dat[0]) / ((effwavs)**2))
        dat[1] = 2.99792458E-05 * ((1e-6 * dat[1]) / ((effwavs)**2))

        # Run the nested sampler over the likelihood function
        sampler = Sampler(prior, lambda param_dict: self.log_likelihood(
            dat, self.gen_phot, filt_files, param_dict))
        sampler.run(verbose=True, discard_exploration=True, timeout=1800)

        # Posterior samples, log-weights and log-likelihoods
        points, log_w, log_l = sampler.posterior()

        dic = {}
        # Weighted median of each parameter's posterior (resampled by
        # posterior weight; nondeterministic without a fixed seed)
        for i in range(len(prior.keys)):
            key = prior.keys[i]
            points_i = points.T[i]
            samples = np.random.choice(points_i, size=points_i.shape[0],
                                       p=np.exp(log_w))
            dic[key + "_50"] = np.percentile(samples, 50)

        # Repackage the medians as modelgalaxy fit instructions
        exp = {}
        exp["age"] = dic['age_50']
        exp["tau"] = dic['tau_50']
        exp["massformed"] = dic['massformed_50']
        dust = {}
        dust["type"] = "Calzetti"
        dust["Av"] = dic['Av_50']
        fit_instructions = {}
        fit_instructions["redshift"] = dic['redshift_50']
        fit_instructions["exponential"] = exp
        fit_instructions["dust"] = dust

        print('Finished in ' + str(time.time() - t0) + ' seconds.')
        print(' ')

        return fit_instructions
@@ -0,0 +1,257 @@
1
+ import numpy as np
2
+ import matplotlib.pyplot as plt
3
+ from astropy.cosmology import Planck13 # Planck 2013
4
+ from astropy.io import fits
5
+ from spectres import spectres
6
+ from .utils import filter_set
7
+
8
+
9
# Load SSPs and resample
def resample_ssp_wavs(target_wavs):
    """ Resample every SSP spectrum in the module-level master_base
    grid onto the wavelength sampling given by target_wavs.

    The output keeps master_base's layout: axis 0 runs over grids,
    row 0 of axis 1 holds the wavelengths, column 0 of axis 2 holds
    the ages, and the remaining cells hold fluxes.
    """

    n_grids, n_rows = master_base.shape[0], master_base.shape[1]
    resampled = np.zeros((n_grids, n_rows, target_wavs.shape[0] + 1))

    # Carry over the age column and install the new wavelength row.
    resampled[:, :, 0] = master_base[:, :, 0]
    resampled[:, 0, 1:] = target_wavs

    source_wavs = master_base[0, 0, 1:]

    for g in range(n_grids):
        for row in range(1, n_rows):
            resampled[g, row, 1:] = spectres(target_wavs, source_wavs,
                                             master_base[g, row, 1:],
                                             fill=0)

    # Convert the age column of the first grid to Gyr.
    # NOTE(review): only block 0 is converted — presumably intentional
    # since downstream code reads ages from grid[0]; confirm.
    resampled[0, 1:, 0] = resampled[0, 1:, 0] / 1e9

    return resampled
29
+
30
def resample_igm_grid(wavs):
    """ Load the Inoue (2014) IGM transmission grid and resample it
    onto the wavelength sampling given by wavs.

    Parameters
    ----------

    wavs : numpy.ndarray
        Target rest-frame wavelength sampling in Angstroms.

    Returns a 2D array with one row per tabulated redshift slice.
    """
    import os

    # Resolve the grid file relative to this package rather than the
    # current working directory, so imports work from anywhere
    # (previously hard-coded as 'chanter/chanter/grids/...', which
    # only worked when run from the repository root).
    grid_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             "grids", "d_igm_grid_inoue14.fits")
    raw_igm_grid = fits.open(grid_path)[1].data

    # Wavelength sampling of the raw grid (Angstroms)
    igm_wavelengths = np.arange(1.0, 1225.01, 1.0)

    # Resample each redshift slice in wavelength
    grid = np.zeros((raw_igm_grid.shape[0], wavs.shape[0]))

    for i in range(raw_igm_grid.shape[0]):
        grid[i] = spectres(wavs, igm_wavelengths, raw_igm_grid[i])

    return grid
47
+
48
+
49
import os

# Resolve grid files relative to this package rather than the current
# working directory (previously hard-coded 'chanter/chanter/grids/...'
# paths only worked when run from the repository root).
_grid_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "grids")

# Load the SSP grids: one FITS extension per grid.
hdul = fits.open(os.path.join(_grid_dir, "ssps.fits"))
master_base = np.array((hdul[1].data, hdul[2].data, hdul[3].data,
                        hdul[4].data, hdul[5].data, hdul[6].data,
                        hdul[7].data))

# Common rest-frame wavelength sampling (Angstroms) shared by the SSP
# and IGM grids.
rest_target_wavs = np.arange(100., 50000., 10.)
grid = resample_ssp_wavs(rest_target_wavs)
igm_grid = resample_igm_grid(rest_target_wavs)


'''
# !!AGE RESAMPLING NOT WORKING!!

def resample_ssp_ages(grid_raw_ages, target_ages):

    grid = np.zeros((grid_raw_ages.shape[0], target_ages.shape[0]+1, grid_raw_ages.shape[2]))
    grid[:, 0, :] = grid_raw_ages[0, 0, :]
    grid[:, 1:, 0] = target_ages

    old_ages = grid_raw_ages[0].T[0, 1:] / 1e9

    for i in range(grid_raw_ages.shape[0]):
        for j in range(1, grid_raw_ages.shape[2]):

            old_age_flux = grid_raw_ages[i].T[j, 1:]
            new_ages = target_ages
            new_age_flux = spectres(new_ages, old_ages, old_age_flux, fill=0)
            grid[i].T[j, 1:] = new_age_flux

    return grid

num_agebins = 100
target_ages = np.linspace(0, Planck13.age(0).value, num_agebins)
grid = resample_ssp_ages(grid_resampled, target_ages)
'''
82
+
83
+
84
+
85
class modelgalaxy(object):
    """ Builds a model galaxy spectrum from the module-level SSP grid
    given a dictionary of fit instructions ("redshift", an
    "exponential" star-formation-history dict and a "dust" dict). """

    def __init__(self, fit_instructions):
        self.fit_instructions = fit_instructions
        self.universe_age = Planck13.age(0).value
        self.lookback_time = Planck13.lookback_time(
            self.fit_instructions["redshift"]).value

    def sfr(self, age, ageform, tau, massformed):
        """ Exponentially declining star-formation rate evaluated on
        the given age grid, normalised so its integral over the grid
        equals 10**massformed solar masses. """

        # Time elapsed since formation (negative before the galaxy forms)
        elapsed = ageform - age
        rate = np.exp(-elapsed / tau)

        # Enforce no star formation before formation
        rate[elapsed < 0] = 0

        # Normalise the integral of the SFH to the total mass formed
        area = np.sum(np.diff(age) * rate[:-1])
        if area != 0:
            rate = rate * ((10**massformed) / area)
        else:
            rate = rate * 0

        # A galaxy cannot form before the Big Bang
        if ageform + self.lookback_time > self.universe_age:
            rate *= 0

        return rate

    def calzetti_dust(self, wavs):
        """ Calculate the ratio A(lambda)/A(V) for the Calzetti et al.
        (2000) attenuation curve. """

        ratio = np.zeros_like(wavs)
        microns = wavs * 1e-4

        uv = (wavs < 1200.)
        optical = (wavs >= 1200.) & (wavs < 6300.)
        red = (wavs >= 6300.) & (wavs < 31000.)

        # Power-law extension below 1200 A, pinned to the optical
        # formula's value at 0.12 microns
        ratio[uv] = ((microns[uv]/0.12)**-0.77
                     * (4.05 + 2.695*(- 2.156 + 1.509/0.12
                                      - 0.198/0.12**2 + 0.011/0.12**3)))

        ratio[optical] = (4.05 + 2.695*(- 2.156
                                        + 1.509/microns[optical]
                                        - 0.198/microns[optical]**2
                                        + 0.011/microns[optical]**3))

        ratio[red] = 2.659*(-1.857 + 1.040/microns[red]) + 4.05

        return ratio / 4.05

    def transmission_neutral(self, wavs):
        """ Fractional transmission after Calzetti dust attenuation at
        the model's Av. """
        Av = self.fit_instructions["dust"]["Av"]
        return 10**(-Av * self.calzetti_dust(wavs) / 2.5)

    def transmission_igm(self, redshift):
        """ Get the IGM transmission at a given redshift by linearly
        interpolating between the two bracketing rows of the
        module-level igm_grid (tabulated on a dz = 0.01 grid up to
        z = 10). """

        max_redshift = 10.
        igm_redshifts = np.arange(0.0, max_redshift + 0.01, 0.01)

        # Index of the first tabulated redshift at or above the target
        zred_ind = int(np.sum(igm_redshifts < redshift))

        zred_fact = ((redshift - igm_redshifts[zred_ind-1])
                     / (igm_redshifts[zred_ind]
                        - igm_redshifts[zred_ind-1]))

        # At or below the first grid point: use the first slice exactly
        if zred_ind == 0:
            zred_ind += 1
            zred_fact = 0.

        weights = np.array([[1. - zred_fact, zred_fact]])

        igm_trans = np.sum(weights.T*igm_grid[zred_ind-1:zred_ind+1], axis=0)

        # Outside the resampled range the grid holds NaNs; treat those
        # wavelengths as fully transparent
        igm_trans[np.isnan(igm_trans)] = 1.

        return igm_trans

    def redshift_effects(self, wavs, lum):
        """ Redshift the rest-frame wavelengths and convert luminosity
        (solar units per Angstrom) to observed flux in erg/s/cm^2/A. """

        zplus1 = 1 + self.fit_instructions["redshift"]
        obswavs = zplus1 * wavs

        # Luminosity distance, converted from Mpc to cm
        lum_distance = Planck13.luminosity_distance(
            self.fit_instructions["redshift"]).value
        lum_distance = lum_distance * 1e6 * 3.086e16 * 100

        # L_sun -> erg/s, then inverse-square dilution with one factor
        # of (1+z) for the stretched wavelength interval
        flux = (3.826e33 * lum) / (4 * np.pi * zplus1 * (lum_distance**2))

        return obswavs, flux

    def get_spectrum(self):
        """ Build the observed-frame spectrum: weight the SSPs by the
        star-formation history, apply dust and IGM attenuation, then
        redshift. Returns (observed wavelengths, observed fluxes). """

        # Use a single population: fluxes from grid[4] on the
        # wavelength/age sampling stored in grid[0]
        ssp_age = grid[0].T[0, 1:]
        wavs = grid[0, 0, 1:]
        fluxes = grid[4, 1:, 1:]

        agebinwidths = np.diff(ssp_age)
        sfrs = self.sfr(ssp_age[1:],
                        self.fit_instructions["exponential"]["age"],
                        self.fit_instructions["exponential"]["tau"],
                        self.fit_instructions["exponential"]["massformed"])
        ssps = fluxes[1:]

        trans_neutral = self.transmission_neutral(wavs)
        trans_igm = self.transmission_igm(self.fit_instructions["redshift"])

        # Mass formed per age bin times each SSP, dust-attenuated,
        # summed over age bins, then IGM-attenuated
        bin_masses = np.array([agebinwidths * sfrs]).T
        lumtot = np.sum(bin_masses * ssps * trans_neutral, axis=0)
        lumtot *= trans_igm

        # Shift to the observed frame and convert to flux
        obswavs, obsflux = self.redshift_effects(wavs, lumtot)

        # Cache for plot_spec
        self.wavs = obswavs
        self.flux = obsflux

        return obswavs, obsflux

    def get_photometry(self, wavs, flux, filt_list):
        """ Integrate the spectrum through the filters in filt_list.
        Returns (effective wavelengths, photometric fluxes). """
        filters = filter_set(filt_list)
        filters.resample_filter_curves(wavs)
        return filters.eff_wavs, filters.get_photometry(flux, 0)

    def plot_spec(self, ax, kwargs):
        """ Plot the model spectrum on the supplied axes; kwargs is a
        dict of extra keyword arguments forwarded to ax.step. """

        self.get_spectrum()

        ax.step(self.wavs, self.flux, **kwargs)
        ax.set_ylabel('Observed Flux / erg s$^{-1}$ cm$^{-2}$ Å$^{-1}$')
        ax.set_xlabel('Observed Wavelength / Å')

    def plot_sfh(self):
        """ Plot the star-formation history against cosmic age.
        Returns (figure, axes). """

        ssp_age = grid[0].T[0, 1:]
        sfrs = self.sfr(ssp_age,
                        self.fit_instructions["exponential"]["age"],
                        self.fit_instructions["exponential"]["tau"],
                        self.fit_instructions["exponential"]["massformed"])

        fig, ax = plt.subplots()
        ax.set_xlabel('Age / Gyr')
        ax.set_ylabel(' Star Formation / M$_{sun}$ Gyr$^{-1}$')
        ax.plot((self.universe_age-self.lookback_time) - ssp_age,
                sfrs/1e9, color='black')
        ax.invert_xaxis()
        ax.set_ylim(0, )
        return fig, ax
243
+
244
+
245
+
246
+
247
+
248
+
249
+
250
+
251
+
252
+
253
+
254
+
255
+
256
+
257
+
from .filter_set import filter_set

# Explicitly declare the public API of the utils subpackage.
__all__ = ["filter_set"]
@@ -0,0 +1,175 @@
1
+ import numpy as np
2
+
3
+
4
class filter_set(object):
    """ Class for loading and manipulating sets of filter curves. This
    is where integration over filter curves to get photometry happens.

    Parameters
    ----------

    filt_list : list
        List of strings containing paths from the working directory to
        files where filter curves are stored. The filter curve files
        should contain an array of wavelengths in Angstroms followed by
        a column of relative transmission values.
    """

    def __init__(self, filt_list):
        self.filt_list = filt_list
        self.wavelengths = None
        self._load_filter_curves()
        self._calculate_min_max_wavelengths()
        self._calculate_effective_wavelengths()

    def _load_filter_curves(self):
        """ Read each filter file into a (wavelength, transmission)
        array keyed by its path. """

        self.filt_dict = {filt: np.loadtxt(filt, usecols=(0, 1))
                          for filt in self.filt_list}

    def _calculate_min_max_wavelengths(self):
        """ Record the extreme wavelengths covered by any filter,
        padded outwards by twice the edge bin spacing. """

        self.min_phot_wav = 9.9*10**99
        self.max_phot_wav = 0.

        for filt in self.filt_list:
            curve = self.filt_dict[filt]

            lo = curve[0, 0] - 2*(curve[1, 0] - curve[0, 0])
            hi = curve[-1, 0] + 2*(curve[-1, 0] - curve[-2, 0])

            self.min_phot_wav = min(self.min_phot_wav, lo)
            self.max_phot_wav = max(self.max_phot_wav, hi)

    def _calculate_effective_wavelengths(self):
        """ Compute the pivot (effective) wavelength of each filter
        curve. """

        self.eff_wavs = np.zeros(len(self.filt_list))

        for i, filt in enumerate(self.filt_list):
            curve = self.filt_dict[filt]
            dlambda = self.make_bins(curve[:, 0])[1]
            weights = dlambda * curve[:, 1]
            self.eff_wavs[i] = np.sqrt(np.sum(weights * curve[:, 0])
                                       / np.sum(weights / curve[:, 0]))

    def make_bins(self, midpoints, make_rhs=False):
        """ A general function for turning an array of bin midpoints
        into an array of bin left hand side positions and bin widths.
        Splits the distance between bin midpoints equally in linear
        space.

        Parameters
        ----------

        midpoints : numpy.ndarray
            Array of bin midpoint positions

        make_rhs : bool
            Whether to add the position of the right hand side of the
            final bin to bin_lhs, defaults to false.
        """

        widths = np.zeros_like(midpoints)
        widths[-1] = midpoints[-1] - midpoints[-2]

        if make_rhs:
            # One extra entry: the right-hand edge of the final bin
            edges = np.zeros(midpoints.shape[0] + 1)
            edges[0] = midpoints[0] - (midpoints[1] - midpoints[0])/2
            edges[-1] = midpoints[-1] + (midpoints[-1] - midpoints[-2])/2
            edges[1:-1] = (midpoints[1:] + midpoints[:-1])/2
            widths[:-1] = edges[1:-1] - edges[:-2]
        else:
            edges = np.zeros_like(midpoints)
            edges[0] = midpoints[0] - (midpoints[1] - midpoints[0])/2
            edges[1:] = (midpoints[1:] + midpoints[:-1])/2
            widths[:-1] = edges[1:] - edges[:-1]

        return edges, widths

    def resample_filter_curves(self, wavelengths):
        """ Resamples the filter curves onto a new set of wavelengths
        and creates a 2D array of filter curves on this sampling. """

        # New common wavelength sampling
        self.wavelengths = wavelengths

        # Width in wavelength space of each sampling point
        self.widths = self.make_bins(wavelengths)[1]

        # One column of transmission values per filter; zero outside
        # each filter's tabulated range
        self.filt_array = np.zeros((wavelengths.shape[0],
                                    len(self.filt_list)))

        for i, filt in enumerate(self.filt_list):
            curve = self.filt_dict[filt]
            self.filt_array[:, i] = np.interp(wavelengths, curve[:, 0],
                                              curve[:, 1], left=0, right=0)

    def get_photometry(self, spectrum, redshift, unit_conv=None):
        """ Calculates photometric fluxes. The filters are first re-
        sampled onto the same wavelength grid with transmission values
        blueshifted by (1+z). This is followed by an integration over
        the observed spectrum in the rest frame:

        flux = integrate[(f_lambda*lambda*T(lambda*(1+z))*dlambda)]
        norm = integrate[(lambda*T(lambda*(1+z))*dlambda))]
        photometry = flux/norm

        lambda: rest-frame wavelength array
        f_lambda: observed spectrum
        T(lambda*(1+z)): transmission of blueshifted filters
        dlambda: width of each wavelength bin

        The integrals over all filters are done in one array operation
        to improve the speed of the code.
        """

        if self.wavelengths is None:
            raise ValueError("Please use resample_filter_curves method to set"
                             + " wavelengths before calculating photometry.")

        redshifted_wavs = self.wavelengths*(1. + redshift)

        # Blueshift each filter curve so it samples the right part of
        # the rest-frame spectrum
        filters_z = np.zeros_like(self.filt_array)

        for i in range(len(self.filt_list)):
            filters_z[:, i] = np.interp(redshifted_wavs, self.wavelengths,
                                        self.filt_array[:, i],
                                        left=0, right=0)

        # Numerator: integral of f_lambda * lambda * T * dlambda
        numerator = np.expand_dims(spectrum*self.widths*self.wavelengths,
                                   axis=1)
        numerator = np.sum(numerator*filters_z, axis=0)

        # Denominator: integral of lambda * T * dlambda
        denominator = filters_z*np.expand_dims(self.widths*self.wavelengths,
                                               axis=1)
        denominator = np.sum(denominator, axis=0)

        photometry = np.squeeze(numerator/denominator)

        # This is a little dodgy as pointed out by Ivo, it should depend
        # on the spectral shape however only currently used for UVJ mags
        if unit_conv == "cgs_to_mujy":
            photometry /= (10**-29*2.9979*10**18/self.eff_wavs**2)

        return photometry