jaxspec 0.3.2__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
jaxspec/analysis/results.py CHANGED
@@ -77,8 +77,9 @@ class FitResult:
         r"""
         Convergence of the chain as computed by the $\hat{R}$ statistic.
         """
+        rhat = az.rhat(self.inference_data)
 
-        return all(az.rhat(self.inference_data) < 1.01)
+        return bool((rhat.to_array() < 1.01).all())
 
     def _ppc_folded_branches(self, obs_id):
         obs = self.obsconfs[obs_id]
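Why the change matters: az.rhat returns an xarray Dataset with one $\hat{R}$ entry per parameter, so the previous all(az.rhat(...) < 1.01) iterated over the Dataset's variable names (non-empty strings) and always evaluated to True. The new code reduces over the actual values. A minimal standalone sketch of the corrected check, using a synthetic posterior rather than a jaxspec fit:

    import arviz as az
    import numpy as np

    # Synthetic posterior with 4 chains and 500 draws for two parameters
    idata = az.from_dict(posterior={
        "norm": np.random.randn(4, 500),
        "index": np.random.randn(4, 500),
    })

    rhat = az.rhat(idata)                              # xarray.Dataset, one R-hat per parameter
    converged = bool((rhat.to_array() < 1.01).all())   # single True/False over all parameters
    print(converged)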
@@ -167,6 +168,7 @@ class FitResult:
         e_max: float,
         unit: Unit = u.photon / u.cm**2 / u.s,
         register: bool = False,
+        n_points: int = 100,
     ) -> ArrayLike:
         """
         Compute the unfolded photon flux in a given energy band. The flux is then added to
@@ -177,6 +179,7 @@ class FitResult:
             e_max: The upper bound of the energy band in observer frame.
             unit: The unit of the photon flux.
             register: Whether to register the flux with the other posterior parameters.
+            n_points: The number of points to use for computing the unfolded spectrum.
 
         !!! warning
             Computation of the folded flux is not implemented yet. Feel free to open an
@@ -188,18 +191,25 @@ class FitResult:
         def vectorized_flux(*pars):
             parameters_pytree = jax.tree.unflatten(pytree_def, pars)
             return self.model.photon_flux(
-                parameters_pytree, jnp.asarray([e_min]), jnp.asarray([e_max]), n_points=100
+                parameters_pytree, jnp.asarray([e_min]), jnp.asarray([e_max]), n_points=n_points
             )[0]
 
         flat_tree, pytree_def = jax.tree.flatten(self.input_parameters)
         flux = vectorized_flux(*flat_tree)
-        conversion_factor = (u.photon / u.cm**2 / u.s).to(unit)
-        value = flux * conversion_factor
+        conversion_factor = float((u.photon / u.cm**2 / u.s).to(unit))
+        value = np.asarray(flux * conversion_factor)
 
         if register:
-            self.inference_data.posterior[f"photon_flux_{e_min:.1f}_{e_max:.1f}"] = (
-                list(self.inference_data.posterior.coords),
-                value,
+            self.inference_data.posterior[f"mod/~/photon_flux_{e_min:.1f}_{e_max:.1f}"] = (
+                xr.DataArray(
+                    value,
+                    dims=self.inference_data.posterior.dims,
+                    coords={
+                        "chain": self.inference_data.posterior.chain,
+                        "draw": self.inference_data.posterior.draw,
+                    },
+                    name=f"mod/~/photon_flux_{e_min:.1f}_{e_max:.1f}",
+                )
             )
 
         return value
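Hedged usage sketch for the hunk above: the enclosing method's name is not visible in this diff, so it is assumed here to be FitResult.photon_flux on a fitted result object named result. The confirmed pieces are the energy band arguments, the unit and register keywords, the new n_points keyword, and the "mod/~/photon_flux_..." posterior variable created when register=True.

    import astropy.units as u

    flux = result.photon_flux(          # `result` and the method name are assumptions
        0.5, 10.0,                      # energy band in keV, observer frame
        unit=u.photon / u.cm**2 / u.s,  # default unit
        register=True,                  # stores "mod/~/photon_flux_0.5_10.0" in the posterior
        n_points=300,                   # previously hard-coded to 100
    )
    print(result.inference_data.posterior["mod/~/photon_flux_0.5_10.0"])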
@@ -210,6 +220,7 @@ class FitResult:
         e_max: float,
         unit: Unit = u.erg / u.cm**2 / u.s,
         register: bool = False,
+        n_points: int = 100,
     ) -> ArrayLike:
         """
         Compute the unfolded energy flux in a given energy band. The flux is then added to
@@ -220,6 +231,7 @@ class FitResult:
             e_max: The upper bound of the energy band in observer frame.
             unit: The unit of the energy flux.
             register: Whether to register the flux with the other posterior parameters.
+            n_points: The number of points to use for computing the unfolded spectrum.
 
         !!! warning
             Computation of the folded flux is not implemented yet. Feel free to open an
@@ -231,18 +243,25 @@ class FitResult:
         def vectorized_flux(*pars):
             parameters_pytree = jax.tree.unflatten(pytree_def, pars)
             return self.model.energy_flux(
-                parameters_pytree, jnp.asarray([e_min]), jnp.asarray([e_max]), n_points=100
+                parameters_pytree, jnp.asarray([e_min]), jnp.asarray([e_max]), n_points=n_points
             )[0]
 
         flat_tree, pytree_def = jax.tree.flatten(self.input_parameters)
         flux = vectorized_flux(*flat_tree)
-        conversion_factor = (u.keV / u.cm**2 / u.s).to(unit)
-        value = flux * conversion_factor
+        conversion_factor = float((u.keV / u.cm**2 / u.s).to(unit))
+        value = np.asarray(flux * conversion_factor)
 
         if register:
-            self.inference_data.posterior[f"energy_flux_{e_min:.1f}_{e_max:.1f}"] = (
-                list(self.inference_data.posterior.coords),
-                value,
+            self.inference_data.posterior[f"mod/~/energy_flux_{e_min:.1f}_{e_max:.1f}"] = (
+                xr.DataArray(
+                    value,
+                    dims=self.inference_data.posterior.dims,
+                    coords={
+                        "chain": self.inference_data.posterior.chain,
+                        "draw": self.inference_data.posterior.draw,
+                    },
+                    name=f"mod/~/energy_flux_{e_min:.1f}_{e_max:.1f}",
+                )
             )
 
         return value
@@ -257,6 +276,7 @@ class FitResult:
         cosmology: Cosmology = Planck18,
         unit: Unit = u.erg / u.s,
         register: bool = False,
+        n_points=100,
     ) -> ArrayLike:
         """
         Compute the luminosity of the source specifying its redshift. The luminosity is then added to
@@ -294,17 +314,26 @@ class FitResult:
                 parameters_pytree,
                 jnp.asarray([e_min]) * (1 + redshift),
                 jnp.asarray([e_max]) * (1 + redshift),
-                n_points=100,
+                n_points=n_points,
             )[0]
 
         flat_tree, pytree_def = jax.tree.flatten(self.input_parameters)
         flux = vectorized_flux(*flat_tree) * (u.keV / u.cm**2 / u.s)
-        value = (flux * (4 * np.pi * cosmology.luminosity_distance(redshift) ** 2)).to(unit)
+        value = np.asarray(
+            (flux * (4 * np.pi * cosmology.luminosity_distance(redshift) ** 2)).to(unit)
+        )
 
         if register:
-            self.inference_data.posterior[f"luminosity_{e_min:.1f}_{e_max:.1f}"] = (
-                list(self.inference_data.posterior.coords),
-                value,
+            self.inference_data.posterior[f"mod/~/luminosity_{e_min:.1f}_{e_max:.1f}"] = (
+                xr.DataArray(
+                    value,
+                    dims=self.inference_data.posterior.dims,
+                    coords={
+                        "chain": self.inference_data.posterior.chain,
+                        "draw": self.inference_data.posterior.draw,
+                    },
+                    name=f"mod/~/luminosity_{e_min:.1f}_{e_max:.1f}",
+                )
             )
 
         return value
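The registered quantity is the band luminosity L = 4 pi d_L(z)^2 F, with the (1 + z) factors above handling the shift between the requested band and the frame in which the model is evaluated. A standalone sketch of just the conversion step, with a made-up flux value:

    import numpy as np
    import astropy.units as u
    from astropy.cosmology import Planck18

    redshift = 0.01
    flux = 1e-3 * u.keV / u.cm**2 / u.s           # toy band flux, standing in for one posterior sample
    d_l = Planck18.luminosity_distance(redshift)  # ~44 Mpc at z = 0.01
    luminosity = (flux * 4 * np.pi * d_l**2).to(u.erg / u.s)
    print(luminosity)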
@@ -315,10 +344,13 @@ class FitResult:
 
         Parameters:
             name: The name of the chain.
+            parameter_kind: The kind of parameters to keep.
         """
 
         keys_to_drop = [
-            key for key in self.inference_data.posterior.keys() if not key.startswith("mod")
+            key
+            for key in self.inference_data.posterior.keys()
+            if not key.startswith(parameter_kind)
         ]
 
         reduced_id = az.extract(
@@ -403,6 +435,7 @@ class FitResult:
         title: str | None = None,
         figsize: tuple[float, float] = (6, 6),
         x_lims: tuple[float, float] | None = None,
+        rescale_background: bool = False,
     ) -> list[plt.Figure]:
         r"""
         Plot the posterior predictive distribution of the model. It also features a residual plot, defined using the
@@ -423,6 +456,7 @@ class FitResult:
             title: The title of the plot.
             figsize: The size of the figure.
             x_lims: The limits of the x-axis.
+            rescale_background: Whether to rescale the background model to the data with backscal ratio.
 
         Returns:
             A list of matplotlib figures for each observation in the model.
@@ -573,10 +607,14 @@ class FitResult:
                 )
             )
 
+            rescale_background_factor = (
+                obsconf.folded_backratio.data if rescale_background else 1.0
+            )
+
             model_bkg_plot = _plot_binned_samples_with_error(
                 ax[0],
                 xbins.value,
-                y_samples_bkg.value,
+                y_samples_bkg.value * rescale_background_factor,
                 color=BACKGROUND_COLOR,
                 alpha_envelope=alpha_envelope,
                 n_sigmas=n_sigmas,
@@ -585,9 +623,9 @@ class FitResult:
             true_bkg_plot = _plot_poisson_data_with_error(
                 ax[0],
                 xbins.value,
-                y_observed_bkg.value,
-                y_observed_bkg_low.value,
-                y_observed_bkg_high.value,
+                y_observed_bkg.value * rescale_background_factor,
+                y_observed_bkg_low.value * rescale_background_factor,
+                y_observed_bkg_high.value * rescale_background_factor,
                 color=BACKGROUND_DATA_COLOR,
                 alpha=0.7,
             )
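A brief note on these two hunks: with rescale_background=True, both the posterior background model and the observed background counts are multiplied by the same factor before plotting, so they can be compared against the source spectrum on one panel. It is assumed here that folded_backratio encodes the per-channel source-to-background scaling (BACKSCAL and, possibly, exposure ratio); its exact definition lives in jaxspec's observation configuration and is not shown in this diff.

    import numpy as np

    # Toy illustration of the rescaling, with made-up numbers
    y_observed_bkg = np.array([12.0, 9.0, 15.0])   # hypothetical background-region counts
    folded_backratio = 0.25                        # hypothetical scaling factor
    print(y_observed_bkg * folded_backratio)       # background as seen in the source extraction region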
jaxspec/data/instrument.py CHANGED
@@ -1,4 +1,5 @@
 import os
+import sparse
 
 import numpy as np
 import xarray as xr
@@ -92,7 +93,7 @@ class Instrument(xr.Dataset):
 
         else:
             specresp = rmf.matrix.sum(axis=0)
-            rmf.sparse_matrix /= specresp
+            rmf.sparse_matrix = sparse.COO( rmf.matrix / specresp )
 
         return cls.from_matrix(
             rmf.sparse_matrix, specresp, rmf.energ_lo, rmf.energ_hi, rmf.e_min, rmf.e_max
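Standalone sketch of the column normalisation above, using the pydata sparse package: instead of dividing the existing COO matrix in place, the normalised matrix is rebuilt from the dense RMF matrix. The matrix and column sums below are made up; in jaxspec the dense matrix comes from the RMF file and specresp plays the role of the effective area.

    import numpy as np
    import sparse

    matrix = np.array([[0.0, 2.0],
                       [1.0, 2.0]])             # hypothetical redistribution matrix
    specresp = matrix.sum(axis=0)               # per-column sums, as in the diff

    normalised = sparse.COO(matrix / specresp)  # each column now sums to 1, stored as COO
    print(normalised.todense())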
jaxspec/data/util.py CHANGED
@@ -152,11 +152,11 @@ def forward_model_with_multiple_inputs(
        transfer_matrix = BCOO.from_scipy_sparse(
            obs_configuration.transfer_matrix.data.to_scipy_sparse().tocsr()
        )
+        expected_counts = transfer_matrix @ flux_func(parameters).T
 
    else:
        transfer_matrix = np.asarray(obs_configuration.transfer_matrix.data.todense())
-
-    expected_counts = jnp.matvec(transfer_matrix, flux_func(parameters))
+        expected_counts = jnp.matvec(transfer_matrix, flux_func(parameters))
 
    # The result is clipped at 1e-6 to avoid 0 round-off and diverging likelihoods
    return jnp.clip(expected_counts, a_min=1e-6)
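The fix above keeps each branch's matrix product with its own representation: the sparse branch multiplies the BCOO transfer matrix directly, while the dense branch uses jnp.matvec, instead of applying jnp.matvec after the branches regardless of which representation was built. A small consistency sketch with toy values standing in for the transfer matrix and flux_func(parameters):

    import jax.numpy as jnp
    from jax.experimental.sparse import BCOO

    dense_transfer = jnp.array([[0.5, 0.0, 0.1],
                                [0.0, 0.7, 0.2]])
    flux = jnp.array([1.0, 2.0, 3.0])

    sparse_transfer = BCOO.fromdense(dense_transfer)
    counts_sparse = sparse_transfer @ flux            # sparse branch
    counts_dense = jnp.matvec(dense_transfer, flux)   # dense branch (needs a recent JAX with jnp.matvec)
    print(jnp.allclose(counts_sparse, counts_dense))  # True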
jaxspec/fit/_fitter.py CHANGED
@@ -9,6 +9,7 @@ import matplotlib.pyplot as plt
 import numpyro
 
 from jax import random
+from jax.numpy import concatenate
 from jax.random import PRNGKey
 from numpyro.infer import AIES, ESS, MCMC, NUTS, SVI, Predictive, Trace_ELBO
 from numpyro.infer.autoguide import AutoMultivariateNormal
@@ -52,9 +53,18 @@ class BayesianModelFitter(BayesianModel, ABC):
         )
 
         log_likelihood = numpyro.infer.log_likelihood(numpyro_model, posterior_samples)
+        if len(log_likelihood.keys()) > 1:
+            log_likelihood["full"] = concatenate([ll for _, ll in log_likelihood.items()], axis=1)
+            log_likelihood["obs/~/all"] = concatenate(
+                [ll for k, ll in log_likelihood.items() if "obs" in k], axis=1
+            )
+            if self.background_model is not None:
+                log_likelihood["bkg/~/all"] = concatenate(
+                    [ll for k, ll in log_likelihood.items() if "bkg" in k], axis=1
+                )
 
         seeded_model = numpyro.handlers.substitute(
-            numpyro.handlers.seed(numpyro_model, keys[3]),
+            numpyro.handlers.seed(numpyro_model, keys[2]),
             substitute_fn=numpyro.infer.init_to_sample,
         )
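Context for the log-likelihood block added above: numpyro.infer.log_likelihood returns one pointwise array per observed site, so a fit with several spectra (and optional background models) yields several arrays. Concatenating them along the event axis produces single "full", "obs/~/all" and "bkg/~/all" entries that tools expecting one pointwise log-likelihood variable (e.g. az.loo(idata, var_name=...)) can use directly. A shape-bookkeeping sketch with made-up site names and sizes:

    import numpy as np

    log_likelihood = {
        "obs/~/pn": np.zeros((1000, 50)),    # hypothetical: 1000 samples, 50 channels
        "obs/~/mos": np.zeros((1000, 30)),   # hypothetical second spectrum
    }
    log_likelihood["obs/~/all"] = np.concatenate(
        [ll for k, ll in log_likelihood.items() if "obs" in k], axis=1
    )
    print(log_likelihood["obs/~/all"].shape)  # (1000, 80)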
 
@@ -108,12 +118,10 @@
         predictive_parameters = []
 
         for key, value in self._observation_container.items():
+            predictive_parameters.append(f"obs/~/{key}")
             if self.background_model is not None:
-                predictive_parameters.append(f"obs/~/{key}")
                 predictive_parameters.append(f"bkg/~/{key}")
                 # predictive_parameters.append(f"ins/~/{key}")
-            else:
-                predictive_parameters.append(f"obs/~/{key}")
                 # predictive_parameters.append(f"ins/~/{key}")
 
         inference_data.posterior_predictive = inference_data.posterior_predictive[
@@ -247,7 +255,7 @@ class VIFitter(BayesianModelFitter):
 
         svi = SVI(bayesian_model, guide, optimizer, loss=loss)
 
-        keys = random.split(random.PRNGKey(rng_key), 3)
+        keys = random.split(random.PRNGKey(rng_key), 2)
         svi_result = svi.run(keys[0], num_steps)
         params = svi_result.params
 
jaxspec/model/abc.py CHANGED
@@ -372,9 +372,10 @@ class AdditiveComponent(ModelComponent):
         continuum = self.continuum(energy)
         integrated_continuum = self.integrated_continuum(e_low, e_high)
 
-        return jsp.integrate.trapezoid(
-            continuum * energy**2, jnp.log(energy), axis=-1
-        ) + integrated_continuum * (e_high - e_low)
+        return (
+            jsp.integrate.trapezoid(continuum * energy**2, jnp.log(energy), axis=-1)
+            + integrated_continuum * (e_high + e_low) / 2.0
+        )
 
     @partial(jax.jit, static_argnums=0, static_argnames="n_points")
     def photon_flux(self, params, e_low, e_high, n_points=2):
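The first term above integrates the continuum on a logarithmic energy grid, using the identity that the integral of E n(E) dE equals the integral of E^2 n(E) d(ln E); the second term appears to approximate the energy flux of already-integrated (line-like) components by multiplying their integrated photon flux by the mean bin energy (e_low + e_high) / 2 rather than by the bin width, as before. A quick numerical check of the log-space identity for a power-law continuum n(E) = E**-2 between 1 and 10 keV, where the exact answer is ln(10):

    import jax.numpy as jnp
    from jax.scipy.integrate import trapezoid

    energy = jnp.geomspace(1.0, 10.0, 1001)
    continuum = energy**-2.0

    energy_flux = trapezoid(continuum * energy**2, jnp.log(energy))
    print(energy_flux, jnp.log(10.0))   # both ~2.3026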
jaxspec/util/online_storage.py CHANGED
@@ -25,4 +25,5 @@ table_manager = pooch.create(
         "example_data/NGC7793_ULX4/MOS2.arf": "sha256:a126ff5a95a5f4bb93ed846944cf411d6e1c448626cb73d347e33324663d8b3f",
         "example_data/NGC7793_ULX4/PNbackground_spectrum.fits": "sha256:55e017e0c19b324245fef049dff2a7a2e49b9a391667ca9c4f667c4f683b1f49",
     },
+    retry_if_failed=10,
 )
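retry_if_failed is a standard pooch option: the fetcher re-attempts a failed download up to that many times before raising, which helps with flaky connections to the example-data host. A minimal standalone sketch with placeholder values (URL, cache name and file name are not from jaxspec; a registry value of None skips hash verification):

    import pooch

    data_manager = pooch.create(
        path=pooch.os_cache("example-cache"),   # hypothetical local cache directory
        base_url="https://example.com/data/",   # placeholder base URL
        registry={"spectrum.fits": None},       # placeholder file, no hash check
        retry_if_failed=10,                     # same setting as added above
    )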
jaxspec-0.3.2.dist-info/METADATA → jaxspec-0.3.3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: jaxspec
-Version: 0.3.2
+Version: 0.3.3
 Summary: jaxspec is a bayesian spectral fitting library for X-ray astronomy.
 Project-URL: Homepage, https://github.com/renecotyfanboy/jaxspec
 Project-URL: Documentation, https://jaxspec.readthedocs.io/en/latest/
@@ -16,19 +16,15 @@ Requires-Dist: cmasher<2,>=1.6.3
 Requires-Dist: flax>0.10.5
 Requires-Dist: interpax<0.4,>=0.3.5
 Requires-Dist: jax<0.7,>=0.5.0
-Requires-Dist: jaxns<3,>=2.6.7
-Requires-Dist: jaxopt<0.9,>=0.8.3
 Requires-Dist: matplotlib<4,>=3.8.0
 Requires-Dist: mendeleev<1.2,>=0.15
 Requires-Dist: networkx~=3.1
 Requires-Dist: numpy<3.0.0
 Requires-Dist: numpyro<0.20,>=0.17.0
-Requires-Dist: optimistix<0.0.12,>=0.0.10
 Requires-Dist: pandas<3,>=2.2.0
 Requires-Dist: pooch<2,>=1.8.2
 Requires-Dist: scipy<1.16
-Requires-Dist: seaborn<0.14,>=0.13.1
-Requires-Dist: simpleeval<1.1.0,>=0.9.13
+Requires-Dist: seaborn>=0.13.2
 Requires-Dist: sparse>0.15
 Requires-Dist: tinygp<0.4,>=0.3.0
 Requires-Dist: watermark<3,>=2.4.3
jaxspec-0.3.2.dist-info/RECORD → jaxspec-0.3.3.dist-info/RECORD
@@ -2,13 +2,13 @@ jaxspec/__init__.py,sha256=Sbn02lX6Y-zNXk17N8dec22c5jeypiS0LkHmGfz7lWA,126
 jaxspec/analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jaxspec/analysis/_plot.py,sha256=0xEz-e_xk7XvU6PUfbNwxaWg1-SxAF2XAqhkxWEhIFs,6239
 jaxspec/analysis/compare.py,sha256=g2UFhmR9Zt-7cz5gQFOB6lXuklXB3yTyUvjTypOzoSY,725
-jaxspec/analysis/results.py,sha256=tIBWmLoX43EY2BXt50ec8A-DqQ98PMd3m-FqTRT4iRE,26073
+jaxspec/analysis/results.py,sha256=WoYpCR485TdMorTLZhBXXrPnw8qidD5LkOGmHHu-hZE,27923
 jaxspec/data/__init__.py,sha256=aantcYKC9kZFvaE-V2SIwSuLhIld17Kjrd9CIUu___Y,415
-jaxspec/data/instrument.py,sha256=RDiG_LkucvnF2XE_ghTFME6d_2YirgQUcEY0gEle6dk,4775
+jaxspec/data/instrument.py,sha256=weiPcEll1jZM6lqhxpF1aPIRwvaP6bygSB8jLBABXto,4815
 jaxspec/data/obsconf.py,sha256=bkYuD6mJgj8QmRaDVhcnXwUukVdo20xllzaI57prHag,10056
 jaxspec/data/observation.py,sha256=7FHJm1jHEEFyrqxg3COsGmfdh5dg-5XnfKCp1yb5fNY,7411
 jaxspec/data/ogip.py,sha256=eMmBuROW4eMRxRHkPPyGHf933e0IcREqB8WMQFMS2lY,9810
-jaxspec/data/util.py,sha256=4_f6ByGjUEZXTwrB37dCyYaTB1pjF10Z0ho7-4GrQuc,9761
+jaxspec/data/util.py,sha256=2JWoHsKJqGXUn74zPeoAqdU86x2n8NyfZGvpqC21ZaY,9832
 jaxspec/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jaxspec/experimental/interpolator.py,sha256=mJRdCB4B71le3dQL_S_E6Wkqpb6QLT7Wdzlok-rU6Ok,2652
 jaxspec/experimental/interpolator_jax.py,sha256=13lflsjbImDRZTObSRDtZnujrXBvEP367Rn20eByONs,2967
@@ -18,10 +18,10 @@ jaxspec/experimental/tabulated.py,sha256=H0llUiso2KGH4xUzTUSVPy-6I8D3wm707lU_Z1P
 jaxspec/fit/__init__.py,sha256=OaS0-Hkb3Hd-AkE2o-KWfoWMX0NSCPY-_FP2znHf9l0,153
 jaxspec/fit/_bayesian_model.py,sha256=7c2Twgz06QV1S9DdctdVk5YT1v7P-ln100bWXAvv7Go,15179
 jaxspec/fit/_build_model.py,sha256=pNZVuVfwOq3Pg23opH7xRv28DsSkQZpvy2Z-1hQSfNs,3219
-jaxspec/fit/_fitter.py,sha256=92gd1P7CNIqusGb64x_DpBcb0KcoGyfvSDiEnRbfqP0,9709
+jaxspec/fit/_fitter.py,sha256=KjOqKVoB9vd2Lg-8-j-3pS78ZmhA2_ICHLkxVEJHiYc,10160
 jaxspec/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jaxspec/model/_graph_util.py,sha256=hPvHYmAxb7P3nyIecaZ7RqWOjwcZ1WvUByt_yNANiaY,4552
-jaxspec/model/abc.py,sha256=RGrqDrXVNjCy7GYBZL-l1PZ3Lpr37SsMIw7L9_B8WJ4,14773
+jaxspec/model/abc.py,sha256=vvHM4teepc8VLqbpAtqf1b55oF00R_Lo_6nrBO5KmmQ,14793
 jaxspec/model/additive.py,sha256=rEONSy7b7lwfXIhuPqtI4y2Yhq55EqrlEtEckEe6TA0,20538
 jaxspec/model/background.py,sha256=VLSrU0YCW9GSHCtaEdcth-sp74aPyEVSizIMFkTpM7M,7759
 jaxspec/model/instrument.py,sha256=1zLZgHmBZs8RLKTMT3Wu4bCx6JnxBUjhRIpYG2rLaZM,2947
@@ -33,10 +33,10 @@ jaxspec/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 jaxspec/util/abundance.py,sha256=fsC313taIlGzQsZNwbYsJupDWm7ZbqzGhY66Ku394Mw,8546
 jaxspec/util/integrate.py,sha256=7GwBSagmDzsF3P53tPs-oakeq0zHEwmZZS2zQlXngbE,4634
 jaxspec/util/misc.py,sha256=O3qorCL1Y2X1BS2jdd36C1eDHK9QDXTSOr9kj3uqcJo,654
-jaxspec/util/online_storage.py,sha256=vm56RfcbFKpkRVfr0bXO7J9aQxuBq-I_oEgA26YIhCo,2469
+jaxspec/util/online_storage.py,sha256=wwpowxmDgAqKzeUwmGUIxttA4VKUoR270Ew-F_0DrkE,2493
 jaxspec/util/typing.py,sha256=ZQM_l68qyYnIBZPz_1mKvwPMx64jvVBD8Uj6bx9sHv0,140
-jaxspec-0.3.2.dist-info/METADATA,sha256=10PjN7QwhbU8BoZc9f1Lga2n1u1_j4p8Lk2Syy6cJC8,4199
-jaxspec-0.3.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-jaxspec-0.3.2.dist-info/entry_points.txt,sha256=4ffU5AImfcEBxgWTqopQll2YffpFldOswXRh16pd0Dc,72
-jaxspec-0.3.2.dist-info/licenses/LICENSE.md,sha256=2q5XoWzddts5IqzIcgYYMOL21puU3MfO8gvT3Ype1eQ,1073
-jaxspec-0.3.2.dist-info/RECORD,,
+jaxspec-0.3.3.dist-info/METADATA,sha256=RUeQs3cLTKOL_7lJHR2Nmjg-E0YMaHafSIN2YssSkx4,4045
+jaxspec-0.3.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+jaxspec-0.3.3.dist-info/entry_points.txt,sha256=4ffU5AImfcEBxgWTqopQll2YffpFldOswXRh16pd0Dc,72
+jaxspec-0.3.3.dist-info/licenses/LICENSE.md,sha256=2q5XoWzddts5IqzIcgYYMOL21puU3MfO8gvT3Ype1eQ,1073
+jaxspec-0.3.3.dist-info/RECORD,,
jaxspec-0.3.2.dist-info/WHEEL → jaxspec-0.3.3.dist-info/WHEEL
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: hatchling 1.27.0
+Generator: hatchling 1.28.0
 Root-Is-Purelib: true
 Tag: py3-none-any