bartz 0.6.0__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
bartz/testing/_dgp.py ADDED
@@ -0,0 +1,442 @@
1
+ # bartz/src/bartz/testing/_dgp.py
2
+ #
3
+ # Copyright (c) 2026, The Bartz Contributors
4
+ #
5
+ # This file is part of bartz.
6
+ #
7
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ # of this software and associated documentation files (the "Software"), to deal
9
+ # in the Software without restriction, including without limitation the rights
10
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ # copies of the Software, and to permit persons to whom the Software is
12
+ # furnished to do so, subject to the following conditions:
13
+ #
14
+ # The above copyright notice and this permission notice shall be included in all
15
+ # copies or substantial portions of the Software.
16
+ #
17
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ # SOFTWARE.
24
+
25
+
26
+ """Define `gen_data` that generates simulated data for testing."""
27
+
28
+ from dataclasses import replace
29
+
30
+ from equinox import Module, error_if
31
+ from jax import numpy as jnp
32
+ from jax import random
33
+ from jaxtyping import Array, Bool, Float, Int, Integer, Key
34
+
35
+ from bartz.jaxext import split
36
+
37
+
38
def generate_x(key: Key[Array, ''], n: int, p: int) -> Float[Array, 'p n']:
    """Draw standardized predictors.

    Entries are sampled i.i.d. from U(-√3, √3), which has mean 0 and
    unit variance.
    """
    half_width = jnp.sqrt(3.0)
    return random.uniform(key, (p, n), minval=-half_width, maxval=half_width)
44
+
45
+
46
def generate_partition(key: Key[Array, ''], p: int, k: int) -> Bool[Array, 'k p']:
    """Randomly assign the p predictors to the k outcome components.

    The assignment is balanced: every component receives either
    p // k or p // k + 1 predictors.
    """
    keys = split(key)
    # Balanced component labels 0..k-1, each repeated floor(p/k) or
    # ceil(p/k) times, then shuffled across predictor positions.
    labels = jnp.trunc(jnp.linspace(0, k, p, endpoint=False)).astype(jnp.int32)
    shuffled: Int[Array, 'p'] = random.permutation(keys.pop(), labels)
    # Random relabeling of components so row order carries no information.
    relabeling: Int[Array, 'k'] = random.permutation(keys.pop(), k)
    return shuffled == relabeling[:, None]
57
+
58
+
59
def generate_beta_shared(
    key: Key[Array, ''], p: int, sigma2_lin: Float[Array, '']
) -> Float[Array, ' p']:
    """Draw the common linear coefficients used when lambda=1.

    Per-coefficient variance is sigma2_lin / p.
    """
    scale = jnp.sqrt(sigma2_lin / p)
    return scale * random.normal(key, (p,))
65
+
66
+
67
def generate_beta_separate(
    key: Key[Array, ''], partition: Bool[Array, 'k p'], sigma2_lin: Float[Array, '']
) -> Float[Array, 'k p']:
    """Draw per-component linear coefficients used when lambda=0."""
    k, p = partition.shape
    raw: Float[Array, 'k p'] = random.normal(key, (k, p))
    # Each component uses ~p/k predictors, hence p/k instead of p here.
    variance = sigma2_lin / (p / k)
    masked = jnp.where(partition, raw, 0.0)
    return masked * jnp.sqrt(variance)
75
+
76
+
77
def compute_linear_mean_shared(
    beta_shared: Float[Array, ' p'], x: Float[Array, 'p n']
) -> Float[Array, ' n']:
    """Linear mean for the fully shared case: mulin_j = sum_r beta_r x_rj."""
    return jnp.matmul(beta_shared, x)
82
+
83
+
84
def compute_linear_mean_separate(
    beta_separate: Float[Array, 'k p'], x: Float[Array, 'p n']
) -> Float[Array, 'k n']:
    """Per-component linear mean: mulin_ij = sum_r beta_ir x_rj."""
    return jnp.matmul(beta_separate, x)
89
+
90
+
91
def combine_mulin(
    mulin_shared: Float[Array, ' n'],
    mulin_separate: Float[Array, 'k n'],
    lam: Float[Array, ''],
) -> Float[Array, 'k n']:
    """Mix the shared and separate linear means.

    The weights sqrt(lam) and sqrt(1 - lam) interpolate between
    lam=0 (independent rows) and lam=1 (identical rows).
    """
    w_shared = jnp.sqrt(lam)
    w_separate = jnp.sqrt(1.0 - lam)
    return w_separate * mulin_separate + w_shared * mulin_shared
98
+
99
+
100
def interaction_pattern(p: int, q: Integer[Array, ''] | int) -> Bool[Array, 'p p']:
    """Build a circulant interaction pattern with q interactions per variable.

    Parameters
    ----------
    p
        Number of predictors
    q
        Number of interactions per predictor (must be even)

    Returns
    -------
    Symmetric boolean matrix of shape (p, p); each row and column has
    q + 1 nonzero entries (q neighbors plus the diagonal).
    """
    q = error_if(q, q % 2 != 0, 'q must be even')
    q = error_if(q, q >= p, 'q must be less than p')

    # Circular distance between indices on a ring of length p; two
    # predictors interact iff they are within q/2 steps of each other.
    row, col = jnp.ogrid[:p, :p]
    gap = jnp.abs(row - col)
    circular_gap = jnp.minimum(gap, p - gap)
    return circular_gap <= q // 2
120
+
121
+
122
def generate_A_shared(
    key: Key[Array, ''],
    p: int,
    q: Integer[Array, ''],
    sigma2_quad: Float[Array, ''],
    kurt_x: float,
) -> Float[Array, 'p p']:
    """Draw the common quadratic coefficients used when lambda=1.

    Coefficients outside the circulant interaction pattern are zero;
    the remaining ones are scaled by sigma2_quad / (p * (kurt_x - 1 + q)).
    """
    mask: Bool[Array, 'p p'] = interaction_pattern(p, q)
    raw: Float[Array, 'p p'] = random.normal(key, (p, p))
    masked = jnp.where(mask, raw, 0.0)
    sigma2_A = sigma2_quad / (p * (kurt_x - 1 + q))
    return masked * jnp.sqrt(sigma2_A)
135
+
136
+
137
def partitioned_interaction_pattern(
    partition: Bool[Array, 'k p'], q: Integer[Array, ''] | int
) -> Bool[Array, 'k p p']:
    """Build k circulant interaction patterns over disjoint variable sets.

    Parameters
    ----------
    partition
        Binary partition of shape (k, p) indicating variable assignments
        to components
    q
        Number of interactions per predictor (must be even and < p // k)

    Returns
    -------
    Interaction patterns of shape (k, p, p)
    """
    k, p = partition.shape
    q = error_if(q, q % 2 != 0, 'q must be even')
    q = error_if(q, q >= p // k, 'q must be less than p // k')

    # Rank of each predictor within its component (running count along p).
    ranks: Int[Array, 'k p'] = jnp.cumsum(partition, axis=1)
    gaps: Int[Array, 'k p p'] = jnp.abs(ranks[:, :, None] - ranks[:, None, :])
    component_sizes: Int[Array, 'k'] = jnp.max(ranks, axis=1)
    # Wrap the rank distance on a ring of the component's own size.
    circular_gaps: Int[Array, 'k p p'] = jnp.minimum(
        gaps, component_sizes[:, None, None] - gaps
    )
    pattern: Bool[Array, 'k p p'] = circular_gaps <= q // 2
    # Mask out rows and columns of predictors outside each component.
    pattern = jnp.where(partition[:, :, None], pattern, False)
    return jnp.where(partition[:, None, :], pattern, False)
169
+
170
+
171
def generate_A_separate(
    key: Key[Array, ''],
    partition: Bool[Array, 'k p'],
    q: Integer[Array, ''],
    sigma2_quad: Float[Array, ''],
    kurt_x: float,
) -> Float[Array, 'k p p']:
    """Draw per-component quadratic coefficients used when lambda=0."""
    k, p = partition.shape
    raw: Float[Array, 'k p p'] = random.normal(key, (k, p, p))
    mask: Bool[Array, 'k p p'] = partitioned_interaction_pattern(partition, q)
    masked = jnp.where(mask, raw, 0.0)
    # Each component uses ~p/k predictors, hence p/k in place of p.
    sigma2_A = sigma2_quad / (p / k * (kurt_x - 1 + q))
    return masked * jnp.sqrt(sigma2_A)
187
+
188
+
189
def compute_muquad_shared(
    A_shared: Float[Array, 'p p'], x: Float[Array, 'p n']
) -> Float[Array, ' n']:
    """Quadratic mean for the lambda=1 case.

    muquad_j = sum_{r,s} A_rs x_rj x_sj; every outcome component
    receives the same row.
    """
    return jnp.einsum('ab,aj,bj->j', A_shared, x, x)
198
+
199
+
200
def compute_muquad_separate(
    A_separate: Float[Array, 'k p p'], x: Float[Array, 'p n']
) -> Float[Array, 'k n']:
    """Quadratic mean for the lambda=0 case.

    muquad_ij = sum_{r,s} A_irs x_rj x_sj; rows are independent
    across components.
    """
    return jnp.einsum('iab,aj,bj->ij', A_separate, x, x)
209
+
210
+
211
def combine_muquad(
    muquad_shared: Float[Array, ' n'],
    muquad_separate: Float[Array, 'k n'],
    lam: Float[Array, ''],
) -> Float[Array, 'k n']:
    """Mix the shared and separate quadratic means.

    Same sqrt-weighted interpolation as `combine_mulin`: lam=0 keeps
    the independent rows, lam=1 the common one.
    """
    w_shared = jnp.sqrt(lam)
    w_separate = jnp.sqrt(1.0 - lam)
    return w_separate * muquad_separate + w_shared * muquad_shared
218
+
219
+
220
def compute_quadratic_mean(
    A: Float[Array, 'k p p'], x: Float[Array, 'p n']
) -> Float[Array, 'k n']:
    """Quadratic part of the latent mean: out_ij = sum_{r,s} A_irs x_rj x_sj.

    NOTE(review): this is the same contraction as `compute_muquad_separate`;
    the two could be consolidated.
    """
    return jnp.einsum('iab,aj,bj->ij', A, x, x)
225
+
226
+
227
def generate_outcome(
    key: Key[Array, ''], mu: Float[Array, 'k n'], sigma2_eps: Float[Array, '']
) -> Float[Array, 'k n']:
    """Add homoskedastic Gaussian noise to the latent means."""
    noise: Float[Array, 'k n'] = random.normal(key, mu.shape)
    return mu + jnp.sqrt(sigma2_eps) * noise
233
+
234
+
235
class DGP(Module):
    """Quadratic multivariate DGP.

    Immutable container (equinox Module) holding the generated data, all
    intermediate quantities, and the generating parameters.

    Parameters
    ----------
    x
        Predictors of shape (p, n), variance 1
    y
        Noisy outcomes of shape (k, n)
    partition
        Predictor-outcome assignment partition of shape (k, p)
    beta_shared
        Shared linear coefficients of shape (p,)
    beta_separate
        Separate linear coefficients of shape (k, p)
    mulin_shared
        Linear mean at lambda=1 (shared), shape (n,), common to all components
    mulin_separate
        Linear mean at lambda=0 (separate), shape (k, n), rows independent
    mulin
        Linear part of latent mean of shape (k, n)
    A_shared
        Shared quadratic coefficients of shape (p, p)
    A_separate
        Separate quadratic coefficients of shape (k, p, p)
    muquad_shared
        Quadratic mean at lambda=1 (shared), shape (n,), common to all components
    muquad_separate
        Quadratic mean at lambda=0 (separate), shape (k, n), rows independent
    muquad
        Quadratic part of latent mean of shape (k, n)
    mu
        True latent means of shape (k, n)
    q
        Number of interactions per predictor
    lam
        Coupling parameter in [0, 1]
    sigma2_lin
        Prior and expected population variance of mulin
    sigma2_quad
        Expected population variance of muquad
    sigma2_eps
        Variance of the error
    """

    # Main outputs
    x: Float[Array, 'p n']
    y: Float[Array, 'k n']

    # Intermediate results
    partition: Bool[Array, 'k p']
    beta_shared: Float[Array, ' p']
    beta_separate: Float[Array, 'k p']
    mulin_shared: Float[Array, ' n']
    mulin_separate: Float[Array, 'k n']
    mulin: Float[Array, 'k n']
    A_shared: Float[Array, 'p p']
    A_separate: Float[Array, 'k p p']
    muquad_shared: Float[Array, ' n']
    muquad_separate: Float[Array, 'k n']
    muquad: Float[Array, 'k n']
    mu: Float[Array, 'k n']

    # Params
    q: Integer[Array, '']
    lam: Float[Array, '']
    sigma2_lin: Float[Array, '']
    sigma2_quad: Float[Array, '']
    sigma2_eps: Float[Array, '']

    kurt_x: float = 9 / 5  # kurtosis of uniform distribution

    @property
    def sigma2_pri(self) -> Float[Array, '']:
        """Prior variance of y."""
        return self.sigma2_pop + self.sigma2_mean

    @property
    def sigma2_pop(self) -> Float[Array, '']:
        """Expected population variance of y."""
        return self.sigma2_lin + self.sigma2_quad + self.sigma2_eps

    @property
    def sigma2_mean(self) -> Float[Array, '']:
        """Variance of the mean function."""
        # NOTE(review): uses only the quadratic term, with the same
        # (kurt_x - 1 + q) factor as in generate_A_shared; confirm the
        # linear term is deliberately excluded from this quantity.
        return self.sigma2_quad / (self.kurt_x - 1 + self.q)

    def split(self, n_train: int | None = None) -> tuple['DGP', 'DGP']:
        """Split the data into training and test sets.

        Only the observation axis (n) is split; coefficients, partition,
        and variance parameters are shared by both returned objects.
        """
        if n_train is None:
            # Default: even 50/50 split.
            n_train = self.x.shape[1] // 2
        # NOTE(review): assert is stripped under `python -O`; consider
        # raising ValueError for input validation.
        assert 0 < n_train < self.x.shape[1], 'n_train must be in (0, n)'
        # First n_train observations become the training set.
        train = replace(
            self,
            x=self.x[:, :n_train],
            y=self.y[:, :n_train],
            mulin_shared=self.mulin_shared[:n_train],
            mulin_separate=self.mulin_separate[:, :n_train],
            mulin=self.mulin[:, :n_train],
            muquad_shared=self.muquad_shared[:n_train],
            muquad_separate=self.muquad_separate[:, :n_train],
            muquad=self.muquad[:, :n_train],
            mu=self.mu[:, :n_train],
        )
        # Remaining observations become the test set.
        test = replace(
            self,
            x=self.x[:, n_train:],
            y=self.y[:, n_train:],
            mulin_shared=self.mulin_shared[n_train:],
            mulin_separate=self.mulin_separate[:, n_train:],
            mulin=self.mulin[:, n_train:],
            muquad_shared=self.muquad_shared[n_train:],
            muquad_separate=self.muquad_separate[:, n_train:],
            muquad=self.muquad[:, n_train:],
            mu=self.mu[:, n_train:],
        )
        return train, test
352
+
353
+
354
def gen_data(
    key: Key[Array, ''],
    *,
    n: int,
    p: int,
    k: int,
    q: Integer[Array, ''] | int,
    lam: Float[Array, ''] | float,
    sigma2_lin: Float[Array, ''] | float,
    sigma2_quad: Float[Array, ''] | float,
    sigma2_eps: Float[Array, ''] | float,
) -> DGP:
    """Generate data from a quadratic multivariate DGP.

    Parameters
    ----------
    key
        JAX random key
    n
        Number of observations
    p
        Number of predictors
    k
        Number of outcome components
    q
        Number of interactions per predictor (must be even and < p // k)
    lam
        Coupling parameter in [0, 1]. 0=independent, 1=identical components
    sigma2_lin
        Prior and expected population variance of the linear term
    sigma2_quad
        Expected population variance of the quadratic term
    sigma2_eps
        Variance of the error term

    Returns
    -------
    An object with all generated data and parameters.
    """
    # NOTE(review): assert is stripped under `python -O`; consider raising.
    assert p >= k, 'p must be at least k'

    # Validate q: it must be even, and small enough that each component's
    # interaction pattern fits within its ~p/k assigned predictors.
    q = jnp.asarray(q)
    q = error_if(q, q % 2 != 0, 'q must be even')
    q = error_if(q, q >= p // k, 'q must be less than p // k')

    # One key per random draw below, consumed in order via keys.pop().
    keys = split(key, 7)

    # Promote python scalars to jax arrays.
    lam = jnp.asarray(lam)
    sigma2_lin = jnp.asarray(sigma2_lin)
    sigma2_quad = jnp.asarray(sigma2_quad)
    sigma2_eps = jnp.asarray(sigma2_eps)

    # Linear part: shared and separate coefficients, mixed by lam.
    x = generate_x(keys.pop(), n, p)
    partition = generate_partition(keys.pop(), p, k)
    beta_shared = generate_beta_shared(keys.pop(), p, sigma2_lin)
    beta_separate = generate_beta_separate(keys.pop(), partition, sigma2_lin)
    mulin_shared = compute_linear_mean_shared(beta_shared, x)
    mulin_separate = compute_linear_mean_separate(beta_separate, x)
    mulin = combine_mulin(mulin_shared, mulin_separate, lam)
    # Quadratic part: same shared/separate structure, mixed by lam.
    A_shared = generate_A_shared(keys.pop(), p, q, sigma2_quad, DGP.kurt_x)
    A_separate = generate_A_separate(keys.pop(), partition, q, sigma2_quad, DGP.kurt_x)
    muquad_shared = compute_muquad_shared(A_shared, x)
    muquad_separate = compute_muquad_separate(A_separate, x)
    muquad = combine_muquad(muquad_shared, muquad_separate, lam)
    # Latent mean and noisy outcome.
    mu = mulin + muquad
    y = generate_outcome(keys.pop(), mu, sigma2_eps)

    return DGP(
        x=x,
        y=y,
        partition=partition,
        beta_shared=beta_shared,
        beta_separate=beta_separate,
        mulin_shared=mulin_shared,
        mulin_separate=mulin_separate,
        mulin=mulin,
        A_shared=A_shared,
        A_separate=A_separate,
        muquad_shared=muquad_shared,
        muquad_separate=muquad_separate,
        muquad=muquad,
        mu=mu,
        q=q,
        lam=lam,
        sigma2_lin=sigma2_lin,
        sigma2_quad=sigma2_quad,
        sigma2_eps=sigma2_eps,
    )
@@ -1,20 +1,23 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: bartz
3
- Version: 0.6.0
3
+ Version: 0.8.0
4
4
  Summary: Super-fast BART (Bayesian Additive Regression Trees) in Python
5
5
  Author: Giacomo Petrillo
6
6
  Author-email: Giacomo Petrillo <info@giacomopetrillo.com>
7
7
  License-Expression: MIT
8
8
  Requires-Dist: equinox>=0.12.2
9
- Requires-Dist: jax>=0.4.35
10
- Requires-Dist: jaxlib>=0.4.35
9
+ Requires-Dist: jax>=0.5.3
11
10
  Requires-Dist: jaxtyping>=0.3.2
12
11
  Requires-Dist: numpy>=1.25.2
13
12
  Requires-Dist: scipy>=1.11.4
13
+ Requires-Dist: jax[cuda12] ; extra == 'cuda12'
14
+ Requires-Dist: jax[cuda13] ; extra == 'cuda13'
14
15
  Requires-Python: >=3.10
15
- Project-URL: Documentation, https://gattocrucco.github.io/bartz/docs-dev
16
- Project-URL: Homepage, https://github.com/Gattocrucco/bartz
17
- Project-URL: Issues, https://github.com/Gattocrucco/bartz/issues
16
+ Project-URL: Documentation, https://bartz-org.github.io/bartz/docs-dev
17
+ Project-URL: Homepage, https://github.com/bartz-org/bartz
18
+ Project-URL: Issues, https://github.com/bartz-org/bartz/issues
19
+ Provides-Extra: cuda12
20
+ Provides-Extra: cuda13
18
21
  Description-Content-Type: text/markdown
19
22
 
20
23
  [![PyPI](https://img.shields.io/pypi/v/bartz)](https://pypi.org/project/bartz/)
@@ -32,16 +35,18 @@ This Python module provides an implementation of BART that runs on GPU, to proce
32
35
 
33
36
  On CPU, bartz runs at the speed of dbarts (the fastest implementation I know of) if n > 20,000, but using 1/20 of the memory. On GPU, the speed premium depends on sample size; it is convenient over CPU only for n > 10,000. The maximum speedup is currently 200x, on an Nvidia A100 and with at least 2,000,000 observations.
34
37
 
35
- [This Colab notebook](https://colab.research.google.com/github/Gattocrucco/bartz/blob/main/docs/examples/basic_simdata.ipynb) runs bartz with n = 100,000 observations, p = 1000 predictors, 10,000 trees, for 1000 MCMC iterations, in 5 minutes.
38
+ [This Colab notebook](https://colab.research.google.com/github/bartz-org/bartz/blob/main/docs/examples/basic_simdata.ipynb) runs bartz with n = 100,000 observations, p = 1000 predictors, 10,000 trees, for 1000 MCMC iterations, in 10 minutes.
39
+
40
+ BART is a very flexible method with many variants. This implementation provides only a small subset of the possible features. If you need a feature from [another BART implementation](https://bartz-org.github.io/bartz/docs-dev/pkglist.html) or from the BART literature, please [open an issue on github](https://github.com/bartz-org/bartz/issues).
36
41
 
37
42
  ## Links
38
43
 
39
- - [Documentation (latest release)](https://gattocrucco.github.io/bartz/docs)
40
- - [Documentation (development version)](https://gattocrucco.github.io/bartz/docs-dev)
41
- - [Repository](https://github.com/Gattocrucco/bartz)
42
- - [Code coverage](https://gattocrucco.github.io/bartz/coverage)
43
- - [Benchmarks](https://gattocrucco.github.io/bartz/benchmarks)
44
- - [List of BART packages](https://gattocrucco.github.io/bartz/docs-dev/pkglist.html)
44
+ - [Documentation (latest release)](https://bartz-org.github.io/bartz/docs)
45
+ - [Documentation (development version)](https://bartz-org.github.io/bartz/docs-dev)
46
+ - [Repository](https://github.com/bartz-org/bartz)
47
+ - [Code coverage](https://bartz-org.github.io/bartz/coverage)
48
+ - [Benchmarks](https://bartz-org.github.io/bartz/benchmarks)
49
+ - [List of BART packages](https://bartz-org.github.io/bartz/docs-dev/pkglist.html)
45
50
 
46
51
  ## Citing bartz
47
52
 
@@ -0,0 +1,25 @@
1
+ bartz/.DS_Store,sha256=cZGvRte4wNTAPFAvlOsBNTvC5hXXXEWzrw4xqyOANLU,6148
2
+ bartz/BART/__init__.py,sha256=1vTvv46Jw74DDG0KoydcjCE0pMr2hGpHPKQog-gax5U,1312
3
+ bartz/BART/_gbart.py,sha256=zJuSlBmbDwPyPFrL3BdxBYM31voJTNEXKMAOftQe-NU,20439
4
+ bartz/__init__.py,sha256=-gvZRcWph0WZLiHPc1kH-udnPz9CzlhWtVjP9khAU3Y,1543
5
+ bartz/_interface.py,sha256=zj05aZBQXSx9I-pz06BMNKW0mlwkgORXzcmxXy1joBk,34329
6
+ bartz/_profiler.py,sha256=1rNIEtsTF7I47O_9ey2kNtxzxvzo8g67erVWpx_nPw8,9152
7
+ bartz/_version.py,sha256=27YY3zFpeaDh6JoC40AqkjBrn68SqFlsWZzjZtw5jwU,22
8
+ bartz/debug.py,sha256=_iNwaccmJzSDjYO6w3iD5u0C91FQc8FUId5M9vyZRq0,43831
9
+ bartz/grove.py,sha256=LtC0v9mMfocgHDYNVUMRYUj0lHsyggAv8D2lma8qIPo,12876
10
+ bartz/jaxext/__init__.py,sha256=6wZ_hy1UZIC1C7GP3GcKPPjIAZF2nUiQ2x6nt5R4sY8,8901
11
+ bartz/jaxext/_autobatch.py,sha256=Fsu50woqb0AWBI9cqlAZxqlx5_feTGvFoyKAh3J6TZg,14337
12
+ bartz/jaxext/scipy/__init__.py,sha256=XF-5N7T-DHlVPcBksHGZP4fewKA14XCmVXmt9oKv9WQ,1233
13
+ bartz/jaxext/scipy/special.py,sha256=T8HIdZ6Oeb_5YnXrRZtrHET7diur0XjQ9BznHXAOTsc,8081
14
+ bartz/jaxext/scipy/stats.py,sha256=6OwC3_oQ6Ry44cvNFNvdH2SB5YOOGclKQgvFDaqcfCc,1489
15
+ bartz/mcmcloop.py,sha256=apUODUmqWY10XY0LDEWwiz8GCOj_OoNcieJDsNh1FyY,28368
16
+ bartz/mcmcstep/__init__.py,sha256=8GufOXK964AWEkyc8w0YQK0Xxkoctl2HJtQHEesUYtA,1564
17
+ bartz/mcmcstep/_moves.py,sha256=Gs0vtss_lbxxjocGF35cKWfJvG92XRIpf_bZBFDfFNI,31027
18
+ bartz/mcmcstep/_state.py,sha256=0KPebAeMkX33tqBcUWWlMQSQe07m3qHkNGIVgitHfh4,40688
19
+ bartz/mcmcstep/_step.py,sha256=je2QUeD33NpP6_NkWIeVF_QAwY0NaqGs_8tbmZ1eHRc,50921
20
+ bartz/prepcovars.py,sha256=M5jGyvciob_BwwEHcpQxhdgVRRCby1vSJH2Iw61loAo,8734
21
+ bartz/testing/__init__.py,sha256=TXJZAbUKVQ8upUce46HoPbMypvRODxhTOc2SACbreZw,1266
22
+ bartz/testing/_dgp.py,sha256=Kq7Jeb-G9xexZUziPKNjGT-PldwiC_jSeJLC_0tgjIs,14517
23
+ bartz-0.8.0.dist-info/WHEEL,sha256=YUH1mBqsx8Dh2cQG2rlcuRYUhJddG9iClegy4IgnHik,79
24
+ bartz-0.8.0.dist-info/METADATA,sha256=WV4vD_GuqFK8VlS1EiacANhfSi0Jw-bydxF4pLlPPx0,3283
25
+ bartz-0.8.0.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: uv 0.7.8
2
+ Generator: uv 0.9.11
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any