arviz 0.17.0__py3-none-any.whl → 0.17.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,165 @@
+ """Functions for evaluating ECDFs and their confidence bands."""
+ from typing import Any, Callable, Optional, Tuple
+ import warnings
+
+ import numpy as np
+ from scipy.stats import uniform, binom
+
+
+ def compute_ecdf(sample: np.ndarray, eval_points: np.ndarray) -> np.ndarray:
+     """Compute ECDF of the sorted `sample` at the evaluation points."""
+     return np.searchsorted(sample, eval_points, side="right") / len(sample)
+
+
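A quick aside on the helper above (an illustrative sketch of my own, not part of the released file): `np.searchsorted` assumes its first argument is already sorted, so callers are expected to sort the sample before calling `compute_ecdf`, as `_simulate_ecdf` below does.

    import numpy as np

    sample = np.sort(np.array([3.0, 1.0, 2.0]))  # must be sorted before calling compute_ecdf
    eval_points = np.array([1.5, 2.5, 3.5])
    ecdf = np.searchsorted(sample, eval_points, side="right") / len(sample)
    # ecdf -> array([0.333..., 0.666..., 1.0]): fraction of draws <= each evaluation point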
14
+ def _get_ecdf_points(
15
+ sample: np.ndarray, eval_points: np.ndarray, difference: bool
16
+ ) -> Tuple[np.ndarray, np.ndarray]:
17
+ """Compute the coordinates for the ecdf points using compute_ecdf."""
18
+ x = eval_points
19
+ y = compute_ecdf(sample, eval_points)
20
+
21
+ if not difference and y[0] > 0:
22
+ x = np.insert(x, 0, x[0])
23
+ y = np.insert(y, 0, 0)
24
+ return x, y
25
+
26
+
27
+ def _simulate_ecdf(
28
+ ndraws: int,
29
+ eval_points: np.ndarray,
30
+ rvs: Callable[[int, Optional[Any]], np.ndarray],
31
+ random_state: Optional[Any] = None,
32
+ ) -> np.ndarray:
33
+ """Simulate ECDF at the `eval_points` using the given random variable sampler"""
34
+ sample = rvs(ndraws, random_state=random_state)
35
+ sample.sort()
36
+ return compute_ecdf(sample, eval_points)
37
+
38
+
39
+ def _fit_pointwise_band_probability(
40
+ ndraws: int,
41
+ ecdf_at_eval_points: np.ndarray,
42
+ cdf_at_eval_points: np.ndarray,
43
+ ) -> float:
44
+ """Compute the smallest marginal probability of a pointwise confidence band that
45
+ contains the ECDF."""
46
+ ecdf_scaled = (ndraws * ecdf_at_eval_points).astype(int)
47
+ prob_lower_tail = np.amin(binom.cdf(ecdf_scaled, ndraws, cdf_at_eval_points))
48
+ prob_upper_tail = np.amin(binom.sf(ecdf_scaled - 1, ndraws, cdf_at_eval_points))
49
+ prob_pointwise = 1 - 2 * min(prob_lower_tail, prob_upper_tail)
50
+ return prob_pointwise
51
+
52
+
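To make the tail logic above concrete, here is a hedged numeric sketch with made-up numbers (my own illustration, not part of the released file): at one evaluation point where the true CDF is 0.5, suppose 53 of 100 draws fall at or below it. The count sits in the upper binomial tail, and the smallest equal-tailed level whose band still contains it comes from that more extreme tail; taking the minimum of each tail across all evaluation points, as the function does, gives the smallest pointwise band that contains the whole ECDF.

    import numpy as np
    from scipy.stats import binom

    ndraws, cdf_value, count = 100, 0.5, 53
    lower_tail = binom.cdf(count, ndraws, cdf_value)     # P(X <= 53) under Binomial(100, 0.5)
    upper_tail = binom.sf(count - 1, ndraws, cdf_value)  # P(X >= 53)
    level = 1 - 2 * min(lower_tail, upper_tail)          # ~0.38, smallest central band containing 53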
+ def _get_pointwise_confidence_band(
+     prob: float, ndraws: int, cdf_at_eval_points: np.ndarray
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """Compute the `prob`-level pointwise confidence band."""
+     count_lower, count_upper = binom.interval(prob, ndraws, cdf_at_eval_points)
+     prob_lower = count_lower / ndraws
+     prob_upper = count_upper / ndraws
+     return prob_lower, prob_upper
+
+
+ def ecdf_confidence_band(
+     ndraws: int,
+     eval_points: np.ndarray,
+     cdf_at_eval_points: np.ndarray,
+     prob: float = 0.95,
+     method="simulated",
+     **kwargs,
+ ) -> Tuple[np.ndarray, np.ndarray]:
+     """Compute the `prob`-level confidence band for the ECDF.
+
+     Arguments
+     ---------
+     ndraws : int
+         Number of samples in the original dataset.
+     eval_points : np.ndarray
+         Points at which the ECDF is evaluated. If these are dependent on the sample
+         values, simultaneous confidence bands may not be correctly calibrated.
+     cdf_at_eval_points : np.ndarray
+         CDF values at the evaluation points.
+     prob : float, default 0.95
+         The target probability that a true ECDF lies within the confidence band.
+     method : string, default "simulated"
+         The method used to compute the confidence band. Valid options are:
+         - "pointwise": Compute the pointwise (i.e. marginal) confidence band.
+         - "simulated": Use Monte Carlo simulation to estimate a simultaneous confidence band.
+           `rvs` must be provided.
+     rvs: callable, optional
+         A function that takes an integer `ndraws` and optionally the object passed to
+         `random_state` and returns an array of `ndraws` samples from the same distribution
+         as the original dataset. Required if `method` is "simulated" and variable is discrete.
+     num_trials : int, default 1000
+         The number of random ECDFs to generate for constructing simultaneous confidence bands
+         (if `method` is "simulated").
+     random_state : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+         If `None`, the `numpy.random.RandomState` singleton is used. If an `int`, a new
+         ``numpy.random.RandomState`` instance is used, seeded with seed. If a `RandomState` or
+         `Generator` instance, the instance is used.
+
+     Returns
+     -------
+     prob_lower : np.ndarray
+         Lower confidence band for the ECDF at the evaluation points.
+     prob_upper : np.ndarray
+         Upper confidence band for the ECDF at the evaluation points.
+     """
+     if not 0 < prob < 1:
+         raise ValueError(f"Invalid value for `prob`. Expected 0 < prob < 1, but got {prob}.")
+
+     if method == "pointwise":
+         prob_pointwise = prob
+     elif method == "simulated":
+         prob_pointwise = _simulate_simultaneous_ecdf_band_probability(
+             ndraws, eval_points, cdf_at_eval_points, prob=prob, **kwargs
+         )
+     else:
+         raise ValueError(f"Unknown method {method}. Valid options are 'pointwise' or 'simulated'.")
+
+     prob_lower, prob_upper = _get_pointwise_confidence_band(
+         prob_pointwise, ndraws, cdf_at_eval_points
+     )
+
+     return prob_lower, prob_upper
+
+
+ def _simulate_simultaneous_ecdf_band_probability(
+     ndraws: int,
+     eval_points: np.ndarray,
+     cdf_at_eval_points: np.ndarray,
+     prob: float = 0.95,
+     rvs: Optional[Callable[[int, Optional[Any]], np.ndarray]] = None,
+     num_trials: int = 1000,
+     random_state: Optional[Any] = None,
+ ) -> float:
+     """Estimate probability for simultaneous confidence band using simulation.
+
+     This function simulates the pointwise probability needed to construct pointwise
+     confidence bands that form a `prob`-level confidence envelope for the ECDF
+     of a sample.
+     """
+     if rvs is None:
+         warnings.warn(
+             "Assuming variable is continuous for calibration of pointwise bands. "
+             "If the variable is discrete, specify random variable sampler `rvs`.",
+             UserWarning,
+         )
+         # if variable continuous, we can calibrate the confidence band using a uniform
+         # distribution
+         rvs = uniform(0, 1).rvs
+         eval_points_sim = cdf_at_eval_points
+     else:
+         eval_points_sim = eval_points
+
+     probs_pointwise = np.empty(num_trials)
+     for i in range(num_trials):
+         ecdf_at_eval_points = _simulate_ecdf(
+             ndraws, eval_points_sim, rvs, random_state=random_state
+         )
+         prob_pointwise = _fit_pointwise_band_probability(
+             ndraws, ecdf_at_eval_points, cdf_at_eval_points
+         )
+         probs_pointwise[i] = prob_pointwise
+     return np.quantile(probs_pointwise, prob)
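A minimal usage sketch of the new module (my own illustration, not from the diff; the sample, grid, and trial counts are made up, and the import path is assumed to be `arviz.stats.ecdf_utils`, which is the path the new tests import from):

    import numpy as np
    from scipy.stats import norm
    from arviz.stats.ecdf_utils import compute_ecdf, ecdf_confidence_band

    dist = norm(0, 1)
    sample = np.sort(dist.rvs(200, random_state=1))
    # the evaluation grid should not depend on the sample if a simultaneous band is wanted
    eval_points = np.linspace(-3, 3, 50)

    ecdf = compute_ecdf(sample, eval_points)
    lower, upper = ecdf_confidence_band(
        len(sample),
        eval_points,
        dist.cdf(eval_points),
        prob=0.95,
        method="simulated",
        rvs=dist.rvs,
        num_trials=500,
        random_state=np.random.default_rng(0),
    )
    # with roughly 95% simultaneous probability, an ECDF of draws from ``dist``
    # stays inside [lower, upper] over the whole grid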
arviz/stats/stats.py CHANGED
@@ -146,6 +146,7 @@ def compare(
      Compare the centered and non centered models of the eight school problem:
 
      .. ipython::
+         :okwarning:
 
          In [1]: import arviz as az
             ...: data1 = az.load_arviz_data("non_centered_eight")
@@ -157,6 +158,7 @@
      weights using the stacking method.
 
      .. ipython::
+         :okwarning:
 
          In [1]: az.compare(compare_dict, ic="loo", method="stacking", scale="log")
 
@@ -180,37 +182,19 @@ def compare(
      except Exception as e:
          raise e.__class__("Encountered error in ELPD computation of compare.") from e
      names = list(ics_dict.keys())
-     if ic == "loo":
+     if ic in {"loo", "waic"}:
          df_comp = pd.DataFrame(
-             index=names,
-             columns=[
-                 "rank",
-                 "elpd_loo",
-                 "p_loo",
-                 "elpd_diff",
-                 "weight",
-                 "se",
-                 "dse",
-                 "warning",
-                 "scale",
-             ],
-             dtype=np.float_,
-         )
-     elif ic == "waic":
-         df_comp = pd.DataFrame(
-             index=names,
-             columns=[
-                 "rank",
-                 "elpd_waic",
-                 "p_waic",
-                 "elpd_diff",
-                 "weight",
-                 "se",
-                 "dse",
-                 "warning",
-                 "scale",
-             ],
-             dtype=np.float_,
+             {
+                 "rank": pd.Series(index=names, dtype="int"),
+                 f"elpd_{ic}": pd.Series(index=names, dtype="float"),
+                 f"p_{ic}": pd.Series(index=names, dtype="float"),
+                 "elpd_diff": pd.Series(index=names, dtype="float"),
+                 "weight": pd.Series(index=names, dtype="float"),
+                 "se": pd.Series(index=names, dtype="float"),
+                 "dse": pd.Series(index=names, dtype="float"),
+                 "warning": pd.Series(index=names, dtype="boolean"),
+                 "scale": pd.Series(index=names, dtype="str"),
+             }
          )
      else:
          raise NotImplementedError(f"The information criterion {ic} is not supported.")
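A hedged aside on the new construction (my own illustration with made-up values, not from the diff): building the comparison frame from a dict of typed Series lets each column carry its own dtype, which the old single `dtype=np.float_` constructor could not express, while also dropping an alias that NumPy 2.0 removes.

    import pandas as pd

    names = ["model_a", "model_b"]  # hypothetical model names
    df = pd.DataFrame(
        {
            "rank": pd.Series([0, 1], index=names, dtype="int"),
            "elpd_loo": pd.Series([-30.2, -31.5], index=names, dtype="float"),
            "warning": pd.Series([False, False], index=names, dtype="boolean"),
        }
    )
    # df.dtypes -> rank: int64, elpd_loo: float64, warning: boolean (nullable)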
@@ -632,7 +616,7 @@ def _hdi(ary, hdi_prob, circular, skipna):
      ary = np.sort(ary)
      interval_idx_inc = int(np.floor(hdi_prob * n))
      n_intervals = n - interval_idx_inc
-     interval_width = np.subtract(ary[interval_idx_inc:], ary[:n_intervals], dtype=np.float_)
+     interval_width = np.subtract(ary[interval_idx_inc:], ary[:n_intervals], dtype=np.float64)
 
      if len(interval_width) == 0:
          raise ValueError("Too few elements for interval calculation. ")
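This substitution, and the np.Inf to np.inf ones further down, track NumPy 2.0, which removed the `np.float_` and `np.Inf` aliases. A minimal sketch of the equivalent canonical spellings, which work on both NumPy 1.x and 2.x (not from the diff):

    import numpy as np

    # np.float_ was an alias for np.float64, and np.Inf an alias for np.inf;
    # only the canonical names survive in NumPy 2.0.
    widths = np.subtract([1.0, 2.0], [0.5, 0.5], dtype=np.float64)
    bins = np.asarray([-np.inf, 0.5, 0.7, 1, np.inf])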
@@ -2096,7 +2080,7 @@ def weight_predictions(idatas, weights=None):
      weights /= weights.sum()
 
      len_idatas = [
-         idata.posterior_predictive.dims["chain"] * idata.posterior_predictive.dims["draw"]
+         idata.posterior_predictive.sizes["chain"] * idata.posterior_predictive.sizes["draw"]
          for idata in idatas
      ]
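The `.dims` to `.sizes` swaps here and in the test modules below follow xarray's deprecation of mapping-style length lookups on `Dataset.dims`; `.sizes` is the stable spelling. A minimal sketch with a made-up dataset (my own, not from the diff):

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"mu": (("chain", "draw"), np.zeros((4, 500)))})
    n_chain = ds.sizes["chain"]  # 4; works on Dataset and DataArray alike
    n_draw = ds.sizes["draw"]    # 500; avoids the deprecated ds.dims["draw"] lookup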
 
@@ -484,7 +484,7 @@ class ELPDData(pd.Series): # pylint: disable=too-many-ancestors
              base += "\n\nThere has been a warning during the calculation. Please check the results."
 
          if kind == "loo" and "pareto_k" in self:
-             bins = np.asarray([-np.Inf, 0.5, 0.7, 1, np.Inf])
+             bins = np.asarray([-np.inf, 0.5, 0.7, 1, np.inf])
              counts, *_ = _histogram(self.pareto_k.values, bins)
              extended = POINTWISE_LOO_FMT.format(max(4, len(str(np.max(counts)))))
              extended = extended.format(
@@ -1241,7 +1241,7 @@ class TestDataDict:
          self.check_var_names_coords_dims(inference_data.prior_predictive)
          self.check_var_names_coords_dims(inference_data.sample_stats_prior)
 
-         pred_dims = inference_data.predictions.dims["school_pred"]
+         pred_dims = inference_data.predictions.sizes["school_pred"]
          assert pred_dims == 8
 
      def test_inference_data_warmup(self, data, eight_schools_params):
@@ -1586,8 +1586,8 @@ class TestExtractDataset:
          idata = load_arviz_data("centered_eight")
          post = extract(idata, combined=False)
          assert "sample" not in post.dims
-         assert post.dims["chain"] == 4
-         assert post.dims["draw"] == 500
+         assert post.sizes["chain"] == 4
+         assert post.sizes["draw"] == 500
 
      def test_var_name_group(self):
          idata = load_arviz_data("centered_eight")
@@ -1607,5 +1607,5 @@
      def test_subset_samples(self):
          idata = load_arviz_data("centered_eight")
          post = extract(idata, num_samples=10)
-         assert post.dims["sample"] == 10
+         assert post.sizes["sample"] == 10
          assert post.attrs == idata.posterior.attrs
@@ -0,0 +1,153 @@
+ import pytest
+
+ import numpy as np
+ import scipy.stats
+ from ...stats.ecdf_utils import (
+     compute_ecdf,
+     ecdf_confidence_band,
+     _get_ecdf_points,
+     _simulate_ecdf,
+     _get_pointwise_confidence_band,
+ )
+
+
+ def test_compute_ecdf():
+     """Test compute_ecdf function."""
+     sample = np.array([1, 2, 3, 3, 4, 5])
+     eval_points = np.arange(0, 7, 0.1)
+     ecdf_expected = (sample[:, None] <= eval_points).mean(axis=0)
+     assert np.allclose(compute_ecdf(sample, eval_points), ecdf_expected)
+     assert np.allclose(compute_ecdf(sample / 2 + 10, eval_points / 2 + 10), ecdf_expected)
+
+
+ @pytest.mark.parametrize("difference", [True, False])
+ def test_get_ecdf_points(difference):
+     """Test _get_ecdf_points."""
+     # if first point already outside support, no need to insert it
+     sample = np.array([1, 2, 3, 3, 4, 5, 5])
+     eval_points = np.arange(-1, 7, 0.1)
+     x, y = _get_ecdf_points(sample, eval_points, difference)
+     assert np.array_equal(x, eval_points)
+     assert np.array_equal(y, compute_ecdf(sample, eval_points))
+
+     # if first point is inside support, insert it if not in difference mode
+     eval_points = np.arange(1, 6, 0.1)
+     x, y = _get_ecdf_points(sample, eval_points, difference)
+     assert len(x) == len(eval_points) + 1 - difference
+     assert len(y) == len(eval_points) + 1 - difference
+
+     # if not in difference mode, first point should be (eval_points[0], 0)
+     if not difference:
+         assert x[0] == eval_points[0]
+         assert y[0] == 0
+         assert np.allclose(x[1:], eval_points)
+         assert np.allclose(y[1:], compute_ecdf(sample, eval_points))
+         assert x[-1] == eval_points[-1]
+         assert y[-1] == 1
+
+
+ @pytest.mark.parametrize(
+     "dist", [scipy.stats.norm(3, 10), scipy.stats.binom(10, 0.5)], ids=["continuous", "discrete"]
+ )
+ @pytest.mark.parametrize("seed", [32, 87])
+ def test_simulate_ecdf(dist, seed):
+     """Test _simulate_ecdf."""
+     ndraws = 1000
+     eval_points = np.arange(0, 1, 0.1)
+
+     rvs = dist.rvs
+
+     random_state = np.random.default_rng(seed)
+     ecdf = _simulate_ecdf(ndraws, eval_points, rvs, random_state=random_state)
+     random_state = np.random.default_rng(seed)
+     ecdf_expected = compute_ecdf(np.sort(rvs(ndraws, random_state=random_state)), eval_points)
+
+     assert np.allclose(ecdf, ecdf_expected)
+
+
+ @pytest.mark.parametrize("prob", [0.8, 0.9])
+ @pytest.mark.parametrize(
+     "dist", [scipy.stats.norm(3, 10), scipy.stats.poisson(100)], ids=["continuous", "discrete"]
+ )
+ @pytest.mark.parametrize("ndraws", [10_000])
+ def test_get_pointwise_confidence_band(dist, prob, ndraws, num_trials=1_000, seed=57):
+     """Test _get_pointwise_confidence_band."""
+     eval_points = np.linspace(*dist.interval(0.99), 10)
+     cdf_at_eval_points = dist.cdf(eval_points)
+
+     ecdf_lower, ecdf_upper = _get_pointwise_confidence_band(prob, ndraws, cdf_at_eval_points)
+
+     # check basic properties
+     assert np.all(ecdf_lower >= 0)
+     assert np.all(ecdf_upper <= 1)
+     assert np.all(ecdf_lower <= ecdf_upper)
+
+     # use simulation to estimate lower and upper bounds on pointwise probability
+     in_interval = []
+     random_state = np.random.default_rng(seed)
+     for _ in range(num_trials):
+         ecdf = _simulate_ecdf(ndraws, eval_points, dist.rvs, random_state=random_state)
+         in_interval.append((ecdf_lower <= ecdf) & (ecdf < ecdf_upper))
+     asymptotic_dist = scipy.stats.norm(
+         np.mean(in_interval, axis=0), scipy.stats.sem(in_interval, axis=0)
+     )
+     prob_lower, prob_upper = asymptotic_dist.interval(0.999)
+
+     # check target probability within all bounds
+     assert np.all(prob_lower <= prob)
+     assert np.all(prob <= prob_upper)
+
+
+ @pytest.mark.parametrize("prob", [0.8, 0.9])
+ @pytest.mark.parametrize(
+     "dist, rvs",
+     [
+         (scipy.stats.norm(3, 10), scipy.stats.norm(3, 10).rvs),
+         (scipy.stats.norm(3, 10), None),
+         (scipy.stats.poisson(100), scipy.stats.poisson(100).rvs),
+     ],
+     ids=["continuous", "continuous default rvs", "discrete"],
+ )
+ @pytest.mark.parametrize("ndraws", [10_000])
+ @pytest.mark.parametrize("method", ["pointwise", "simulated"])
+ def test_ecdf_confidence_band(dist, rvs, prob, ndraws, method, num_trials=1_000, seed=57):
+     """Test test_ecdf_confidence_band."""
+     eval_points = np.linspace(*dist.interval(0.99), 10)
+     cdf_at_eval_points = dist.cdf(eval_points)
+     random_state = np.random.default_rng(seed)
+
+     ecdf_lower, ecdf_upper = ecdf_confidence_band(
+         ndraws,
+         eval_points,
+         cdf_at_eval_points,
+         prob=prob,
+         rvs=rvs,
+         random_state=random_state,
+         method=method,
+     )
+
+     if method == "pointwise":
+         # these values tested elsewhere, we just make sure they're the same
+         ecdf_lower_pointwise, ecdf_upper_pointwise = _get_pointwise_confidence_band(
+             prob, ndraws, cdf_at_eval_points
+         )
+         assert np.array_equal(ecdf_lower, ecdf_lower_pointwise)
+         assert np.array_equal(ecdf_upper, ecdf_upper_pointwise)
+         return
+
+     # check basic properties
+     assert np.all(ecdf_lower >= 0)
+     assert np.all(ecdf_upper <= 1)
+     assert np.all(ecdf_lower <= ecdf_upper)
+
+     # use simulation to estimate lower and upper bounds on simultaneous probability
+     in_envelope = []
+     random_state = np.random.default_rng(seed)
+     for _ in range(num_trials):
+         ecdf = _simulate_ecdf(ndraws, eval_points, dist.rvs, random_state=random_state)
+         in_envelope.append(np.all(ecdf_lower <= ecdf) & np.all(ecdf < ecdf_upper))
+     asymptotic_dist = scipy.stats.norm(np.mean(in_envelope), scipy.stats.sem(in_envelope))
+     prob_lower, prob_upper = asymptotic_dist.interval(0.999)
+
+     # check target probability within bounds
+     assert prob_lower <= prob <= prob_upper
@@ -344,9 +344,9 @@ def test_variance_bad_data():
 
  def test_histogram():
      school = load_arviz_data("non_centered_eight").posterior["mu"].values
-     k_count_az, k_dens_az, _ = histogram(school, bins=np.asarray([-np.Inf, 0.5, 0.7, 1, np.Inf]))
-     k_dens_np, *_ = np.histogram(school, bins=[-np.Inf, 0.5, 0.7, 1, np.Inf], density=True)
-     k_count_np, *_ = np.histogram(school, bins=[-np.Inf, 0.5, 0.7, 1, np.Inf], density=False)
+     k_count_az, k_dens_az, _ = histogram(school, bins=np.asarray([-np.inf, 0.5, 0.7, 1, np.inf]))
+     k_dens_np, *_ = np.histogram(school, bins=[-np.inf, 0.5, 0.7, 1, np.inf], density=True)
+     k_count_np, *_ = np.histogram(school, bins=[-np.inf, 0.5, 0.7, 1, np.inf], density=False)
      assert np.allclose(k_count_az, k_count_np)
      assert np.allclose(k_dens_az, k_dens_np)
 
@@ -101,8 +101,8 @@ class TestDataNumPyro:
          assert not fails
 
          # test dims
-         dims = inference_data.posterior_predictive.dims["school"]
-         pred_dims = inference_data.predictions.dims["school_pred"]
+         dims = inference_data.posterior_predictive.sizes["school"]
+         pred_dims = inference_data.predictions.sizes["school_pred"]
          assert dims == 8
          assert pred_dims == 8
 
@@ -240,7 +240,7 @@ class TestDataNumPyro:
      def test_inference_data_num_chains(self, predictions_data, chains):
          predictions = predictions_data
          inference_data = from_numpyro(predictions=predictions, num_chains=chains)
-         nchains = inference_data.predictions.dims["chain"]
+         nchains = inference_data.predictions.sizes["chain"]
          assert nchains == chains
 
      @pytest.mark.parametrize("nchains", [1, 2])
@@ -83,8 +83,8 @@ class TestDataPyro:
          assert not fails
 
          # test dims
-         dims = inference_data.posterior_predictive.dims["school"]
-         pred_dims = inference_data.predictions.dims["school_pred"]
+         dims = inference_data.posterior_predictive.sizes["school"]
+         pred_dims = inference_data.predictions.sizes["school_pred"]
          assert dims == 8
          assert pred_dims == 8
 
@@ -225,7 +225,7 @@ class TestDataPyro:
      def test_inference_data_num_chains(self, predictions_data, chains):
          predictions = predictions_data
          inference_data = from_pyro(predictions=predictions, num_chains=chains)
-         nchains = inference_data.predictions.dims["chain"]
+         nchains = inference_data.predictions.sizes["chain"]
          assert nchains == chains
 
      @pytest.mark.parametrize("log_likelihood", [True, False])
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: arviz
- Version: 0.17.0
+ Version: 0.17.1
  Summary: Exploratory analysis of Bayesian models
  Home-page: http://github.com/arviz-devs/arviz
  Author: ArviZ Developers
@@ -15,6 +15,7 @@ Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering
  Classifier: Topic :: Scientific/Engineering :: Visualization
  Classifier: Topic :: Scientific/Engineering :: Mathematics
@@ -52,8 +53,7 @@ Requires-Dist: xarray-datatree ; extra == 'all'
  [![DOI](http://joss.theoj.org/papers/10.21105/joss.01143/status.svg)](https://doi.org/10.21105/joss.01143) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.2540945.svg)](https://doi.org/10.5281/zenodo.2540945)
  [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
 
- ArviZ (pronounced "AR-_vees_") is a Python package for exploratory analysis of Bayesian models.
- Includes functions for posterior analysis, data storage, model checking, comparison and diagnostics.
+ ArviZ (pronounced "AR-_vees_") is a Python package for exploratory analysis of Bayesian models. It includes functions for posterior analysis, data storage, model checking, comparison and diagnostics.
 
  ### ArviZ in other languages
  ArviZ also has a Julia wrapper available [ArviZ.jl](https://julia.arviz.org/).
@@ -202,6 +202,7 @@ python setup.py install
 
  <a href="https://python.arviz.org/en/latest/examples/index.html">And more...</a>
  </div>
+
  ## Dependencies
 
  ArviZ is tested on Python 3.10, 3.11 and 3.12, and depends on NumPy, SciPy, xarray, and Matplotlib.