nextmv 0.10.3.dev0__py3-none-any.whl → 0.35.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- nextmv/__about__.py +1 -1
- nextmv/__entrypoint__.py +39 -0
- nextmv/__init__.py +57 -0
- nextmv/_serialization.py +96 -0
- nextmv/base_model.py +79 -9
- nextmv/cloud/__init__.py +71 -10
- nextmv/cloud/acceptance_test.py +888 -17
- nextmv/cloud/account.py +154 -10
- nextmv/cloud/application.py +3644 -437
- nextmv/cloud/batch_experiment.py +292 -33
- nextmv/cloud/client.py +354 -53
- nextmv/cloud/ensemble.py +247 -0
- nextmv/cloud/input_set.py +121 -4
- nextmv/cloud/instance.py +125 -0
- nextmv/cloud/package.py +474 -0
- nextmv/cloud/scenario.py +410 -0
- nextmv/cloud/secrets.py +234 -0
- nextmv/cloud/url.py +73 -0
- nextmv/cloud/version.py +174 -0
- nextmv/default_app/.gitignore +1 -0
- nextmv/default_app/README.md +32 -0
- nextmv/default_app/app.yaml +12 -0
- nextmv/default_app/input.json +5 -0
- nextmv/default_app/main.py +37 -0
- nextmv/default_app/requirements.txt +2 -0
- nextmv/default_app/src/__init__.py +0 -0
- nextmv/default_app/src/main.py +37 -0
- nextmv/default_app/src/visuals.py +36 -0
- nextmv/deprecated.py +47 -0
- nextmv/input.py +883 -78
- nextmv/local/__init__.py +5 -0
- nextmv/local/application.py +1263 -0
- nextmv/local/executor.py +1040 -0
- nextmv/local/geojson_handler.py +323 -0
- nextmv/local/local.py +97 -0
- nextmv/local/plotly_handler.py +61 -0
- nextmv/local/runner.py +274 -0
- nextmv/logger.py +80 -9
- nextmv/manifest.py +1472 -0
- nextmv/model.py +431 -0
- nextmv/options.py +968 -78
- nextmv/output.py +1363 -231
- nextmv/polling.py +287 -0
- nextmv/run.py +1623 -0
- nextmv/safe.py +145 -0
- nextmv/status.py +122 -0
- {nextmv-0.10.3.dev0.dist-info → nextmv-0.35.0.dist-info}/METADATA +51 -288
- nextmv-0.35.0.dist-info/RECORD +50 -0
- {nextmv-0.10.3.dev0.dist-info → nextmv-0.35.0.dist-info}/WHEEL +1 -1
- nextmv/cloud/status.py +0 -29
- nextmv/nextroute/__init__.py +0 -2
- nextmv/nextroute/check/__init__.py +0 -26
- nextmv/nextroute/check/schema.py +0 -141
- nextmv/nextroute/schema/__init__.py +0 -19
- nextmv/nextroute/schema/input.py +0 -52
- nextmv/nextroute/schema/location.py +0 -13
- nextmv/nextroute/schema/output.py +0 -136
- nextmv/nextroute/schema/stop.py +0 -61
- nextmv/nextroute/schema/vehicle.py +0 -68
- nextmv-0.10.3.dev0.dist-info/RECORD +0 -28
- {nextmv-0.10.3.dev0.dist-info → nextmv-0.35.0.dist-info}/licenses/LICENSE +0 -0
nextmv/cloud/acceptance_test.py
CHANGED
````diff
@@ -1,25 +1,202 @@
-"""
+"""
+Definitions for acceptance tests in the Nextmv Cloud platform.
+
+This module provides classes and enumerations for working with acceptance tests
+in the Nextmv Cloud platform. Acceptance tests are used to compare the performance
+of different versions of an app against a set of metrics.
+
+Classes
+-------
+MetricType : Enum
+    Type of metric when doing a comparison.
+StatisticType : Enum
+    Type of statistical process for collapsing multiple values of a metric.
+Comparison : Enum
+    Comparison operators to use for comparing two metrics.
+ToleranceType : Enum
+    Type of tolerance used for a metric.
+ExperimentStatus : Enum
+    Status of an acceptance test experiment.
+MetricTolerance : BaseModel
+    Tolerance used for a metric in an acceptance test.
+MetricParams : BaseModel
+    Parameters of a metric comparison in an acceptance test.
+Metric : BaseModel
+    A metric used to evaluate the performance of a test.
+ComparisonInstance : BaseModel
+    An app instance used for a comparison in an acceptance test.
+DistributionSummaryStatistics : BaseModel
+    Statistics of a distribution summary for metric results.
+DistributionPercentiles : BaseModel
+    Percentiles of a metric value distribution.
+ResultStatistics : BaseModel
+    Statistics of a single instance's metric results.
+MetricStatistics : BaseModel
+    Statistics of a metric comparing control and candidate instances.
+MetricResult : BaseModel
+    Result of a metric evaluation in an acceptance test.
+AcceptanceTestResults : BaseModel
+    Results of an acceptance test.
+AcceptanceTest : BaseModel
+    An acceptance test for evaluating app instances.
+"""
 
 from datetime import datetime
 from enum import Enum
-from typing import List
 
 from nextmv.base_model import BaseModel
+from nextmv.cloud.batch_experiment import ExperimentStatus
+from nextmv.deprecated import deprecated
 
 
 class MetricType(str, Enum):
-    """
+    """
+    Type of metric when doing a comparison.
+
+    You can import the `MetricType` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import MetricType
+    ```
+
+    This enumeration defines the different types of metrics that can be used
+    when comparing two runs in an acceptance test.
+
+    Attributes
+    ----------
+    direct_comparison : str
+        Direct comparison between metric values.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import MetricType
+    >>> metric_type = MetricType.direct_comparison
+    >>> metric_type
+    <MetricType.direct_comparison: 'direct-comparison'>
+    """
 
-    absolute_threshold = "absolute-threshold"
-    """Absolute threshold metric type."""
-    difference_threshold = "difference-threshold"
-    """Difference threshold metric type."""
     direct_comparison = "direct-comparison"
     """Direct comparison metric type."""
 
 
+class StatisticType(str, Enum):
+    """
+    Type of statistical process for collapsing multiple values of a metric.
+
+    You can import the `StatisticType` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import StatisticType
+    ```
+
+    This enumeration defines the different statistical methods that can be used
+    to summarize multiple values of a metric from multiple runs into a single
+    value.
+
+    Attributes
+    ----------
+    min : str
+        Minimum value.
+    max : str
+        Maximum value.
+    mean : str
+        Mean value.
+    std : str
+        Standard deviation.
+    shifted_geometric_mean : str
+        Shifted geometric mean.
+    p01 : str
+        1st percentile.
+    p05 : str
+        5th percentile.
+    p10 : str
+        10th percentile.
+    p25 : str
+        25th percentile.
+    p50 : str
+        50th percentile (median).
+    p75 : str
+        75th percentile.
+    p90 : str
+        90th percentile.
+    p95 : str
+        95th percentile.
+    p99 : str
+        99th percentile.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import StatisticType
+    >>> stat_type = StatisticType.mean
+    >>> stat_type
+    <StatisticType.mean: 'mean'>
+    """
+
+    min = "min"
+    """Minimum value."""
+    max = "max"
+    """Maximum value."""
+    mean = "mean"
+    """Mean value."""
+    std = "std"
+    """Standard deviation."""
+    shifted_geometric_mean = "shifted_geometric_mean"
+    """Shifted geometric mean."""
+    p01 = "p01"
+    """1st percentile."""
+    p05 = "p05"
+    """5th percentile."""
+    p10 = "p10"
+    """10th percentile."""
+    p25 = "p25"
+    """25th percentile."""
+    p50 = "p50"
+    """50th percentile."""
+    p75 = "p75"
+    """75th percentile."""
+    p90 = "p90"
+    """90th percentile."""
+    p95 = "p95"
+    """95th percentile."""
+    p99 = "p99"
+    """99th percentile."""
+
+
 class Comparison(str, Enum):
-    """
+    """
+    Comparison operators to use for comparing two metrics.
+
+    You can import the `Comparison` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import Comparison
+    ```
+
+    This enumeration defines the different comparison operators that can be used
+    to compare two metric values in an acceptance test.
+
+    Attributes
+    ----------
+    equal_to : str
+        Equal to operator (==).
+    greater_than : str
+        Greater than operator (>).
+    greater_than_or_equal_to : str
+        Greater than or equal to operator (>=).
+    less_than : str
+        Less than operator (<).
+    less_than_or_equal_to : str
+        Less than or equal to operator (<=).
+    not_equal_to : str
+        Not equal to operator (!=).
+
+    Examples
+    --------
+    >>> from nextmv.cloud import Comparison
+    >>> op = Comparison.greater_than
+    >>> op
+    <Comparison.greater_than: 'gt'>
+    """
 
     equal_to = "eq"
     """Equal to metric type."""
@@ -35,16 +212,226 @@ class Comparison(str, Enum):
     """Not equal to metric type."""
 
 
+class ToleranceType(str, Enum):
+    """
+    !!! warning
+        `ToleranceType` is deprecated, use `MetricToleranceType` instead.
+
+    Type of tolerance used for a metric.
+
+    You can import the `ToleranceType` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ToleranceType
+    ```
+
+    This enumeration defines the different types of tolerances that can be used
+    when comparing metrics in acceptance tests.
+
+    Attributes
+    ----------
+    undefined : str
+        Undefined tolerance type (empty string).
+    absolute : str
+        Absolute tolerance type, using a fixed value.
+    relative : str
+        Relative tolerance type, using a percentage.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ToleranceType
+    >>> tol_type = ToleranceType.absolute
+    >>> tol_type
+    <ToleranceType.absolute: 'absolute'>
+    """
+
+    undefined = ""
+    """ToleranceType is deprecated, please use MetricToleranceType instead.
+    Undefined tolerance type."""
+    absolute = "absolute"
+    """ToleranceType is deprecated, please use MetricToleranceType instead.
+    Absolute tolerance type."""
+    relative = "relative"
+    """ToleranceType is deprecated, please use MetricToleranceType instead.
+    Relative tolerance type."""
+
+
+# Override __getattribute__ to emit deprecation warnings when enum values are accessed
+_original_getattribute = ToleranceType.__class__.__getattribute__
+
+
+def _deprecated_getattribute(cls, name: str):
+    # Only emit deprecation warning if this is specifically the ToleranceType class
+    if cls is ToleranceType and name in ("undefined", "absolute", "relative"):
+        deprecated(
+            f"ToleranceType.{name}",
+            "ToleranceType is deprecated and will be removed in a future version. "
+            "Please use MetricToleranceType instead",
+        )
+
+    return _original_getattribute(cls, name)
+
+
+ToleranceType.__class__.__getattribute__ = _deprecated_getattribute
+
+
+class MetricToleranceType(str, Enum):
+    """
+    Type of tolerance used for a metric.
+
+    You can import the `MetricToleranceType` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import MetricToleranceType
+    ```
+
+    This enumeration defines the different types of tolerances that can be used
+    when comparing metrics in acceptance tests.
+
+    Attributes
+    ----------
+    undefined : str
+        Undefined tolerance type (empty string).
+    absolute : str
+        Absolute tolerance type, using a fixed value.
+    relative : str
+        Relative tolerance type, using a percentage.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import MetricToleranceType
+    >>> tol_type = MetricToleranceType.absolute
+    >>> tol_type
+    <MetricToleranceType.absolute: 'absolute'>
+    """
+
+    undefined = ""
+    """Undefined tolerance type."""
+    absolute = "absolute"
+    """Absolute tolerance type."""
+    relative = "relative"
+    """Relative tolerance type."""
+
+
+class MetricTolerance(BaseModel):
+    """
+    Tolerance used for a metric in an acceptance test.
+
+    You can import the `MetricTolerance` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import MetricTolerance
+    ```
+
+    This class defines the tolerance to be applied when comparing metric values,
+    which can be either absolute or relative.
+
+    Attributes
+    ----------
+    type : MetricToleranceType
+        Type of tolerance (absolute or relative).
+    value : float
+        Value of the tolerance.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import MetricTolerance, MetricToleranceType
+    >>> tolerance = MetricTolerance(type=MetricToleranceType.absolute, value=0.1)
+    >>> tolerance.type
+    <MetricToleranceType.absolute: 'absolute'>
+    >>> tolerance.value
+    0.1
+    """
+
+    type: MetricToleranceType
+    """Type of tolerance."""
+    value: float
+    """Value of the tolerance."""
+
+
 class MetricParams(BaseModel):
-    """
+    """
+    Parameters of a metric comparison in an acceptance test.
+
+    You can import the `MetricParams` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import MetricParams
+    ```
+
+    This class defines the parameters used for comparing metric values,
+    including the comparison operator and tolerance.
+
+    Attributes
+    ----------
+    operator : Comparison
+        Operator used to compare two metrics (e.g., greater than, less than).
+    tolerance : MetricTolerance
+        Tolerance used for the comparison.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import MetricParams, Comparison, MetricTolerance, ToleranceType
+    >>> params = MetricParams(
+    ...     operator=Comparison.less_than,
+    ...     tolerance=MetricTolerance(type=ToleranceType.absolute, value=0.5)
+    ... )
+    >>> params.operator
+    <Comparison.less_than: 'lt'>
+    """
 
     operator: Comparison
     """Operator used to compare two metrics."""
+    tolerance: MetricTolerance
+    """Tolerance used for the comparison."""
 
 
 class Metric(BaseModel):
-    """
-    performance of a test.
+    """
+    A metric used to evaluate the performance of a test.
+
+    You can import the `Metric` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import Metric
+    ```
+
+    A metric is a key performance indicator that is used to evaluate the
+    performance of a test. It defines the field to measure, the type of
+    comparison, and the statistical method to use.
+
+    Attributes
+    ----------
+    field : str
+        Field of the metric to measure (e.g., "solution.objective").
+    metric_type : MetricType
+        Type of the metric comparison.
+    params : MetricParams
+        Parameters of the metric comparison.
+    statistic : StatisticType
+        Type of statistical process for collapsing multiple values into a single value.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import (
+    ...     Metric, MetricType, MetricParams, Comparison,
+    ...     MetricTolerance, ToleranceType, StatisticType
+    ... )
+    >>> metric = Metric(
+    ...     field="solution.objective",
+    ...     metric_type=MetricType.direct_comparison,
+    ...     params=MetricParams(
+    ...         operator=Comparison.less_than,
+    ...         tolerance=MetricTolerance(
+    ...             type=ToleranceType.relative,
+    ...             value=0.05
+    ...         )
+    ...     ),
+    ...     statistic=StatisticType.mean
+    ... )
+    >>> metric.field
+    'solution.objective'
+    """
 
     field: str
     """Field of the metric."""
@@ -52,22 +439,502 @@ class Metric(BaseModel):
     """Type of the metric."""
     params: MetricParams
     """Parameters of the metric."""
-    statistic:
-    """
+    statistic: StatisticType
+    """
+    Type of statistical process for collapsing multiple values of a metric
+    (from multiple runs) into a single value.
+    """
 
 
 class ComparisonInstance(BaseModel):
-    """
+    """
+    An app instance used for a comparison in an acceptance test.
+
+    You can import the `ComparisonInstance` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ComparisonInstance
+    ```
+
+    This class represents an app instance used in a comparison,
+    identifying both the instance and its version.
+
+    Attributes
+    ----------
+    instance_id : str
+        ID of the instance.
+    version_id : str
+        ID of the version.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ComparisonInstance
+    >>> instance = ComparisonInstance(
+    ...     instance_id="instance-123",
+    ...     version_id="version-456"
+    ... )
+    >>> instance.instance_id
+    'instance-123'
+    >>> instance.version_id
+    'version-456'
+    """
+
+    instance_id: str
+    """ID of the instance."""
+    version_id: str
+    """ID of the version."""
+
+
+class DistributionSummaryStatistics(BaseModel):
+    """
+    Statistics of a distribution summary for metric results.
+
+    You can import the `DistributionSummaryStatistics` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import DistributionSummaryStatistics
+    ```
+
+    This class contains statistical measures summarizing the distribution of
+    metric values across multiple runs.
+
+    Attributes
+    ----------
+    min : float
+        Minimum value in the distribution.
+    max : float
+        Maximum value in the distribution.
+    count : int
+        Count of runs in the distribution.
+    mean : float
+        Mean value of the distribution.
+    std : float
+        Standard deviation of the distribution.
+    shifted_geometric_mean : float
+        Shifted geometric mean of the distribution.
+    shift_parameter : float
+        Shift parameter used for the geometric mean calculation.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import DistributionSummaryStatistics
+    >>> stats = DistributionSummaryStatistics(
+    ...     min=10.0,
+    ...     max=20.0,
+    ...     count=5,
+    ...     mean=15.0,
+    ...     std=4.0,
+    ...     shifted_geometric_mean=14.5,
+    ...     shift_parameter=1.0
+    ... )
+    >>> stats.mean
+    15.0
+    >>> stats.count
+    5
+    """
+
+    min: float
+    """Minimum value."""
+    max: float
+    """Maximum value."""
+    count: int
+    """Count of runs."""
+    mean: float
+    """Mean value."""
+    std: float
+    """Standard deviation."""
+    shifted_geometric_mean: float
+    """Shifted geometric mean."""
+    shift_parameter: float
+    """Shift parameter of the geometric mean."""
+
+
+class DistributionPercentiles(BaseModel):
+    """
+    Percentiles of a metric value distribution.
+
+    You can import the `DistributionPercentiles` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import DistributionPercentiles
+    ```
+
+    This class contains the different percentiles of a distribution of metric values
+    across multiple runs.
+
+    Attributes
+    ----------
+    p01 : float
+        1st percentile of the distribution.
+    p05 : float
+        5th percentile of the distribution.
+    p10 : float
+        10th percentile of the distribution.
+    p25 : float
+        25th percentile of the distribution.
+    p50 : float
+        50th percentile of the distribution (median).
+    p75 : float
+        75th percentile of the distribution.
+    p90 : float
+        90th percentile of the distribution.
+    p95 : float
+        95th percentile of the distribution.
+    p99 : float
+        99th percentile of the distribution.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import DistributionPercentiles
+    >>> percentiles = DistributionPercentiles(
+    ...     p01=10.0,
+    ...     p05=12.0,
+    ...     p10=13.0,
+    ...     p25=14.0,
+    ...     p50=15.0,
+    ...     p75=16.0,
+    ...     p90=17.0,
+    ...     p95=18.0,
+    ...     p99=19.0
+    ... )
+    >>> percentiles.p50  # median
+    15.0
+    """
+
+    p01: float
+    """1st percentile."""
+    p05: float
+    """5th percentile."""
+    p10: float
+    """10th percentile."""
+    p25: float
+    """25th percentile."""
+    p50: float
+    """50th percentile."""
+    p75: float
+    """75th percentile."""
+    p90: float
+    """90th percentile."""
+    p95: float
+    """95th percentile."""
+    p99: float
+    """99th percentile."""
+
+
+class ResultStatistics(BaseModel):
+    """
+    Statistics of a single instance's metric results.
+
+    You can import the `ResultStatistics` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ResultStatistics
+    ```
+
+    This class aggregates the statistical information about the metric results
+    for a specific instance in a comparison.
+
+    Attributes
+    ----------
+    instance_id : str
+        ID of the instance.
+    version_id : str
+        ID of the version.
+    number_of_runs_total : int
+        Total number of runs included in the statistics.
+    distribution_summary_statistics : DistributionSummaryStatistics
+        Summary statistics of the metric value distribution.
+    distribution_percentiles : DistributionPercentiles
+        Percentiles of the metric value distribution.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import (
+    ...     ResultStatistics, DistributionSummaryStatistics, DistributionPercentiles
+    ... )
+    >>> result_stats = ResultStatistics(
+    ...     instance_id="instance-123",
+    ...     version_id="version-456",
+    ...     number_of_runs_total=10,
+    ...     distribution_summary_statistics=DistributionSummaryStatistics(
+    ...         min=10.0,
+    ...         max=20.0,
+    ...         count=10,
+    ...         mean=15.0,
+    ...         std=3.0,
+    ...         shifted_geometric_mean=14.5,
+    ...         shift_parameter=1.0
+    ...     ),
+    ...     distribution_percentiles=DistributionPercentiles(
+    ...         p01=10.5,
+    ...         p05=11.0,
+    ...         p10=12.0,
+    ...         p25=13.5,
+    ...         p50=15.0,
+    ...         p75=16.5,
+    ...         p90=18.0,
+    ...         p95=19.0,
+    ...         p99=19.5
+    ...     )
+    ... )
+    >>> result_stats.number_of_runs_total
+    10
+    """
 
     instance_id: str
     """ID of the instance."""
     version_id: str
     """ID of the version."""
+    number_of_runs_total: int
+    """Number of runs."""
+    distribution_summary_statistics: DistributionSummaryStatistics
+    """Distribution summary statistics."""
+    distribution_percentiles: DistributionPercentiles
+    """Distribution percentiles."""
+
+
+class MetricStatistics(BaseModel):
+    """
+    Statistics of a metric comparing control and candidate instances.
+
+    You can import the `MetricStatistics` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import MetricStatistics
+    ```
+
+    This class holds the statistical information for both the control and candidate
+    instances being compared in the acceptance test.
+
+    Attributes
+    ----------
+    control : ResultStatistics
+        Statistics for the control instance.
+    candidate : ResultStatistics
+        Statistics for the candidate instance.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import (
+    ...     MetricStatistics, ResultStatistics,
+    ...     DistributionSummaryStatistics, DistributionPercentiles
+    ... )
+    >>> stats = MetricStatistics(
+    ...     control=ResultStatistics(
+    ...         instance_id="control-instance",
+    ...         version_id="control-version",
+    ...         number_of_runs_total=10,
+    ...         distribution_summary_statistics=DistributionSummaryStatistics(
+    ...             min=10.0, max=20.0, count=10, mean=15.0, std=3.0,
+    ...             shifted_geometric_mean=14.5, shift_parameter=1.0
+    ...         ),
+    ...         distribution_percentiles=DistributionPercentiles(
+    ...             p01=10.5, p05=11.0, p10=12.0, p25=13.5, p50=15.0,
+    ...             p75=16.5, p90=18.0, p95=19.0, p99=19.5
+    ...         )
+    ...     ),
+    ...     candidate=ResultStatistics(
+    ...         instance_id="candidate-instance",
+    ...         version_id="candidate-version",
+    ...         number_of_runs_total=10,
+    ...         distribution_summary_statistics=DistributionSummaryStatistics(
+    ...             min=9.0, max=18.0, count=10, mean=13.0, std=2.5,
+    ...             shifted_geometric_mean=12.8, shift_parameter=1.0
+    ...         ),
+    ...         distribution_percentiles=DistributionPercentiles(
+    ...             p01=9.5, p05=10.0, p10=11.0, p25=12.0, p50=13.0,
+    ...             p75=14.0, p90=15.5, p95=16.5, p99=17.5
+    ...         )
+    ...     )
+    ... )
+    >>> stats.control.mean > stats.candidate.mean
+    True
+    """
+
+    control: ResultStatistics
+    """Control statistics."""
+    candidate: ResultStatistics
+    """Candidate statistics."""
+
+
+class MetricResult(BaseModel):
+    """
+    Result of a metric evaluation in an acceptance test.
+
+    You can import the `MetricResult` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import MetricResult
+    ```
+
+    This class represents the result of evaluating a specific metric in an
+    acceptance test, including whether the candidate passed according to this metric.
+
+    Attributes
+    ----------
+    metric : Metric
+        The metric that was evaluated.
+    statistics : MetricStatistics
+        Statistics comparing control and candidate instances for this metric.
+    passed : bool
+        Whether the candidate passed for this metric.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import (
+    ...     MetricResult, Metric, MetricType, MetricParams, Comparison,
+    ...     MetricTolerance, ToleranceType, StatisticType, MetricStatistics
+    ... )
+    >>> # Assume we have statistics object already created
+    >>> result = MetricResult(
+    ...     metric=Metric(
+    ...         field="solution.objective",
+    ...         metric_type=MetricType.direct_comparison,
+    ...         params=MetricParams(
+    ...             operator=Comparison.less_than,
+    ...             tolerance=MetricTolerance(
+    ...                 type=ToleranceType.relative,
+    ...                 value=0.05
+    ...             )
+    ...         ),
+    ...         statistic=StatisticType.mean
+    ...     ),
+    ...     statistics=statistics,  # previously created statistics object
+    ...     passed=True
+    ... )
+    >>> result.passed
+    True
+    """
+
+    metric: Metric
+    """Metric of the result."""
+    statistics: MetricStatistics
+    """Statistics of the metric."""
+    passed: bool
+    """Whether the candidate passed for the metric (or not)."""
+
+
+class AcceptanceTestResults(BaseModel):
+    """
+    Results of an acceptance test.
+
+    You can import the `AcceptanceTestResults` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import AcceptanceTestResults
+    ```
+
+    This class contains the overall results of an acceptance test, including
+    whether the test passed and detailed results for each metric.
+
+    Attributes
+    ----------
+    passed : bool
+        Whether the acceptance test passed overall.
+    metric_results : list[MetricResult], optional
+        Results for each metric in the test.
+    error : str, optional
+        Error message if the acceptance test failed.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import AcceptanceTestResults
+    >>> # Assume metric_results is a list of MetricResult objects
+    >>> results = AcceptanceTestResults(
+    ...     passed=True,
+    ...     metric_results=metric_results  # previously created list of results
+    ... )
+    >>> results.passed
+    True
+    >>>
+    >>> # Example with error
+    >>> error_results = AcceptanceTestResults(
+    ...     passed=False,
+    ...     error="Experiment failed to complete"
+    ... )
+    >>> error_results.passed
+    False
+    >>> error_results.error
+    'Experiment failed to complete'
+    """
+
+    passed: bool
+    """Whether the acceptance test passed (or not)."""
+    metric_results: list[MetricResult] | None = None
+    """Results of the metrics."""
+    error: str | None = None
+    """Error message if the acceptance test failed."""
 
 
 class AcceptanceTest(BaseModel):
-    """
-
+    """
+    An acceptance test for evaluating app instances.
+
+    You can import the `AcceptanceTest` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import AcceptanceTest
+    ```
+
+    An acceptance test gives a go/no-go decision criteria for a set of
+    metrics. It relies on a batch experiment to compare a candidate app instance
+    against a control app instance.
+
+    Attributes
+    ----------
+    id : str
+        ID of the acceptance test.
+    name : str
+        Name of the acceptance test.
+    description : str
+        Description of the acceptance test.
+    app_id : str
+        ID of the app that owns the acceptance test.
+    experiment_id : str
+        ID of the batch experiment underlying the acceptance test.
+    control : ComparisonInstance
+        Control instance of the acceptance test.
+    candidate : ComparisonInstance
+        Candidate instance of the acceptance test.
+    metrics : list[Metric]
+        Metrics to evaluate in the acceptance test.
+    created_at : datetime
+        Creation date of the acceptance test.
+    updated_at : datetime
+        Last update date of the acceptance test.
+    status : ExperimentStatus, optional
+        Status of the acceptance test.
+    results : AcceptanceTestResults, optional
+        Results of the acceptance test.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import (
+    ...     AcceptanceTest, ComparisonInstance, Metric, ExperimentStatus
+    ... )
+    >>> from datetime import datetime
+    >>> test = AcceptanceTest(
+    ...     id="test-123",
+    ...     name="Performance acceptance test",
+    ...     description="Testing performance improvements",
+    ...     app_id="app-456",
+    ...     experiment_id="exp-789",
+    ...     control=ComparisonInstance(
+    ...         instance_id="control-instance",
+    ...         version_id="control-version"
+    ...     ),
+    ...     candidate=ComparisonInstance(
+    ...         instance_id="candidate-instance",
+    ...         version_id="candidate-version"
+    ...     ),
+    ...     metrics=[metric1, metric2],  # previously created metrics
+    ...     created_at=datetime.now(),
+    ...     updated_at=datetime.now(),
+    ...     status=ExperimentStatus.started
+    ... )
+    >>> test.status
+    <ExperimentStatus.started: 'started'>
+    """
 
     id: str
     """ID of the acceptance test."""
@@ -83,9 +950,13 @@ class AcceptanceTest(BaseModel):
     """Control instance of the acceptance test."""
     candidate: ComparisonInstance
     """Candidate instance of the acceptance test."""
-    metrics:
+    metrics: list[Metric]
     """Metrics of the acceptance test."""
    created_at: datetime
     """Creation date of the acceptance test."""
     updated_at: datetime
     """Last update date of the acceptance test."""
+    status: ExperimentStatus | None = ExperimentStatus.UNKNOWN
+    """Status of the acceptance test."""
+    results: AcceptanceTestResults | None = None
+    """Results of the acceptance test."""
````