nextmv 0.40.0__py3-none-any.whl → 1.0.0.dev0__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
- nextmv/__about__.py +1 -1
- nextmv/__init__.py +2 -0
- nextmv/cli/CONTRIBUTING.md +511 -0
- nextmv/cli/cloud/__init__.py +45 -0
- nextmv/cli/cloud/acceptance/__init__.py +27 -0
- nextmv/cli/cloud/acceptance/create.py +393 -0
- nextmv/cli/cloud/acceptance/delete.py +68 -0
- nextmv/cli/cloud/acceptance/get.py +104 -0
- nextmv/cli/cloud/acceptance/list.py +62 -0
- nextmv/cli/cloud/acceptance/update.py +95 -0
- nextmv/cli/cloud/account/__init__.py +28 -0
- nextmv/cli/cloud/account/create.py +83 -0
- nextmv/cli/cloud/account/delete.py +60 -0
- nextmv/cli/cloud/account/get.py +66 -0
- nextmv/cli/cloud/account/update.py +70 -0
- nextmv/cli/cloud/app/__init__.py +35 -0
- nextmv/cli/cloud/app/create.py +141 -0
- nextmv/cli/cloud/app/delete.py +58 -0
- nextmv/cli/cloud/app/exists.py +44 -0
- nextmv/cli/cloud/app/get.py +66 -0
- nextmv/cli/cloud/app/list.py +61 -0
- nextmv/cli/cloud/app/push.py +137 -0
- nextmv/cli/cloud/app/update.py +124 -0
- nextmv/cli/cloud/batch/__init__.py +29 -0
- nextmv/cli/cloud/batch/create.py +454 -0
- nextmv/cli/cloud/batch/delete.py +68 -0
- nextmv/cli/cloud/batch/get.py +104 -0
- nextmv/cli/cloud/batch/list.py +63 -0
- nextmv/cli/cloud/batch/metadata.py +66 -0
- nextmv/cli/cloud/batch/update.py +95 -0
- nextmv/cli/cloud/data/__init__.py +26 -0
- nextmv/cli/cloud/data/upload.py +162 -0
- nextmv/cli/cloud/ensemble/__init__.py +31 -0
- nextmv/cli/cloud/ensemble/create.py +414 -0
- nextmv/cli/cloud/ensemble/delete.py +67 -0
- nextmv/cli/cloud/ensemble/get.py +65 -0
- nextmv/cli/cloud/ensemble/update.py +103 -0
- nextmv/cli/cloud/input_set/__init__.py +30 -0
- nextmv/cli/cloud/input_set/create.py +168 -0
- nextmv/cli/cloud/input_set/get.py +63 -0
- nextmv/cli/cloud/input_set/list.py +63 -0
- nextmv/cli/cloud/input_set/update.py +123 -0
- nextmv/cli/cloud/instance/__init__.py +35 -0
- nextmv/cli/cloud/instance/create.py +290 -0
- nextmv/cli/cloud/instance/delete.py +62 -0
- nextmv/cli/cloud/instance/exists.py +39 -0
- nextmv/cli/cloud/instance/get.py +62 -0
- nextmv/cli/cloud/instance/list.py +60 -0
- nextmv/cli/cloud/instance/update.py +216 -0
- nextmv/cli/cloud/managed_input/__init__.py +31 -0
- nextmv/cli/cloud/managed_input/create.py +146 -0
- nextmv/cli/cloud/managed_input/delete.py +65 -0
- nextmv/cli/cloud/managed_input/get.py +63 -0
- nextmv/cli/cloud/managed_input/list.py +60 -0
- nextmv/cli/cloud/managed_input/update.py +97 -0
- nextmv/cli/cloud/run/__init__.py +37 -0
- nextmv/cli/cloud/run/cancel.py +37 -0
- nextmv/cli/cloud/run/create.py +530 -0
- nextmv/cli/cloud/run/get.py +199 -0
- nextmv/cli/cloud/run/input.py +86 -0
- nextmv/cli/cloud/run/list.py +80 -0
- nextmv/cli/cloud/run/logs.py +167 -0
- nextmv/cli/cloud/run/metadata.py +67 -0
- nextmv/cli/cloud/run/track.py +501 -0
- nextmv/cli/cloud/scenario/__init__.py +29 -0
- nextmv/cli/cloud/scenario/create.py +451 -0
- nextmv/cli/cloud/scenario/delete.py +65 -0
- nextmv/cli/cloud/scenario/get.py +102 -0
- nextmv/cli/cloud/scenario/list.py +63 -0
- nextmv/cli/cloud/scenario/metadata.py +67 -0
- nextmv/cli/cloud/scenario/update.py +93 -0
- nextmv/cli/cloud/secrets/__init__.py +33 -0
- nextmv/cli/cloud/secrets/create.py +206 -0
- nextmv/cli/cloud/secrets/delete.py +67 -0
- nextmv/cli/cloud/secrets/get.py +66 -0
- nextmv/cli/cloud/secrets/list.py +60 -0
- nextmv/cli/cloud/secrets/update.py +147 -0
- nextmv/cli/cloud/upload/__init__.py +22 -0
- nextmv/cli/cloud/upload/create.py +39 -0
- nextmv/cli/cloud/version/__init__.py +33 -0
- nextmv/cli/cloud/version/create.py +97 -0
- nextmv/cli/cloud/version/delete.py +62 -0
- nextmv/cli/cloud/version/exists.py +39 -0
- nextmv/cli/cloud/version/get.py +62 -0
- nextmv/cli/cloud/version/list.py +60 -0
- nextmv/cli/cloud/version/update.py +92 -0
- nextmv/cli/community/__init__.py +24 -0
- nextmv/cli/community/clone.py +3 -3
- nextmv/cli/community/list.py +1 -1
- nextmv/cli/configuration/__init__.py +23 -0
- nextmv/cli/configuration/config.py +68 -4
- nextmv/cli/configuration/create.py +14 -15
- nextmv/cli/configuration/delete.py +24 -12
- nextmv/cli/configuration/list.py +1 -1
- nextmv/cli/main.py +58 -16
- nextmv/cli/message.py +153 -0
- nextmv/cli/options.py +168 -0
- nextmv/cli/version.py +20 -1
- nextmv/cloud/__init__.py +4 -1
- nextmv/cloud/acceptance_test.py +19 -18
- nextmv/cloud/account.py +268 -24
- nextmv/cloud/application/__init__.py +955 -0
- nextmv/cloud/application/_acceptance.py +419 -0
- nextmv/cloud/application/_batch_scenario.py +860 -0
- nextmv/cloud/application/_ensemble.py +251 -0
- nextmv/cloud/application/_input_set.py +227 -0
- nextmv/cloud/application/_instance.py +289 -0
- nextmv/cloud/application/_managed_input.py +227 -0
- nextmv/cloud/application/_run.py +1393 -0
- nextmv/cloud/application/_secrets.py +294 -0
- nextmv/cloud/application/_utils.py +54 -0
- nextmv/cloud/application/_version.py +303 -0
- nextmv/cloud/batch_experiment.py +3 -1
- nextmv/cloud/instance.py +11 -1
- nextmv/cloud/integration.py +1 -1
- nextmv/cloud/package.py +50 -9
- nextmv/input.py +20 -36
- nextmv/local/application.py +3 -15
- nextmv/polling.py +54 -16
- nextmv/run.py +83 -27
- {nextmv-0.40.0.dist-info → nextmv-1.0.0.dev0.dist-info}/METADATA +33 -8
- nextmv-1.0.0.dev0.dist-info/RECORD +158 -0
- nextmv/cli/community/community.py +0 -24
- nextmv/cli/configuration/configuration.py +0 -23
- nextmv/cli/error.py +0 -22
- nextmv/cloud/application.py +0 -4204
- nextmv-0.40.0.dist-info/RECORD +0 -66
- {nextmv-0.40.0.dist-info → nextmv-1.0.0.dev0.dist-info}/WHEEL +0 -0
- {nextmv-0.40.0.dist-info → nextmv-1.0.0.dev0.dist-info}/licenses/LICENSE +0 -0

nextmv/cli/cloud/acceptance/create.py
@@ -0,0 +1,393 @@
+"""
+This module defines the cloud acceptance create command for the Nextmv CLI.
+"""
+
+import json
+from typing import Annotated
+
+import typer
+
+from nextmv.cli.configuration.config import build_app
+from nextmv.cli.message import enum_values, error, in_progress, print_json, success
+from nextmv.cli.options import AppIDOption, ProfileOption
+from nextmv.cloud.acceptance_test import Comparison, Metric, MetricToleranceType, MetricType, StatisticType
+from nextmv.polling import default_polling_options
+
+# Set up subcommand application.
+app = typer.Typer()
+
+
+@app.command(
+    # AVOID USING THE HELP PARAMETER WITH TYPER COMMAND DECORATOR. For
+    # consistency, commands should be documented using docstrings. We were
+    # forced to use help here to work around f-string limitations in
+    # docstrings.
+    help=f"""
+Create a new Nextmv Cloud acceptance test.
+
+The acceptance test is based on a batch experiment. If the batch experiment
+with the same ID already exists, it will be reused. Otherwise, you must
+provide the [code]--input-set-id[/code] option to create a new batch
+experiment.
+
+Use the [code]--wait[/code] flag to wait for the acceptance test to
+complete, polling for results. Using the [code]--output[/code] flag will
+also activate waiting, and allows you to specify a destination file for the
+results.
+
+[bold][underline]Metrics[/underline][/bold]
+
+Metrics are provided as [magenta]json[/magenta] objects using the
+[code]--metrics[/code] flag. Each metric defines how to compare the
+candidate and baseline instances.
+
+You can provide metrics in three ways:
+- A single metric as a [magenta]json[/magenta] object.
+- Multiple metrics by repeating the [code]--metrics[/code] flag.
+- Multiple metrics as a [magenta]json[/magenta] array in a single [code]--metrics[/code] flag.
+
+Each metric must have the following fields:
+- [magenta]field[/magenta]: Field of the metric to measure (e.g., "solution.objective").
+- [magenta]metric_type[/magenta]: Type of metric comparison. Allowed values: {enum_values(MetricType)}.
+- [magenta]params[/magenta]: Parameters of the metric comparison.
+    - [magenta]operator[/magenta]: Comparison operator. Allowed values: {enum_values(Comparison)}.
+    - [magenta]tolerance[/magenta]: Tolerance for the comparison.
+        - [magenta]type[/magenta]: Type of tolerance. Allowed values: {enum_values(MetricToleranceType)}.
+        - [magenta]value[/magenta]: Tolerance value (numeric).
+- [magenta]statistic[/magenta]: Statistical method. Allowed values: {enum_values(StatisticType)}.
+
+Object format:
+[green]{{
+    "field": "field",
+    "metric_type": "type",
+    "params": {{
+        "operator": "op",
+        "tolerance": {{
+            "type": "tol_type",
+            "value": tol_value
+        }}
+    }},
+    "statistic": "statistic"
+}}[/green]
+
+[bold][underline]Examples[/underline][/bold]
+
+- Create an acceptance test with a single metric.
+    $ [green]METRIC='{{
+        "field": "solution.objective",
+        "metric_type": "direct-comparison",
+        "params": {{
+            "operator": "lt",
+            "tolerance": {{"type": "relative", "value": 0.05}}
+        }},
+        "statistic": "mean"
+    }}'
+    nextmv cloud acceptance create --app-id hare-app --acceptance-test-id test-123 \\
+        --candidate-instance-id candidate-123 --baseline-instance-id baseline-456 \\
+        --metrics "$METRIC" --input-set-id input-set-123[/green]
+
+- Create with multiple metrics by repeating the flag.
+    $ [green]METRIC1='{{
+        "field": "solution.objective",
+        "metric_type": "direct-comparison",
+        "params": {{
+            "operator": "lt",
+            "tolerance": {{"type": "relative", "value": 0.05}}
+        }},
+        "statistic": "mean"
+    }}'
+    METRIC2='{{
+        "field": "statistics.run.duration",
+        "metric_type": "direct-comparison",
+        "params": {{
+            "operator": "le",
+            "tolerance": {{"type": "absolute", "value": 1.0}}
+        }},
+        "statistic": "p95"
+    }}'
+    nextmv cloud acceptance create --app-id hare-app --acceptance-test-id test-123 \\
+        --candidate-instance-id candidate-123 --baseline-instance-id baseline-456 \\
+        --metrics "$METRIC1" --metrics "$METRIC2" --input-set-id input-set-123[/green]
+
+- Create with multiple metrics in a single [magenta]json[/magenta] array.
+    $ [green]METRICS='[
+        {{
+            "field": "solution.objective",
+            "metric_type": "direct-comparison",
+            "params": {{
+                "operator": "lt",
+                "tolerance": {{"type": "relative", "value": 0.05}}
+            }},
+            "statistic": "mean"
+        }},
+        {{
+            "field": "statistics.run.duration",
+            "metric_type": "direct-comparison",
+            "params": {{
+                "operator": "le",
+                "tolerance": {{"type": "absolute", "value": 1.0}}
+            }},
+            "statistic": "p95"
+        }}
+    ]'
+    nextmv cloud acceptance create --app-id hare-app --acceptance-test-id test-123 \\
+        --candidate-instance-id candidate-123 --baseline-instance-id baseline-456 \\
+        --metrics "$METRICS" --input-set-id input-set-123[/green]
+
+- Create an acceptance test and wait for it to complete.
+    $ [green]METRIC='{{
+        "field": "solution.objective",
+        "metric_type": "direct-comparison",
+        "params": {{
+            "operator": "lt",
+            "tolerance": {{"type": "relative", "value": 0.05}}
+        }},
+        "statistic": "mean"
+    }}'
+    nextmv cloud acceptance create --app-id hare-app --acceptance-test-id test-123 \\
+        --candidate-instance-id candidate-123 --baseline-instance-id baseline-456 \\
+        --metrics "$METRIC" --input-set-id input-set-123 --wait[/green]
+
+- Create an acceptance test and save the results to a file, waiting for completion.
+    $ [green]METRIC='{{
+        "field": "solution.objective",
+        "metric_type": "direct-comparison",
+        "params": {{
+            "operator": "lt",
+            "tolerance": {{"type": "relative", "value": 0.05}}
+        }},
+        "statistic": "mean"
+    }}'
+    nextmv cloud acceptance create --app-id hare-app --acceptance-test-id test-123 \\
+        --candidate-instance-id candidate-123 --baseline-instance-id baseline-456 \\
+        --metrics "$METRIC" --input-set-id input-set-123 --output results.json[/green]
+"""
+)
+def create(
+    app_id: AppIDOption,
+    # Options for acceptance test configuration.
+    acceptance_test_id: Annotated[
+        str,
+        typer.Option(
+            "--acceptance-test-id",
+            "-t",
+            help="ID for the acceptance test.",
+            envvar="NEXTMV_ACCEPTANCE_TEST_ID",
+            metavar="ACCEPTANCE_TEST_ID",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ],
+    baseline_instance_id: Annotated[
+        str,
+        typer.Option(
+            "--baseline-instance-id",
+            "-b",
+            help="ID of the baseline instance to compare against.",
+            metavar="BASELINE_INSTANCE_ID",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ],
+    candidate_instance_id: Annotated[
+        str,
+        typer.Option(
+            "--candidate-instance-id",
+            "-c",
+            help="ID of the candidate instance to test.",
+            metavar="CANDIDATE_INSTANCE_ID",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ],
+    metrics: Annotated[
+        list[str],
+        typer.Option(
+            "--metrics",
+            "-m",
+            help="Metrics to use for the acceptance test. Data should be valid [magenta]json[/magenta]. "
+            "Pass multiple metrics by repeating the flag, or providing a list of objects. "
+            "See command help for details on metric formatting.",
+            metavar="METRICS",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ],
+    description: Annotated[
+        str | None,
+        typer.Option(
+            "--description",
+            "-d",
+            help="Description of the acceptance test.",
+            metavar="DESCRIPTION",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ] = None,
+    input_set_id: Annotated[
+        str | None,
+        typer.Option(
+            "--input-set-id",
+            "-i",
+            help="ID of the input set to use for the underlying batch experiment. "
+            "Required if the batch experiment does not exist yet.",
+            metavar="INPUT_SET_ID",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ] = None,
+    name: Annotated[
+        str | None,
+        typer.Option(
+            "--name",
+            "-n",
+            help="Name of the acceptance test. If not provided, the ID will be used as the name.",
+            metavar="NAME",
+            rich_help_panel="Acceptance test configuration",
+        ),
+    ] = None,
+    # Options for controlling output.
+    output: Annotated[
+        str | None,
+        typer.Option(
+            "--output",
+            "-o",
+            help="Waits for the test to complete and saves the results to this location.",
+            metavar="OUTPUT_PATH",
+            rich_help_panel="Output control",
+        ),
+    ] = None,
+    timeout: Annotated[
+        int,
+        typer.Option(
+            help="The maximum time in seconds to wait for results when polling. Poll indefinitely if not set.",
+            metavar="TIMEOUT_SECONDS",
+            rich_help_panel="Output control",
+        ),
+    ] = -1,
+    wait: Annotated[
+        bool,
+        typer.Option(
+            "--wait",
+            "-w",
+            help="Wait for the acceptance test to complete. Results are printed to [magenta]stdout[/magenta]. "
+            "Specify output location with [code]--output[/code].",
+            rich_help_panel="Output control",
+        ),
+    ] = False,
+    profile: ProfileOption = None,
+) -> None:
+    cloud_app = build_app(app_id=app_id, profile=profile)
+
+    # Build the metrics list from the CLI options
+    metrics_list = build_metrics(metrics)
+
+    new_test = cloud_app.new_acceptance_test(
+        candidate_instance_id=candidate_instance_id,
+        baseline_instance_id=baseline_instance_id,
+        id=acceptance_test_id,
+        metrics=metrics_list,
+        name=name,
+        input_set_id=input_set_id,
+        description=description,
+    )
+    acceptance_id = new_test.id
+
+    # If we don't need to poll at all we are done.
+    if not wait and (output is None or output == ""):
+        print_json({"acceptance_test_id": acceptance_id})
+
+        return
+
+    success(f"Acceptance test [magenta]{acceptance_id}[/magenta] created.")
+
+    # Build the polling options.
+    polling_options = default_polling_options()
+    polling_options.max_duration = timeout
+
+    in_progress(msg="Getting acceptance test results...")
+    acceptance_test = cloud_app.acceptance_test_with_polling(
+        acceptance_test_id=acceptance_id,
+        polling_options=polling_options,
+    )
+    acceptance_test_dict = acceptance_test.to_dict()
+
+    # Handle output
+    if output is not None and output != "":
+        with open(output, "w") as f:
+            json.dump(acceptance_test_dict, f, indent=2)
+
+        success(msg=f"Acceptance test results saved to [magenta]{output}[/magenta].")
+
+        return
+
+    print_json(acceptance_test_dict)
+
+
+def build_metrics(metrics: list[str]) -> list[Metric]:
+    """
+    Builds the metrics list from the CLI option(s).
+
+    Parameters
+    ----------
+    metrics : list[str]
+        List of metrics provided via the CLI.
+
+    Returns
+    -------
+    list[Metric]
+        The built metrics list.
+    """
+    metrics_list = []
+
+    for metric_str in metrics:
+        try:
+            metric_data = json.loads(metric_str)
+
+            # Handle the case where the value is a list of metrics.
+            if isinstance(metric_data, list):
+                for ix, item in enumerate(metric_data):
+                    if (
+                        item.get("field") is None
+                        or item.get("metric_type") is None
+                        or item.get("params") is None
+                        or item.get("statistic") is None
+                    ):
+                        error(
+                            f"Invalid metric format at index [magenta]{ix}[/magenta] in "
+                            f"[magenta]{metric_str}[/magenta]. Each metric must have "
+                            "[magenta]field[/magenta], [magenta]metric_type[/magenta], "
+                            "[magenta]params[/magenta], and [magenta]statistic[/magenta] fields."
+                        )
+
+                    metric = Metric(**item)
+                    metrics_list.append(metric)
+
+            # Handle the case where the value is a single metric.
+            elif isinstance(metric_data, dict):
+                if (
+                    metric_data.get("field") is None
+                    or metric_data.get("metric_type") is None
+                    or metric_data.get("params") is None
+                    or metric_data.get("statistic") is None
+                ):
+                    error(
+                        f"Invalid metric format in [magenta]{metric_str}[/magenta]. "
+                        "Each metric must have [magenta]field[/magenta], [magenta]metric_type[/magenta], "
+                        "[magenta]params[/magenta], and [magenta]statistic[/magenta] fields."
+                    )
+
+                metric = Metric(**metric_data)
+                metrics_list.append(metric)
+
+            else:
+                error(
+                    f"Invalid metric format: [magenta]{metric_str}[/magenta]. "
+                    "Expected [magenta]json[/magenta] object or array."
+                )
+
+        except (json.JSONDecodeError, KeyError, ValueError) as e:
+            error(f"Invalid metric format: [magenta]{metric_str}[/magenta]. Error: {e}")
+
+    if not metrics_list:
+        error(
+            "No valid metrics were provided. Please specify at least one metric with "
+            "[magenta]field[/magenta], [magenta]metric_type[/magenta], "
+            "[magenta]params[/magenta], and [magenta]statistic[/magenta] fields."
+        )
+
+    return metrics_list
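
For orientation, the command above is a thin CLI wrapper over the SDK client that build_app constructs. Below is a minimal sketch of the equivalent direct SDK usage; the Client/Application wiring and all IDs are illustrative assumptions, not code from this diff, while the method names and keyword arguments come from the hunk itself.

import json

from nextmv.cloud import Application, Client
from nextmv.cloud.acceptance_test import Metric

# Illustrative wiring; the CLI builds this client from the active profile.
cloud_app = Application(client=Client(api_key="<YOUR-API-KEY>"), id="hare-app")

# Same deserialization the command performs on each parsed --metrics payload.
metric = Metric(
    **json.loads(
        '{"field": "solution.objective", "metric_type": "direct-comparison", '
        '"params": {"operator": "lt", "tolerance": {"type": "relative", "value": 0.05}}, '
        '"statistic": "mean"}'
    )
)

# Mirrors the new_acceptance_test call in the command body above.
new_test = cloud_app.new_acceptance_test(
    candidate_instance_id="candidate-123",
    baseline_instance_id="baseline-456",
    id="test-123",
    metrics=[metric],
    input_set_id="input-set-123",
)
print(new_test.id)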

nextmv/cli/cloud/acceptance/delete.py
@@ -0,0 +1,68 @@
+"""
+This module defines the cloud acceptance delete command for the Nextmv CLI.
+"""
+
+from typing import Annotated
+
+import typer
+from rich.prompt import Confirm
+
+from nextmv.cli.configuration.config import build_app
+from nextmv.cli.message import info, success
+from nextmv.cli.options import AcceptanceTestIDOption, AppIDOption, ProfileOption
+
+# Set up subcommand application.
+app = typer.Typer()
+
+
+@app.command()
+def delete(
+    app_id: AppIDOption,
+    acceptance_test_id: AcceptanceTestIDOption,
+    yes: Annotated[
+        bool,
+        typer.Option(
+            "--yes",
+            "-y",
+            help="Agree to deletion confirmation prompt. Useful for non-interactive sessions.",
+        ),
+    ] = False,
+    profile: ProfileOption = None,
+) -> None:
+    """
+    Deletes a Nextmv Cloud acceptance test.
+
+    This action is permanent and cannot be undone. The underlying batch experiment
+    and associated data will also be deleted. Use the [code]--yes[/code] flag to skip
+    the confirmation prompt.
+
+    [bold][underline]Examples[/underline][/bold]
+
+    - Delete the acceptance test with the ID [magenta]test-cotton-tail[/magenta] from application
+      [magenta]hare-app[/magenta].
+        $ [green]nextmv cloud acceptance delete --app-id hare-app --acceptance-test-id test-cotton-tail[/green]
+
+    - Delete the acceptance test without confirmation prompt.
+        $ [green]nextmv cloud acceptance delete --app-id hare-app --acceptance-test-id test-cotton-tail --yes[/green]
+    """
+
+    if not yes:
+        confirm = Confirm.ask(
+            f"Are you sure you want to delete acceptance test [magenta]{acceptance_test_id}[/magenta] "
+            f"from application [magenta]{app_id}[/magenta]? This action cannot be undone.",
+            default=False,
+        )
+
+        if not confirm:
+            info(
+                msg=f"Acceptance test [magenta]{acceptance_test_id}[/magenta] will not be deleted.",
+                emoji=":bulb:",
+            )
+            return
+
+    cloud_app = build_app(app_id=app_id, profile=profile)
+    cloud_app.delete_acceptance_test(acceptance_test_id=acceptance_test_id)
+    success(
+        f"Acceptance test [magenta]{acceptance_test_id}[/magenta] deleted successfully "
+        f"from application [magenta]{app_id}[/magenta]."
+    )
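
Once the prompt is confirmed, the deletion path reduces to a single SDK call. A sketch under the same illustrative setup as above (IDs and client wiring are assumptions; the delete_acceptance_test call is taken from the hunk):

from nextmv.cloud import Application, Client

# Illustrative IDs; the CLI adds the Confirm.ask prompt before this call.
cloud_app = Application(client=Client(api_key="<YOUR-API-KEY>"), id="hare-app")
cloud_app.delete_acceptance_test(acceptance_test_id="test-cotton-tail")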

nextmv/cli/cloud/acceptance/get.py
@@ -0,0 +1,104 @@
+"""
+This module defines the cloud acceptance get command for the Nextmv CLI.
+"""
+
+import json
+from typing import Annotated
+
+import typer
+
+from nextmv.cli.configuration.config import build_app
+from nextmv.cli.message import in_progress, print_json, success
+from nextmv.cli.options import AcceptanceTestIDOption, AppIDOption, ProfileOption
+from nextmv.polling import default_polling_options
+
+# Set up subcommand application.
+app = typer.Typer()
+
+
+@app.command()
+def get(
+    app_id: AppIDOption,
+    acceptance_test_id: AcceptanceTestIDOption,
+    output: Annotated[
+        str | None,
+        typer.Option(
+            "--output",
+            "-o",
+            help="Waits for the acceptance test to complete and saves the results to this location.",
+            metavar="OUTPUT_PATH",
+        ),
+    ] = None,
+    timeout: Annotated[
+        int,
+        typer.Option(
+            help="The maximum time in seconds to wait for results when polling. Poll indefinitely if not set.",
+            metavar="TIMEOUT_SECONDS",
+        ),
+    ] = -1,
+    wait: Annotated[
+        bool,
+        typer.Option(
+            "--wait",
+            "-w",
+            help="Wait for the acceptance test to complete. Results are printed to [magenta]stdout[/magenta]. "
+            "Specify output location with [code]--output[/code].",
+        ),
+    ] = False,
+    profile: ProfileOption = None,
+) -> None:
+    """
+    Get a Nextmv Cloud acceptance test.
+
+    Use the [code]--wait[/code] flag to wait for the acceptance test to
+    complete, polling for results. Using the [code]--output[/code] flag will
+    also activate waiting, and allows you to specify a destination file for the
+    results.
+
+    [bold][underline]Examples[/underline][/bold]
+
+    - Get the acceptance test with ID [magenta]test-123[/magenta] from application
+      [magenta]hare-app[/magenta].
+        $ [green]nextmv cloud acceptance get --app-id hare-app --acceptance-test-id test-123[/green]
+
+    - Get the acceptance test and wait for it to complete if necessary.
+        $ [green]nextmv cloud acceptance get --app-id hare-app --acceptance-test-id test-123 --wait[/green]
+
+    - Get the acceptance test and save the results to a file.
+        $ [green]nextmv cloud acceptance get --app-id hare-app \\
+            --acceptance-test-id test-123 --output results.json[/green]
+
+    - Get the acceptance test using a specific profile.
+        $ [green]nextmv cloud acceptance get --app-id hare-app --acceptance-test-id test-123 --profile prod[/green]
+    """
+
+    cloud_app = build_app(app_id=app_id, profile=profile)
+
+    # Build the polling options.
+    polling_options = default_polling_options()
+    polling_options.max_duration = timeout
+
+    # Determine if we should wait
+    should_wait = wait or (output is not None and output != "")
+
+    in_progress(msg="Getting acceptance test...")
+    if should_wait:
+        acceptance_test = cloud_app.acceptance_test_with_polling(
+            acceptance_test_id=acceptance_test_id,
+            polling_options=polling_options,
+        )
+    else:
+        acceptance_test = cloud_app.acceptance_test(acceptance_test_id=acceptance_test_id)
+
+    acceptance_test_dict = acceptance_test.to_dict()
+
+    # Handle output
+    if output is not None and output != "":
+        with open(output, "w") as f:
+            json.dump(acceptance_test_dict, f, indent=2)
+
+        success(msg=f"Acceptance test results saved to [magenta]{output}[/magenta].")
+
+        return
+
+    print_json(acceptance_test_dict)
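
The --wait/--timeout pair maps onto the SDK's polling options. A sketch of the waiting branch, with an assumed 300-second cap standing in for --timeout 300 (client wiring and IDs are illustrative; the polling calls come from the hunk):

from nextmv.cloud import Application, Client
from nextmv.polling import default_polling_options

cloud_app = Application(client=Client(api_key="<YOUR-API-KEY>"), id="hare-app")

# Same knob the command sets: -1 polls indefinitely, a positive value caps it.
polling_options = default_polling_options()
polling_options.max_duration = 300

acceptance_test = cloud_app.acceptance_test_with_polling(
    acceptance_test_id="test-123",
    polling_options=polling_options,
)
print(acceptance_test.to_dict())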

nextmv/cli/cloud/acceptance/list.py
@@ -0,0 +1,62 @@
+"""
+This module defines the cloud acceptance list command for the Nextmv CLI.
+"""
+
+import json
+from typing import Annotated
+
+import typer
+
+from nextmv.cli.configuration.config import build_app
+from nextmv.cli.message import in_progress, print_json, success
+from nextmv.cli.options import AppIDOption, ProfileOption
+
+# Set up subcommand application.
+app = typer.Typer()
+
+
+@app.command()
+def list(
+    app_id: AppIDOption,
+    output: Annotated[
+        str | None,
+        typer.Option(
+            "--output",
+            "-o",
+            help="Saves the list of acceptance tests to this location.",
+            metavar="OUTPUT_PATH",
+        ),
+    ] = None,
+    profile: ProfileOption = None,
+) -> None:
+    """
+    List all Nextmv Cloud acceptance tests for an application.
+
+    This command retrieves all acceptance tests associated with the specified
+    application.
+
+    [bold][underline]Examples[/underline][/bold]
+
+    - List all acceptance tests for application [magenta]hare-app[/magenta].
+        $ [green]nextmv cloud acceptance list --app-id hare-app[/green]
+
+    - List all acceptance tests and save to a file.
+        $ [green]nextmv cloud acceptance list --app-id hare-app --output tests.json[/green]
+
+    - List all acceptance tests using a specific profile.
+        $ [green]nextmv cloud acceptance list --app-id hare-app --profile prod[/green]
+    """
+
+    cloud_app = build_app(app_id=app_id, profile=profile)
+    in_progress(msg="Listing acceptance tests...")
+    acceptance_tests = cloud_app.list_acceptance_tests()
+    acceptance_tests_dict = [test.to_dict() for test in acceptance_tests]
+
+    if output is not None and output != "":
+        with open(output, "w") as f:
+            json.dump(acceptance_tests_dict, f, indent=2)
+
+        success(msg=f"Acceptance tests list saved to [magenta]{output}[/magenta].")
+        return
+
+    print_json(acceptance_tests_dict)
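
The list command is a direct pass-through to the SDK. A sketch of the same retrieval and serialization, under the same illustrative client setup as in the earlier sketches:

import json

from nextmv.cloud import Application, Client

cloud_app = Application(client=Client(api_key="<YOUR-API-KEY>"), id="hare-app")

# Same call and serialization the command uses before printing or saving.
acceptance_tests = cloud_app.list_acceptance_tests()
print(json.dumps([test.to_dict() for test in acceptance_tests], indent=2))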