nextmv 0.29.5.dev1__tar.gz → 0.30.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/PKG-INFO +1 -1
- nextmv-0.30.0/nextmv/__about__.py +1 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/__init__.py +5 -1
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/acceptance_test.py +2 -51
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/application.py +412 -60
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/batch_experiment.py +73 -1
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/manifest.py +164 -1
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/app.yaml +9 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_manifest.py +47 -0
- nextmv-0.29.5.dev1/nextmv/__about__.py +0 -1
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/.gitignore +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/LICENSE +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/README.md +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/__entrypoint__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/__init__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/_serialization.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/base_model.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/account.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/client.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/input_set.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/instance.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/package.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/run.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/safe.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/scenario.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/secrets.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/status.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/cloud/version.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/default_app/README.md +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/default_app/app.yaml +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/default_app/requirements.txt +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/default_app/src/__init__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/default_app/src/main.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/default_app/src/visuals.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/deprecated.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/input.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/logger.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/model.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/options.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/nextmv/output.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/pyproject.toml +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/__init__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/__init__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_application.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_client.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_package.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_run.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_safe_name_id.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/cloud/test_scenario.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/__init__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options1.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options2.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options3.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options4.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options5.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options6.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options7.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/scripts/options_deprecated.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_base_model.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_entrypoint/__init__.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_entrypoint/test_entrypoint.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_input.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_inputs/test_data.csv +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_inputs/test_data.json +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_inputs/test_data.txt +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_logger.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_model.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_options.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_output.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_serialization.py +0 -0
- {nextmv-0.29.5.dev1 → nextmv-0.30.0}/tests/test_version.py +0 -0
nextmv-0.30.0/nextmv/__about__.py (new file)
@@ -0,0 +1 @@
+__version__ = "v0.30.0"
nextmv/cloud/__init__.py
@@ -6,7 +6,6 @@ from .acceptance_test import Comparison as Comparison
 from .acceptance_test import ComparisonInstance as ComparisonInstance
 from .acceptance_test import DistributionPercentiles as DistributionPercentiles
 from .acceptance_test import DistributionSummaryStatistics as DistributionSummaryStatistics
-from .acceptance_test import ExperimentStatus as ExperimentStatus
 from .acceptance_test import Metric as Metric
 from .acceptance_test import MetricParams as MetricParams
 from .acceptance_test import MetricResult as MetricResult

@@ -28,6 +27,7 @@ from .batch_experiment import BatchExperiment as BatchExperiment
 from .batch_experiment import BatchExperimentInformation as BatchExperimentInformation
 from .batch_experiment import BatchExperimentMetadata as BatchExperimentMetadata
 from .batch_experiment import BatchExperimentRun as BatchExperimentRun
+from .batch_experiment import ExperimentStatus as ExperimentStatus
 from .client import Client as Client
 from .client import get_size as get_size
 from .input_set import InputSet as InputSet

@@ -37,6 +37,10 @@ from .instance import InstanceConfiguration as InstanceConfiguration
 from .manifest import MANIFEST_FILE_NAME as MANIFEST_FILE_NAME
 from .manifest import Manifest as Manifest
 from .manifest import ManifestBuild as ManifestBuild
+from .manifest import ManifestContent as ManifestContent
+from .manifest import ManifestContentMultiFile as ManifestContentMultiFile
+from .manifest import ManifestContentMultiFileInput as ManifestContentMultiFileInput
+from .manifest import ManifestContentMultiFileOutput as ManifestContentMultiFileOutput
 from .manifest import ManifestOption as ManifestOption
 from .manifest import ManifestPython as ManifestPython
 from .manifest import ManifestPythonModel as ManifestPythonModel
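For orientation, here is a minimal, editor-added sketch (not part of the diff) of importing the names that 0.30.0 newly re-exports from `nextmv.cloud`; it assumes the 0.30.0 package is installed, and the printed value follows the enum definition further down in this diff:

```python
# Sketch only: exercises the re-exports added to nextmv/cloud/__init__.py in 0.30.0.
from nextmv.cloud import (
    ExperimentStatus,  # now defined in batch_experiment instead of acceptance_test
    ManifestContent,
    ManifestContentMultiFile,
    ManifestContentMultiFileInput,
    ManifestContentMultiFileOutput,
)

# Members are upper-case in 0.30.0, e.g. ExperimentStatus.COMPLETED.
print(ExperimentStatus.COMPLETED.value)  # prints: completed
```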
nextmv/cloud/acceptance_test.py
@@ -46,6 +46,7 @@ from enum import Enum
 from typing import Optional
 
 from nextmv.base_model import BaseModel
+from nextmv.cloud.batch_experiment import ExperimentStatus
 
 
 class MetricType(str, Enum):

@@ -249,56 +250,6 @@ class ToleranceType(str, Enum):
     """Relative tolerance type."""
 
 
-class ExperimentStatus(str, Enum):
-    """
-    Status of an acceptance test experiment.
-
-    You can import the `ExperimentStatus` class directly from `cloud`:
-
-    ```python
-    from nextmv.cloud import ExperimentStatus
-    ```
-
-    This enumeration defines the different possible statuses of an experiment
-    underlying an acceptance test.
-
-    Attributes
-    ----------
-    started : str
-        The experiment has started.
-    completed : str
-        The experiment was completed successfully.
-    failed : str
-        The experiment failed.
-    draft : str
-        The experiment is a draft.
-    canceled : str
-        The experiment was canceled.
-    unknown : str
-        The experiment status is unknown.
-
-    Examples
-    --------
-    >>> from nextmv.cloud import ExperimentStatus
-    >>> status = ExperimentStatus.completed
-    >>> status
-    <ExperimentStatus.completed: 'completed'>
-    """
-
-    started = "started"
-    """The experiment has started."""
-    completed = "completed"
-    """The experiment was completed."""
-    failed = "failed"
-    """The experiment failed."""
-    draft = "draft"
-    """The experiment is a draft."""
-    canceled = "canceled"
-    """The experiment was canceled."""
-    unknown = "unknown"
-    """The experiment status is unknown."""
-
-
 class MetricTolerance(BaseModel):
     """
     Tolerance used for a metric in an acceptance test.

@@ -942,7 +893,7 @@ class AcceptanceTest(BaseModel):
     """Creation date of the acceptance test."""
     updated_at: datetime
     """Last update date of the acceptance test."""
-    status: Optional[ExperimentStatus] = ExperimentStatus.unknown
+    status: Optional[ExperimentStatus] = ExperimentStatus.UNKNOWN
     """Status of the acceptance test."""
     results: Optional[AcceptanceTestResults] = None
     """Results of the acceptance test."""
nextmv/cloud/application.py
@@ -40,12 +40,13 @@ import requests
 from nextmv._serialization import deflated_serialize_json
 from nextmv.base_model import BaseModel
 from nextmv.cloud import package
-from nextmv.cloud.acceptance_test import AcceptanceTest,
+from nextmv.cloud.acceptance_test import AcceptanceTest, Metric
 from nextmv.cloud.batch_experiment import (
     BatchExperiment,
     BatchExperimentInformation,
     BatchExperimentMetadata,
     BatchExperimentRun,
+    ExperimentStatus,
     to_runs,
 )
 from nextmv.cloud.client import Client, get_size
@@ -506,6 +507,57 @@ class Application:
 
         return AcceptanceTest.from_dict(response.json())
 
+    def acceptance_test_with_polling(
+        self,
+        acceptance_test_id: str,
+        polling_options: PollingOptions = _DEFAULT_POLLING_OPTIONS,
+    ) -> AcceptanceTest:
+        """
+        Retrieve details of an acceptance test using polling.
+
+        Retrieves the result of an acceptance test. This method polls for the
+        result until the test finishes executing or the polling strategy is
+        exhausted.
+
+        Parameters
+        ----------
+        acceptance_test_id : str
+            ID of the acceptance test to retrieve.
+
+        Returns
+        -------
+        AcceptanceTest
+            The requested acceptance test details.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+
+        Examples
+        --------
+        >>> test = app.acceptance_test_with_polling("test-123")
+        >>> print(test.name)
+        'My Test'
+        """
+
+        def polling_func() -> tuple[Any, bool]:
+            acceptance_test_result = self.acceptance_test(acceptance_test_id=acceptance_test_id)
+            if acceptance_test_result.status in {
+                ExperimentStatus.COMPLETED,
+                ExperimentStatus.FAILED,
+                ExperimentStatus.DRAFT,
+                ExperimentStatus.CANCELED,
+                ExperimentStatus.DELETE_FAILED,
+            }:
+                return acceptance_test_result, True
+
+            return None, False
+
+        acceptance_test = poll(polling_options=polling_options, polling_func=polling_func)
+
+        return self.acceptance_test(acceptance_test_id=acceptance_test.id)
+
     def batch_experiment(self, batch_id: str) -> BatchExperiment:
         """
         Get a batch experiment.
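As a quick illustration (editor-added, not part of the package), the new polling variant might be used roughly like this; the client/application construction and all IDs are assumptions, and the default `PollingOptions` from the signature above are used:

```python
# Hedged sketch: "my-app" and "test-123" are hypothetical placeholders.
from nextmv.cloud import Application, Client

client = Client()  # assumes NEXTMV_API_KEY is set in the environment
app = Application(client=client, id="my-app")

# Blocks until the acceptance test reaches a terminal ExperimentStatus
# (or the polling strategy is exhausted), then returns the full test.
test = app.acceptance_test_with_polling(acceptance_test_id="test-123")
print(test.status)
```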
@@ -539,6 +591,90 @@ class Application:
 
         return BatchExperiment.from_dict(response.json())
 
+    def batch_experiment_metadata(self, batch_id: str) -> BatchExperimentMetadata:
+        """
+        Get metadata for a batch experiment.
+
+        Parameters
+        ----------
+        batch_id : str
+            ID of the batch experiment.
+
+        Returns
+        -------
+        BatchExperimentMetadata
+            The requested batch experiment metadata.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+
+        Examples
+        --------
+        >>> metadata = app.batch_experiment_metadata("batch-123")
+        >>> print(metadata.name)
+        'My Batch Experiment'
+        """
+
+        response = self.client.request(
+            method="GET",
+            endpoint=f"{self.experiments_endpoint}/batch/{batch_id}/metadata",
+        )
+
+        return BatchExperimentMetadata.from_dict(response.json())
+
+    def batch_experiment_with_polling(
+        self,
+        batch_id: str,
+        polling_options: PollingOptions = _DEFAULT_POLLING_OPTIONS,
+    ) -> BatchExperiment:
+        """
+        Get a batch experiment with polling.
+
+        Retrieves the result of an experiment. This method polls for the result
+        until the experiment finishes executing or the polling strategy is
+        exhausted.
+
+        Parameters
+        ----------
+        batch_id : str
+            ID of the batch experiment.
+
+        Returns
+        -------
+        BatchExperiment
+            The requested batch experiment details.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+
+        Examples
+        --------
+        >>> batch_exp = app.batch_experiment_with_polling("batch-123")
+        >>> print(batch_exp.name)
+        'My Batch Experiment'
+        """
+
+        def polling_func() -> tuple[Any, bool]:
+            batch_metadata = self.batch_experiment_metadata(batch_id=batch_id)
+            if batch_metadata.status in {
+                ExperimentStatus.COMPLETED,
+                ExperimentStatus.FAILED,
+                ExperimentStatus.DRAFT,
+                ExperimentStatus.CANCELED,
+                ExperimentStatus.DELETE_FAILED,
+            }:
+                return batch_metadata, True
+
+            return None, False
+
+        batch_information = poll(polling_options=polling_options, polling_func=polling_func)
+
+        return self.batch_experiment(batch_id=batch_information.id)
+
     def cancel_run(self, run_id: str) -> None:
         """
         Cancel a run.
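The same pattern applies to batch experiments; a hedged, editor-added sketch with placeholder IDs and the same client assumptions as above:

```python
from nextmv.cloud import Application, Client

app = Application(client=Client(), id="my-app")  # placeholders; assumes NEXTMV_API_KEY is set

# Lightweight status check without fetching full results.
metadata = app.batch_experiment_metadata(batch_id="batch-123")
print(metadata.status)

# Poll until the experiment reaches a terminal status, then fetch the full experiment.
batch_exp = app.batch_experiment_with_polling(batch_id="batch-123")
print(batch_exp.name)
```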
@@ -1130,7 +1266,7 @@ class Application:
         else:
             # Get all input IDs from the input set.
             input_set = self.input_set(input_set_id=input_set_id)
-            if
+            if not input_set.input_ids:
                 raise ValueError(f"input set {input_set_id} does not contain any inputs")
             runs = []
             for input_id in input_set.input_ids:
@@ -1248,7 +1384,8 @@ class Application:
         >>> print(test.status)
         'completed'
         """
-
+
+        acceptance_test = self.new_acceptance_test(
             candidate_instance_id=candidate_instance_id,
             baseline_instance_id=baseline_instance_id,
             id=id,

@@ -1258,20 +1395,10 @@
             description=description,
         )
 
-
-
-
-
-                ExperimentStatus.failed,
-                ExperimentStatus.canceled,
-            ]:
-                return test_information, True
-
-            return None, False
-
-        test_information = poll(polling_options=polling_options, polling_func=polling_func)
-
-        return test_information
+        return self.acceptance_test_with_polling(
+            acceptance_test_id=acceptance_test.id,
+            polling_options=polling_options,
+        )
 
     def new_batch_experiment(
         self,
@@ -1354,6 +1481,76 @@
 
         return response.json()["id"]
 
+    def new_batch_experiment_with_result(
+        self,
+        name: str,
+        input_set_id: Optional[str] = None,
+        instance_ids: Optional[list[str]] = None,
+        description: Optional[str] = None,
+        id: Optional[str] = None,
+        option_sets: Optional[dict[str, dict[str, str]]] = None,
+        runs: Optional[list[Union[BatchExperimentRun, dict[str, Any]]]] = None,
+        type: Optional[str] = "batch",
+        polling_options: PollingOptions = _DEFAULT_POLLING_OPTIONS,
+    ) -> BatchExperiment:
+        """
+        Convenience method to create a new batch experiment and poll for the
+        result.
+
+        This method combines the `new_batch_experiment` and
+        `batch_experiment_with_polling` methods, applying polling logic to
+        check when the experiment succeeded.
+
+        Parameters
+        ----------
+        name: str
+            Name of the batch experiment.
+        input_set_id: str
+            ID of the input set to use for the batch experiment.
+        instance_ids: list[str]
+            List of instance IDs to use for the batch experiment. This argument
+            is deprecated, use `runs` instead.
+        description: Optional[str]
+            Optional description of the batch experiment.
+        id: Optional[str]
+            ID of the batch experiment. Will be generated if not provided.
+        option_sets: Optional[dict[str, dict[str, str]]]
+            Option sets to use for the batch experiment. This is a dictionary
+            where the keys are option set IDs and the values are dictionaries
+            with the actual options.
+        runs: Optional[list[BatchExperimentRun]]
+            List of runs to use for the batch experiment.
+        type: Optional[str]
+            Type of the batch experiment. This is used to determine the
+            experiment type. The default value is "batch". If you want to
+            create a scenario test, set this to "scenario".
+        polling_options : PollingOptions, default=_DEFAULT_POLLING_OPTIONS
+            Options to use when polling for the batch experiment result.
+
+        Returns
+        -------
+        BatchExperiment
+            The completed batch experiment with results.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+        """
+
+        batch_id = self.new_batch_experiment(
+            name=name,
+            input_set_id=input_set_id,
+            instance_ids=instance_ids,
+            description=description,
+            id=id,
+            option_sets=option_sets,
+            runs=runs,
+            type=type,
+        )
+
+        return self.batch_experiment_with_polling(batch_id=batch_id, polling_options=polling_options)
+
     def new_input_set(
         self,
         id: str,
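A hedged, editor-added sketch of the create-and-wait convenience method above; all IDs are placeholders, and `instance_ids` is used only for brevity even though the docstring marks it as deprecated in favor of `runs`:

```python
from nextmv.cloud import Application, Client

app = Application(client=Client(), id="my-app")  # placeholders; assumes NEXTMV_API_KEY is set

# Creates the batch experiment, then polls until it reaches a terminal status.
result = app.new_batch_experiment_with_result(
    name="baseline vs candidate",
    input_set_id="input-set-123",            # placeholder input set ID
    instance_ids=["baseline", "candidate"],  # deprecated; use runs=... in new code
)
print(result.status)
```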
@@ -1722,7 +1919,7 @@ class Application:
             not `JSON`. If the final `options` are not of type `dict[str,str]`.
         """
 
-        self.
+        self.__validate_input_dir_path_and_configuration(input_dir_path, configuration)
 
         tar_file = ""
         if input_dir_path is not None and input_dir_path != "":
@@ -2085,6 +2282,69 @@ class Application:
             runs=runs,
         )
 
+    def new_scenario_test_with_result(
+        self,
+        id: str,
+        name: str,
+        scenarios: list[Scenario],
+        description: Optional[str] = None,
+        repetitions: Optional[int] = 0,
+        polling_options: PollingOptions = _DEFAULT_POLLING_OPTIONS,
+    ) -> BatchExperiment:
+        """
+        Convenience method to create a new scenario test and poll for the
+        result.
+
+        This method combines the `new_scenario_test` and
+        `scenario_test_with_polling` methods, applying polling logic to
+        check when the test succeeded.
+
+        The scenario tests uses the batch experiments API under the hood.
+
+        Parameters
+        ----------
+        id: str
+            ID of the scenario test.
+        name: str
+            Name of the scenario test.
+        scenarios: list[Scenario]
+            List of scenarios to use for the scenario test. At least one
+            scenario should be provided.
+        description: Optional[str]
+            Optional description of the scenario test.
+        repetitions: Optional[int]
+            Number of repetitions to use for the scenario test. 0
+            repetitions means that the tests will be executed once. 1
+            repetition means that the test will be repeated once, i.e.: it
+            will be executed twice. 2 repetitions equals 3 executions, so on,
+            and so forth.
+
+        Returns
+        -------
+        BatchExperiment
+            The completed scenario test as a BatchExperiment.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+        ValueError
+            If no scenarios are provided.
+        """
+
+        test_id = self.new_scenario_test(
+            id=id,
+            name=name,
+            scenarios=scenarios,
+            description=description,
+            repetitions=repetitions,
+        )
+
+        return self.scenario_test_with_polling(
+            scenario_test_id=test_id,
+            polling_options=polling_options,
+        )
+
     def new_secrets_collection(
         self,
         secrets: list[Secret],
@@ -2638,9 +2898,9 @@ class Application:
         """
         Get a scenario test.
 
-        Retrieves a scenario test by ID. Scenario tests are based on batch
-        so this function returns the corresponding batch
-        the scenario test.
+        Retrieves a scenario test by ID. Scenario tests are based on batch
+        experiments, so this function returns the corresponding batch
+        experiment associated with the scenario test.
 
         Parameters
         ----------
@@ -2668,6 +2928,82 @@ class Application:
 
         return self.batch_experiment(batch_id=scenario_test_id)
 
+    def scenario_test_metadata(self, scenario_test_id: str) -> BatchExperimentMetadata:
+        """
+        Get the metadata for a scenario test, given its ID.
+
+        Scenario tests are based on batch experiments, so this function returns
+        the corresponding batch experiment metadata associated with the
+        scenario test.
+
+        Parameters
+        ----------
+        scenario_test_id : str
+            ID of the scenario test to retrieve.
+
+        Returns
+        -------
+        BatchExperimentMetadata
+            The scenario test metadata as a batch experiment.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+
+        Examples
+        --------
+        >>> metadata = app.scenario_test_metadata("scenario-123")
+        >>> print(metadata.name)
+        'My Scenario Test'
+        >>> print(metadata.type)
+        'scenario'
+        """
+
+        return self.batch_experiment_metadata(batch_id=scenario_test_id)
+
+    def scenario_test_with_polling(
+        self,
+        scenario_test_id: str,
+        polling_options: PollingOptions = _DEFAULT_POLLING_OPTIONS,
+    ) -> BatchExperiment:
+        """
+        Get a scenario test with polling.
+
+        Retrieves the result of a scenario test. This method polls for the
+        result until the test finishes executing or the polling strategy is
+        exhausted.
+
+        The scenario tests uses the batch experiments API under the hood.
+
+        Parameters
+        ----------
+        scenario_test_id : str
+            ID of the scenario test to retrieve.
+        polling_options : PollingOptions, default=_DEFAULT_POLLING_OPTIONS
+            Options to use when polling for the scenario test result.
+
+        Returns
+        -------
+        BatchExperiment
+            The scenario test details as a batch experiment.
+
+        Raises
+        ------
+        requests.HTTPError
+            If the response status code is not 2xx.
+
+        Examples
+        --------
+        >>> test = app.scenario_test_with_polling("scenario-123")
+        >>> print(test.name)
+        'My Scenario Test'
+        >>> print(test.type)
+        'scenario'
+        """
+
+        return self.batch_experiment_with_polling(batch_id=scenario_test_id, polling_options=polling_options)
+
     def track_run(self, tracked_run: TrackedRun, instance_id: Optional[str] = None) -> str:
         """
         Track an external run.
@@ -3393,6 +3729,47 @@ class Application:
 
         return result
 
+    @staticmethod
+    def __convert_manifest_to_payload(manifest: Manifest) -> dict[str, Any]:
+        """Converts a manifest to a payload dictionary for the API."""
+
+        activation_request = {
+            "requirements": {
+                "executable_type": manifest.type,
+                "runtime": manifest.runtime,
+            },
+        }
+
+        if manifest.configuration is not None and manifest.configuration.content is not None:
+            content = manifest.configuration.content
+            io_config = {
+                "format": content.format,
+            }
+            if content.multi_file is not None:
+                multi_config = io_config["multi_file"] = {}
+                if content.multi_file.input is not None:
+                    multi_config["input_path"] = content.multi_file.input.path
+                if content.multi_file.output is not None:
+                    output_config = multi_config["output_configuration"] = {}
+                    if content.multi_file.output.statistics:
+                        output_config["statistics_path"] = content.multi_file.output.statistics
+                    if content.multi_file.output.assets:
+                        output_config["assets_path"] = content.multi_file.output.assets
+                    if content.multi_file.output.solutions:
+                        output_config["solutions_path"] = content.multi_file.output.solutions
+            activation_request["requirements"]["io_configuration"] = io_config
+
+        if manifest.configuration is not None and manifest.configuration.options is not None:
+            options = manifest.configuration.options.to_dict()
+            if "format" in options and isinstance(options["format"], list):
+                # the endpoint expects a dictionary with a template key having a list of strings
+                # the app.yaml however defines format as a list of strings, so we need to convert it here
+                options["format"] = {
+                    "template": options["format"],
+                }
+            activation_request["requirements"]["options"] = options
+        return activation_request
+
     def __update_app_binary(
         self,
         tar_file: str,
@@ -3419,27 +3796,10 @@ class Application:
             headers={"Content-Type": "application/octet-stream"},
         )
 
-        activation_request = {
-            "requirements": {
-                "executable_type": manifest.type,
-                "runtime": manifest.runtime,
-            },
-        }
-
-        if manifest.configuration is not None and manifest.configuration.options is not None:
-            options = manifest.configuration.options.to_dict()
-            if "format" in options and isinstance(options["format"], list):
-                # the endpoint expects a dictionary with a template key having a list of strings
-                # the app.yaml however defines format as a list of strings, so we need to convert it here
-                options["format"] = {
-                    "template": options["format"],
-                }
-            activation_request["requirements"]["options"] = options
-
         response = self.client.request(
             method="PUT",
             endpoint=endpoint,
-            payload=activation_request,
+            payload=Application.__convert_manifest_to_payload(manifest=manifest),
         )
 
         if verbose:
@@ -3510,37 +3870,29 @@ class Application:
 
             raise ValueError(f"Unknown scenario input type: {scenario.scenario_input.scenario_input_type}")
 
-    def
+    def __validate_input_dir_path_and_configuration(
         self,
-
+        input_dir_path: Optional[str],
         configuration: Optional[RunConfiguration],
     ) -> None:
         """
         Auxiliary function to validate the directory path and configuration.
         """
-        if
+        if (
+            configuration is None
+            or configuration.format is None
+            or configuration.format.format_input is None
+            or configuration.format.format_input.input_type is None
+        ):
+            # No explicit input type set, so we cannot confirm it.
             return
 
-        if configuration is None:
-            raise ValueError(
-                "If dir_path is provided, a RunConfiguration must also be provided.",
-            )
-
-        if configuration.format is None:
-            raise ValueError(
-                "If dir_path is provided, RunConfiguration.format must also be provided.",
-            )
-
-        if configuration.format.format_input is None:
-            raise ValueError(
-                "If dir_path is provided, RunConfiguration.format.format_input must also be provided.",
-            )
-
         input_type = configuration.format.format_input.input_type
-
+        dir_types = (InputFormat.MULTI_FILE, InputFormat.CSV_ARCHIVE)
+        if input_type in dir_types and not input_dir_path:
             raise ValueError(
-                "If
-
+                f"If RunConfiguration.format.format_input.input_type is set to {input_type}, "
+                "then input_dir_path must be provided.",
             )
 
     def __package_inputs(self, dir_path: str) -> str:
nextmv/cloud/batch_experiment.py
@@ -3,6 +3,8 @@ This module contains definitions for batch experiments.
 
 Classes
 -------
+ExperimentStatus
+    Enum representing the status of an experiment.
 BatchExperimentInformation
     Base class for all batch experiment models containing common information.
 BatchExperiment
@@ -14,12 +16,82 @@ BatchExperimentMetadata
 """
 
 from datetime import datetime
+from enum import Enum
 from typing import Any, Optional
 
 from nextmv.base_model import BaseModel
 from nextmv.cloud.input_set import InputSet
 
 
+class ExperimentStatus(str, Enum):
+    """
+    Status of an experiment.
+
+    You can import the `ExperimentStatus` class directly from `cloud`:
+
+    ```python from nextmv.cloud import ExperimentStatus ```
+
+    This enum represents the comprehensive set of possible states for an
+    experiment in Nextmv Cloud.
+
+    Attributes
+    ----------
+    STARTED : str
+        Experiment started.
+    COMPLETED : str
+        Experiment completed.
+    FAILED : str
+        Experiment failed.
+    DRAFT : str
+        Experiment is a draft.
+    CANCELED : str
+        Experiment was canceled.
+    STOPPING : str
+        Experiment is stopping.
+    DELETING : str
+        Experiment is being deleted.
+    DELETE_FAILED : str
+        Experiment deletion failed.
+    UNKNOWN : str
+        Experiment status is unknown.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ExperimentStatus
+    >>> status = ExperimentStatus.STARTED
+    >>> print(f"The status is: {status.value}")
+    The status is: started
+
+    >>> if status == ExperimentStatus.COMPLETED:
+    ...     print("Processing complete.")
+    ... elif status in [ExperimentStatus.STARTED, ExperimentStatus.STOPPING]:
+    ...     print("Processing in progress.")
+    ... else:
+    ...     print("Processing has not started or has ended with issues.")
+    Processing in progress.
+
+    """
+
+    STARTED = "started"
+    """Experiment started."""
+    COMPLETED = "completed"
+    """Experiment completed."""
+    FAILED = "failed"
+    """Experiment failed."""
+    DRAFT = "draft"
+    """Experiment is a draft."""
+    CANCELED = "canceled"
+    """Experiment was canceled."""
+    STOPPING = "stopping"
+    """Experiment is stopping."""
+    DELETING = "deleting"
+    """Experiment is being deleted."""
+    DELETE_FAILED = "delete-failed"
+    """Experiment deletion failed."""
+    UNKNOWN = "unknown"
+    """Experiment status is unknown."""
+
+
 class BatchExperimentInformation(BaseModel):
     """Information about a batch experiment.
 
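The set of statuses the new polling helpers in `application.py` treat as terminal can be expressed directly against this enum; a small editor-added sketch:

```python
from nextmv.cloud import ExperimentStatus

# Terminal statuses, mirroring the checks in the *_with_polling methods above.
TERMINAL_STATUSES = {
    ExperimentStatus.COMPLETED,
    ExperimentStatus.FAILED,
    ExperimentStatus.DRAFT,
    ExperimentStatus.CANCELED,
    ExperimentStatus.DELETE_FAILED,
}


def is_finished(status: ExperimentStatus) -> bool:
    """Return True when an experiment no longer needs to be polled."""
    return status in TERMINAL_STATUSES


print(is_finished(ExperimentStatus.STARTED))    # False
print(is_finished(ExperimentStatus.COMPLETED))  # True
```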
@@ -83,7 +155,7 @@ class BatchExperimentInformation(BaseModel):
     updated_at: datetime
     """Last update date of the batch experiment."""
 
-    status: Optional[
+    status: Optional[ExperimentStatus] = None
     """Status of the batch experiment."""
     description: Optional[str] = None
     """Description of the batch experiment."""
nextmv/cloud/manifest.py
@@ -43,6 +43,7 @@ import yaml
 from pydantic import AliasChoices, Field
 
 from nextmv.base_model import BaseModel
+from nextmv.input import InputFormat
 from nextmv.model import _REQUIREMENTS_FILE, ModelConfiguration
 from nextmv.options import Option, Options, OptionsEnforcement
 
@@ -692,6 +693,166 @@ class ManifestOptions(BaseModel):
     )
 
 
+class ManifestContentMultiFileInput(BaseModel):
+    """
+    Configuration for multi-file content format input.
+
+    You can import the `ManifestContentMultiFileInput` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ManifestContentMultiFileInput
+    ```
+
+    Parameters
+    ----------
+    path : str
+        The path to the input file or directory.
+
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ManifestContentMultiFileInput
+    >>> input_config = ManifestContentMultiFileInput(path="data/input/")
+    >>> input_config.path
+    'data/input/'
+    """
+
+    path: str
+    """The path to the input file or directory."""
+
+
+class ManifestContentMultiFileOutput(BaseModel):
+    """
+    Configuration for multi-file content format output.
+
+    You can import the `ManifestContentMultiFileOutput` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ManifestContentMultiFileOutput
+    ```
+
+    Parameters
+    ----------
+    statistics : Optional[str], default=""
+        The path to the statistics file.
+    assets : Optional[str], default=""
+        The path to the assets file.
+    solutions : Optional[str], default=""
+        The path to the solutions directory.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ManifestContentMultiFileOutput
+    >>> output_config = ManifestContentMultiFileOutput(
+    ...     statistics="my-outputs/statistics.json",
+    ...     assets="my-outputs/assets.json",
+    ...     solutions="my-outputs/solutions/"
+    ... )
+    >>> output_config.statistics
+    'my-outputs/statistics.json'
+    """
+
+    statistics: Optional[str] = ""
+    """The path to the statistics file."""
+    assets: Optional[str] = ""
+    """The path to the assets file."""
+    solutions: Optional[str] = ""
+    """The path to the solutions directory."""
+
+
+class ManifestContentMultiFile(BaseModel):
+    """
+    Configuration for multi-file content format.
+
+    You can import the `ManifestContentMultiFile` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ManifestContentMultiFile
+    ```
+
+    Parameters
+    ----------
+    input : ManifestContentMultiFileInput
+        Configuration for multi-file content format input.
+    output : ManifestContentMultiFileOutput
+        Configuration for multi-file content format output.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ManifestContentMultiFile, ManifestContentMultiFileInput, ManifestContentMultiFileOutput
+    >>> multi_file_config = ManifestContentMultiFile(
+    ...     input=ManifestContentMultiFileInput(path="data/input/"),
+    ...     output=ManifestContentMultiFileOutput(
+    ...         statistics="my-outputs/statistics.json",
+    ...         assets="my-outputs/assets.json",
+    ...         solutions="my-outputs/solutions/"
+    ...     )
+    ... )
+    >>> multi_file_config.input.path
+    'data/input/'
+
+    """
+
+    input: ManifestContentMultiFileInput
+    """Configuration for multi-file content format input."""
+    output: ManifestContentMultiFileOutput
+    """Configuration for multi-file content format output."""
+
+
+class ManifestContent(BaseModel):
+    """
+    Content configuration for specifying how the app input/output is handled.
+
+    You can import the `ManifestContent` class directly from `cloud`:
+
+    ```python
+    from nextmv.cloud import ManifestContent
+    ```
+
+    Parameters
+    ----------
+    format : str
+        The format of the content. Must be one of "json", "multi-file", or "csv-archive".
+    multi_file : Optional[ManifestContentMultiFile], default=None
+        Configuration for multi-file content format.
+
+    Examples
+    --------
+    >>> from nextmv.cloud import ManifestContent
+    >>> content_config = ManifestContent(
+    ...     format="multi-file",
+    ...     multi_file=ManifestContentMultiFile(
+    ...         input=ManifestContentMultiFileInput(path="data/input/"),
+    ...         output=ManifestContentMultiFileOutput(
+    ...             statistics="my-outputs/statistics.json",
+    ...             assets="my-outputs/assets.json",
+    ...             solutions="my-outputs/solutions/"
+    ...         )
+    ...     )
+    ... )
+    >>> content_config.format
+    'multi-file'
+    >>> content_config.multi_file.input.path
+    'data/input/'
+    """
+
+    format: str
+    """The format of the content. Must be one of "json", "multi-file",
+    or "csv-archive"."""
+    multi_file: Optional[ManifestContentMultiFile] = Field(
+        serialization_alias="multi-file",
+        validation_alias=AliasChoices("multi-file", "multi_file"),
+        default=None,
+    )
+    """Configuration for multi-file content format."""
+
+    def __post_init__(self):
+        """Post-initialization to validate fields."""
+        acceptable_formats = [InputFormat.JSON, InputFormat.MULTI_FILE, InputFormat.CSV_ARCHIVE]
+        if self.format not in acceptable_formats:
+            raise ValueError(f"Invalid format: {self.format}. Must be one of {acceptable_formats}.")
+
+
 class ManifestConfiguration(BaseModel):
     """
     Configuration for the decision model.
@@ -719,8 +880,10 @@ class ManifestConfiguration(BaseModel):
     'debug_mode'
     """
 
-    options: ManifestOptions
+    options: Optional[ManifestOptions] = None
     """Options for the decision model."""
+    content: Optional[ManifestContent] = None
+    """Content configuration for specifying how the app input/output is handled."""
 
 
 class Manifest(BaseModel):
tests/cloud/app.yaml
@@ -64,3 +64,12 @@ configuration:
         control_type: select
         hidden_from:
           - operator
+  content:
+    format: "multi-file"
+    multi-file:
+      input:
+        path: "my-inputs"
+      output:
+        statistics: "my-outputs/statistics.json"
+        assets: "my-outputs/assets.json"
+        solutions: "my-outputs/solutions"
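To see how this new `content` block surfaces in Python, a hedged, editor-added sketch that loads a manifest like the one above; it assumes the directory passed to `Manifest.from_yaml` contains such an `app.yaml`, mirroring the test below:

```python
from nextmv.cloud import Manifest

# Assumes ./app.yaml looks like the manifest above.
manifest = Manifest.from_yaml(".")
content = manifest.configuration.content

print(content.format)                       # multi-file
print(content.multi_file.input.path)        # my-inputs
print(content.multi_file.output.solutions)  # my-outputs/solutions
```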
tests/cloud/test_manifest.py
@@ -2,6 +2,10 @@ import unittest
 
 from nextmv.cloud.manifest import (
     Manifest,
+    ManifestContent,
+    ManifestContentMultiFile,
+    ManifestContentMultiFileInput,
+    ManifestContentMultiFileOutput,
     ManifestOption,
     ManifestOptions,
     ManifestOptionUI,
@@ -154,6 +158,23 @@ class TestManifest(unittest.TestCase):
         )
         self.assertEqual(manifest.configuration.options.format, ["-{{name}}", "{{value}}"])
 
+        self.assertDictEqual(
+            manifest.configuration.content.to_dict(),
+            {
+                "format": "multi-file",
+                "multi-file": {
+                    "input": {
+                        "path": "my-inputs",
+                    },
+                    "output": {
+                        "statistics": "my-outputs/statistics.json",
+                        "assets": "my-outputs/assets.json",
+                        "solutions": "my-outputs/solutions",
+                    },
+                },
+            },
+        )
+
     def test_extract_options(self):
         manifest = Manifest.from_yaml("tests/cloud")
         options = manifest.extract_options()
@@ -243,6 +264,32 @@
         self.assertEqual(manifest_options.format, ["-{{name}}", "{{value}}"])
         self.assertEqual(manifest_options.strict, False)
 
+    def test_manifest_content_from_dict(self):
+        manifest_content_dict = {
+            "format": "multi-file",
+            "multi-file": {
+                "input": {
+                    "path": "data/input_data",
+                },
+                "output": {
+                    "statistics": "data/output/stats.json",
+                    "assets": "data/output/assets.json",
+                    "solutions": "data/output/solutions",
+                },
+            },
+        }
+
+        manifest_content = ManifestContent.from_dict(manifest_content_dict)
+
+        self.assertEqual(manifest_content.format, "multi-file")
+        self.assertIsInstance(manifest_content.multi_file, ManifestContentMultiFile)
+        self.assertIsInstance(manifest_content.multi_file.input, ManifestContentMultiFileInput)
+        self.assertIsInstance(manifest_content.multi_file.output, ManifestContentMultiFileOutput)
+        self.assertEqual(manifest_content.multi_file.input.path, "data/input_data")
+        self.assertEqual(manifest_content.multi_file.output.statistics, "data/output/stats.json")
+        self.assertEqual(manifest_content.multi_file.output.assets, "data/output/assets.json")
+        self.assertEqual(manifest_content.multi_file.output.solutions, "data/output/solutions")
+
     def test_from_options_with_validation(self):
         options = Options(
             Option(
nextmv-0.29.5.dev1/nextmv/__about__.py (deleted file)
@@ -1 +0,0 @@
-__version__ = "v0.29.5.dev.1"