anyscale 0.26.54__py3-none-any.whl → 0.26.56__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- anyscale/_private/docgen/__main__.py +7 -0
- anyscale/_private/docgen/models.md +1 -0
- anyscale/client/README.md +3 -0
- anyscale/client/openapi_client/__init__.py +3 -0
- anyscale/client/openapi_client/models/__init__.py +3 -0
- anyscale/client/openapi_client/models/decorated_cloud_resource.py +30 -3
- anyscale/client/openapi_client/models/decorated_session.py +29 -1
- anyscale/client/openapi_client/models/operator_check_result.py +177 -0
- anyscale/client/openapi_client/models/operator_check_status.py +102 -0
- anyscale/client/openapi_client/models/operator_status.py +4 -4
- anyscale/client/openapi_client/models/operator_status_details.py +178 -0
- anyscale/client/openapi_client/models/task_table_row.py +27 -1
- anyscale/cloud/models.py +15 -11
- anyscale/cloud_resource.py +11 -11
- anyscale/commands/service_commands.py +10 -7
- anyscale/controllers/cloud_controller.py +35 -16
- anyscale/sdk/anyscale_client/models/session.py +29 -1
- anyscale/service/__init__.py +5 -2
- anyscale/service/_private/service_sdk.py +10 -10
- anyscale/service/commands.py +3 -2
- anyscale/service/models.py +1 -1
- anyscale/version.py +1 -1
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/METADATA +1 -1
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/RECORD +29 -26
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/WHEEL +0 -0
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/entry_points.txt +0 -0
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/licenses/LICENSE +0 -0
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/licenses/NOTICE +0 -0
- {anyscale-0.26.54.dist-info → anyscale-0.26.56.dist-info}/top_level.txt +0 -0
anyscale/client/openapi_client/models/operator_status_details.py
ADDED
@@ -0,0 +1,178 @@
+# coding: utf-8
+
+"""
+    Managed Ray API
+
+    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
+
+    The version of the OpenAPI document: 0.1.0
+    Generated by: https://openapi-generator.tech
+"""
+
+
+import pprint
+import re # noqa: F401
+
+import six
+
+from openapi_client.configuration import Configuration
+
+
+class OperatorStatusDetails(object):
+    """NOTE: This class is auto generated by OpenAPI Generator.
+    Ref: https://openapi-generator.tech
+
+    Do not edit the class manually.
+    """
+
+    """
+    Attributes:
+      openapi_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    openapi_types = {
+        'operator_version': 'str',
+        'check_results': 'list[OperatorCheckResult]',
+        'reported_at': 'datetime'
+    }
+
+    attribute_map = {
+        'operator_version': 'operator_version',
+        'check_results': 'check_results',
+        'reported_at': 'reported_at'
+    }
+
+    def __init__(self, operator_version=None, check_results=None, reported_at=None, local_vars_configuration=None): # noqa: E501
+        """OperatorStatusDetails - a model defined in OpenAPI""" # noqa: E501
+        if local_vars_configuration is None:
+            local_vars_configuration = Configuration()
+        self.local_vars_configuration = local_vars_configuration
+
+        self._operator_version = None
+        self._check_results = None
+        self._reported_at = None
+        self.discriminator = None
+
+        if operator_version is not None:
+            self.operator_version = operator_version
+        if check_results is not None:
+            self.check_results = check_results
+        if reported_at is not None:
+            self.reported_at = reported_at
+
+    @property
+    def operator_version(self):
+        """Gets the operator_version of this OperatorStatusDetails. # noqa: E501
+
+        The version of the Anyscale Operator. # noqa: E501
+
+        :return: The operator_version of this OperatorStatusDetails. # noqa: E501
+        :rtype: str
+        """
+        return self._operator_version
+
+    @operator_version.setter
+    def operator_version(self, operator_version):
+        """Sets the operator_version of this OperatorStatusDetails.
+
+        The version of the Anyscale Operator. # noqa: E501
+
+        :param operator_version: The operator_version of this OperatorStatusDetails. # noqa: E501
+        :type: str
+        """
+
+        self._operator_version = operator_version
+
+    @property
+    def check_results(self):
+        """Gets the check_results of this OperatorStatusDetails. # noqa: E501
+
+        The results of the operator checks. # noqa: E501
+
+        :return: The check_results of this OperatorStatusDetails. # noqa: E501
+        :rtype: list[OperatorCheckResult]
+        """
+        return self._check_results
+
+    @check_results.setter
+    def check_results(self, check_results):
+        """Sets the check_results of this OperatorStatusDetails.
+
+        The results of the operator checks. # noqa: E501
+
+        :param check_results: The check_results of this OperatorStatusDetails. # noqa: E501
+        :type: list[OperatorCheckResult]
+        """
+
+        self._check_results = check_results
+
+    @property
+    def reported_at(self):
+        """Gets the reported_at of this OperatorStatusDetails. # noqa: E501
+
+        The timestamp when the operator status was last reported. # noqa: E501
+
+        :return: The reported_at of this OperatorStatusDetails. # noqa: E501
+        :rtype: datetime
+        """
+        return self._reported_at
+
+    @reported_at.setter
+    def reported_at(self, reported_at):
+        """Sets the reported_at of this OperatorStatusDetails.
+
+        The timestamp when the operator status was last reported. # noqa: E501
+
+        :param reported_at: The reported_at of this OperatorStatusDetails. # noqa: E501
+        :type: datetime
+        """
+
+        self._reported_at = reported_at
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.openapi_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, OperatorStatusDetails):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, OperatorStatusDetails):
+            return True
+
+        return self.to_dict() != other.to_dict()
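For orientation, a minimal sketch of how this new generated model is typically consumed. The import path is an assumption based on the wheel layout above (the vendored client may require `anyscale/client` on `sys.path` for its internal `openapi_client` imports), and the field values are illustrative.

```python
from datetime import datetime, timezone

# Assumed import path based on the wheel layout shown above.
from anyscale.client.openapi_client.models import OperatorStatusDetails

# Illustrative values; check_results would normally hold OperatorCheckResult
# objects reported by the Anyscale Operator.
details = OperatorStatusDetails(
    operator_version="1.2.3",
    check_results=[],
    reported_at=datetime.now(timezone.utc),
)

# Generated models serialize via to_dict()/to_str(), as defined in the diff above.
print(details.to_dict())
```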
anyscale/client/openapi_client/models/task_table_row.py
CHANGED
@@ -43,6 +43,7 @@ class TaskTableRow(object):
         'start_time_ns': 'int',
         'end_time_ns': 'int',
         'required_resources': 'str',
+        'runtime_env': 'str',
         'node_id': 'str',
         'worker_id': 'str',
         'worker_pid': 'str',
@@ -62,6 +63,7 @@ class TaskTableRow(object):
         'start_time_ns': 'start_time_ns',
         'end_time_ns': 'end_time_ns',
         'required_resources': 'required_resources',
+        'runtime_env': 'runtime_env',
         'node_id': 'node_id',
         'worker_id': 'worker_id',
         'worker_pid': 'worker_pid',
@@ -70,7 +72,7 @@ class TaskTableRow(object):
         'exception_type': 'exception_type'
     }
 
-    def __init__(self, id=None, attempt_number=None, job_id=None, function_name=None, task_type=None, current_state=None, error_message=None, start_time_ns=None, end_time_ns=None, required_resources=None, node_id=None, worker_id=None, worker_pid=None, parent_task_id=None, ray_session_name=None, exception_type=None, local_vars_configuration=None): # noqa: E501
+    def __init__(self, id=None, attempt_number=None, job_id=None, function_name=None, task_type=None, current_state=None, error_message=None, start_time_ns=None, end_time_ns=None, required_resources=None, runtime_env=None, node_id=None, worker_id=None, worker_pid=None, parent_task_id=None, ray_session_name=None, exception_type=None, local_vars_configuration=None): # noqa: E501
         """TaskTableRow - a model defined in OpenAPI""" # noqa: E501
         if local_vars_configuration is None:
             local_vars_configuration = Configuration()
@@ -86,6 +88,7 @@ class TaskTableRow(object):
         self._start_time_ns = None
         self._end_time_ns = None
         self._required_resources = None
+        self._runtime_env = None
         self._node_id = None
         self._worker_id = None
         self._worker_pid = None
@@ -108,6 +111,8 @@ class TaskTableRow(object):
             self.end_time_ns = end_time_ns
         if required_resources is not None:
             self.required_resources = required_resources
+        if runtime_env is not None:
+            self.runtime_env = runtime_env
         if node_id is not None:
             self.node_id = node_id
         if worker_id is not None:
@@ -342,6 +347,27 @@ class TaskTableRow(object):
 
         self._required_resources = required_resources
 
+    @property
+    def runtime_env(self):
+        """Gets the runtime_env of this TaskTableRow. # noqa: E501
+
+
+        :return: The runtime_env of this TaskTableRow. # noqa: E501
+        :rtype: str
+        """
+        return self._runtime_env
+
+    @runtime_env.setter
+    def runtime_env(self, runtime_env):
+        """Sets the runtime_env of this TaskTableRow.
+
+
+        :param runtime_env: The runtime_env of this TaskTableRow. # noqa: E501
+        :type: str
+        """
+
+        self._runtime_env = runtime_env
+
     @property
     def node_id(self):
         """Gets the node_id of this TaskTableRow. # noqa: E501
anyscale/cloud/models.py
CHANGED
@@ -109,12 +109,14 @@ class CloudProvider(ModelEnum):
     AWS = "AWS"
     GCP = "GCP"
     AZURE = "AZURE"
+    GENERIC = "GENERIC"
 
     __docstrings__ = {
         UNKNOWN: "Unknown cloud provider.",
         AWS: "Amazon Web Services.",
         GCP: "Google Cloud Platform.",
         AZURE: "Microsoft Azure.",
+        GENERIC: "Generic cloud provider.",
     } # type: ignore
 
 
@@ -141,7 +143,7 @@ cloud = Cloud(
     id: str = field(metadata={"docstring": "Unique identifier for this Cloud."})
     provider: Union[CloudProvider, str] = field(
         metadata={
-            "docstring": "Cloud provider (AWS, GCP, AZURE) or UNKNOWN if not recognized."
+            "docstring": "Cloud provider (AWS, GCP, AZURE, GENERIC) or UNKNOWN if not recognized."
         },
     )
     compute_stack: Union[ComputeStack, str] = field(
@@ -499,15 +501,15 @@ bucket_name: s3://my-bucket
 file_storage:
   file_storage_id: fs-12345678901234567
 aws_config:
-vpc_id: vpc-12345678901234567
-subnet_ids:
-
-
-security_group_ids:
-
-anyscale_iam_role_id: arn:aws:iam::123456789012:role/anyscale-iam-role
-cluster_iam_role_id: arn:aws:iam::123456789012:role/cluster-node-role
-memorydb_cluster_name: my-memorydb-cluster
+  vpc_id: vpc-12345678901234567
+  subnet_ids:
+    - subnet-11111111111111111
+    - subnet-22222222222222222
+  security_group_ids:
+    - sg-12345678901234567
+  anyscale_iam_role_id: arn:aws:iam::123456789012:role/anyscale-iam-role
+  cluster_iam_role_id: arn:aws:iam::123456789012:role/cluster-node-role
+  memorydb_cluster_name: my-memorydb-cluster
     """
 
     cloud_resource_id: Optional[str] = field(
@@ -519,7 +521,9 @@ memorydb_cluster_name: my-memorydb-cluster
     )
     provider: Union[CloudProvider, str] = field(
         default=CloudProvider.UNKNOWN,
-        metadata={
+        metadata={
+            "docstring": "The cloud provider type (e.g., AWS, GCP, AZURE, or GENERIC)."
+        },
     )
     compute_stack: Union[ComputeStack, str] = field(
         default=ComputeStack.VM,
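A small sketch of what the new enum member means for SDK consumers, assuming the public import path `anyscale.cloud.models` matches the file shown above and that `CloudProvider` behaves as a string-valued enum:

```python
from anyscale.cloud.models import CloudProvider

# GENERIC joins AWS, GCP, and AZURE as of this release.
provider = CloudProvider("GENERIC")
assert provider is CloudProvider.GENERIC

# Code that branches on the provider should now handle the GENERIC case.
if provider == CloudProvider.GENERIC:
    print("Cloud is registered with a generic provider.")
```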
anyscale/cloud_resource.py
CHANGED
@@ -50,6 +50,8 @@ S3_ARN_PREFIX = "arn:aws:s3:::"
 S3_STORAGE_PREFIX = "s3://"
 GCS_STORAGE_PREFIX = "gs://"
 
+HTTPS_INGRESS_PORT = 443
+
 
 def compare_dicts_diff(d1: Dict[Any, Any], d2: Dict[Any, Any]) -> str:
     """Returns a string representation of the difference of the two dictionaries.
@@ -480,8 +482,6 @@ def verify_aws_security_groups( # noqa: PLR0912, PLR0911
             raise e
         anyscale_security_groups.append(anyscale_security_group)
 
-    expected_open_ports = [443, 22] # 443 is for HTTPS ingress, 22 is for SSH
-
     inbound_ip_permissions = [
         ip_permission
         for anyscale_security_group in anyscale_security_groups
@@ -499,20 +499,20 @@ def verify_aws_security_groups( # noqa: PLR0912, PLR0911
     }
 
     # Check inbound permissions
-
-
-
-
-        for inbound_ip_permission_port in inbound_ip_permissions_with_specific_port
-    ):
-        missing_open_ports.append(port)
-    if missing_open_ports:
+    if not any(
+        inbound_ip_permission_port == HTTPS_INGRESS_PORT
+        for inbound_ip_permission_port in inbound_ip_permissions_with_specific_port
+    ):
         logger.warning(
-            f"Security groups {aws_security_group_ids} do not contain inbound permission for
+            f"Security groups {aws_security_group_ids} do not contain inbound permission for port {HTTPS_INGRESS_PORT}. This port is used for interaction with the clusters from Anyscale UI."
        )
        if strict:
            return False
 
+    expected_open_ports = [
+        HTTPS_INGRESS_PORT,
+        22,
+    ] # 22 was previously used for SSH but is no longer required.
     if len(inbound_ip_permissions_with_specific_port) > len(expected_open_ports):
         logger.warning(
             f"Security groups {aws_security_group_ids} allows access to more than {expected_open_ports}. This may not be safe by default."
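The behavioral change above is easier to read in isolation: only a missing port 443 rule now triggers the warning (and the strict-mode failure), while port 22 is merely tolerated in the expected-ports allowance. A self-contained sketch of that logic, not the actual `verify_aws_security_groups` implementation:

```python
HTTPS_INGRESS_PORT = 443


def check_inbound_ports(open_ports: list, strict: bool = False) -> bool:
    """Mirror of the updated check: warn/fail only when 443 is missing."""
    if not any(port == HTTPS_INGRESS_PORT for port in open_ports):
        print(f"Missing inbound permission for port {HTTPS_INGRESS_PORT}.")
        if strict:
            return False

    # 22 was previously required for SSH; it is still tolerated but no longer required.
    expected_open_ports = [HTTPS_INGRESS_PORT, 22]
    if len(open_ports) > len(expected_open_ports):
        print(f"More ports than {expected_open_ports} are open; this may not be safe by default.")
    return True


print(check_inbound_ports([443]))               # True: 443 present, 22 not needed
print(check_inbound_ports([22], strict=True))   # False: 443 missing in strict mode
```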
anyscale/commands/service_commands.py
CHANGED
@@ -31,10 +31,10 @@ from anyscale.service.models import (
     ServiceConfig,
     ServiceLogMode,
     ServiceSortField,
+    ServiceSortOrder,
     ServiceState,
     ServiceStatus,
     ServiceVersionStatus,
-    SortOrder,
 )
 from anyscale.util import (
     AnyscaleJSONEncoder,
@@ -658,13 +658,13 @@ def validate_max_items(ctx, param, value):
     return validate_non_negative_arg(ctx, param, value)
 
 
-def _parse_sort_option(sort: Optional[str],) -> Tuple[Optional[str],
+def _parse_sort_option(sort: Optional[str],) -> Tuple[Optional[str], ServiceSortOrder]:
     """
     Given a raw sort string (e.g. "-created_at"), return
     (canonical_field_name, SortOrder).
     """
     if not sort:
-        return None,
+        return None, ServiceSortOrder.ASC
 
     # build case-insensitive map of allowed fields
     allowed = {f.value.lower(): f.value for f in ServiceSortField.__members__.values()}
@@ -672,10 +672,10 @@ def _parse_sort_option(sort: Optional[str],) -> Tuple[Optional[str], SortOrder]:
     # detect leading '-' for descending
     if sort.startswith("-"):
         raw = sort[1:]
-        order =
+        order = ServiceSortOrder.DESC
     else:
         raw = sort
-        order =
+        order = ServiceSortOrder.ASC
 
     key = raw.lower()
     if key not in allowed:
@@ -726,7 +726,10 @@ def _format_service_output_data(svc: ServiceStatus) -> Dict[str, str]:
 
 
 @service_cli.command(
-    name="list",
+    name="list",
+    help="List services.",
+    cls=AnyscaleCommand,
+    example=command_examples.SERVICE_LIST_EXAMPLE,
 )
 @click.option("--service-id", "--id", help="ID of the service to display.")
 @click.option("--name", "-n", help="Name of the service to display.")
@@ -979,7 +982,7 @@ def terminate(
         anyscale.service.terminate(id=service_id)
         log.info(f"Service {service_id} terminate initiated.")
         log.info(
-            f
+            f"View the service in the UI at {get_endpoint(f'/services/{service_id}')}"
         )
     except Exception as e: # noqa: BLE001
         log.error(f"Error terminating service: {e}")
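The `_parse_sort_option` changes swap the removed `SortOrder` name for `ServiceSortOrder`. A standalone sketch of the same parsing behavior, using a hypothetical enum and field set that mirror the logic in the diff:

```python
from enum import Enum
from typing import Optional, Tuple


class SortOrder(Enum):  # stand-in for anyscale's ServiceSortOrder
    ASC = "ASC"
    DESC = "DESC"


# Illustrative; the CLI derives the allowed fields from ServiceSortField.
ALLOWED_FIELDS = {"name", "created_at"}


def parse_sort_option(sort: Optional[str]) -> Tuple[Optional[str], SortOrder]:
    """Given e.g. "-created_at", return (field, order); a leading '-' means descending."""
    if not sort:
        return None, SortOrder.ASC
    order = SortOrder.DESC if sort.startswith("-") else SortOrder.ASC
    raw = sort[1:] if sort.startswith("-") else sort
    key = raw.lower()
    if key not in ALLOWED_FIELDS:
        raise ValueError(f"Invalid sort field: {raw}")
    return key, order


print(parse_sort_option("-created_at"))  # ('created_at', <SortOrder.DESC: 'DESC'>)
print(parse_sort_option(None))           # (None, <SortOrder.ASC: 'ASC'>)
```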
anyscale/controllers/cloud_controller.py
CHANGED
@@ -1471,18 +1471,26 @@ class CloudController(BaseController):
             d.pop("cloud_deployment_id", None)
         return formatted_cloud_resources
 
-    def
-
-
+    def _convert_decorated_cloud_resource_to_cloud_deployment(
+        self, decorated_cloud_resource: DecoratedCloudResource
+    ) -> CloudDeployment:
         # DecoratedCloudResource has extra fields that are not in CloudDeployment.
         allowed_keys = set(CloudDeployment.attribute_map.keys())
         allowed_keys.remove(
             "cloud_deployment_id"
         ) # Remove deprecated cloud_deployment_id field.
+        return CloudDeployment(
+            **{
+                k: v
+                for k, v in decorated_cloud_resource.to_dict().items()
+                if k in allowed_keys
+            }
+        )
+
+    def get_cloud_resources(self, cloud_id: str) -> List[CloudDeployment]:
+        decorated_cloud_resources = self.get_decorated_cloud_resources(cloud_id)
         return [
-
-                **{k: v for k, v in resource.to_dict().items() if k in allowed_keys}
-            )
+            self._convert_decorated_cloud_resource_to_cloud_deployment(resource)
             for resource in decorated_cloud_resources
         ]
 
@@ -1815,24 +1823,25 @@ class CloudController(BaseController):
                 "Please use `anyscale cloud resource create` to add cloud resources."
             )
 
-        # Diff the existing and new specs
-        diff = self._generate_diff(
-            [self._remove_empty_values(r.to_dict()) for r in existing_resources], spec
-        )
-        if not diff:
-            self.log.info("No changes detected.")
-            return
-
         existing_resources_dict = {
             resource.cloud_resource_id: resource for resource in existing_resources
         }
 
+        all_deployments: List[CloudDeployment] = []
         updated_deployments: List[CloudDeployment] = []
         for d in spec:
            try:
                deployment = CloudDeployment(**d)
            except Exception as e: # noqa: BLE001
-
+                try:
+                    # Try to parse the cloud deployment as a DecoratedCloudResource as well,
+                    # which has extra fields that are not in CloudDeployment.
+                    deployment = self._convert_decorated_cloud_resource_to_cloud_deployment(
+                        DecoratedCloudResource(**d)
+                    )
+                except: # noqa: E722
+                    # Raise original error from parsing as CloudDeployment.
+                    raise ClickException(f"Failed to parse cloud resource: {e}")
 
             if not deployment.cloud_resource_id:
                 raise ClickException(
@@ -1846,10 +1855,20 @@ class CloudController(BaseController):
                 raise ClickException(
                     "Please use the `anyscale machine-pool` CLI to update machine pools."
                 )
+
+            all_deployments.append(deployment)
             if deployment != existing_resources_dict[deployment.cloud_resource_id]:
                 updated_deployments.append(deployment)
 
-        #
+        # Diff the existing and new specs and confirm.
+        diff = self._generate_diff(
+            [self._remove_empty_values(r.to_dict()) for r in existing_resources],
+            [self._remove_empty_values(r.to_dict()) for r in all_deployments],
+        )
+        if not diff:
+            self.log.info("No changes detected.")
+            return
+
         self.log.info(f"Detected the following changes:\n{diff}")
 
         confirm("Would you like to proceed with updating this cloud?", yes)
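The new `_convert_decorated_cloud_resource_to_cloud_deployment` helper boils down to "keep only the attributes the target model declares, then construct it". A generic, self-contained sketch of that narrowing pattern; the classes here are stand-ins, not Anyscale's:

```python
class Target:
    """Stand-in for CloudDeployment: declares which attributes it accepts."""

    attribute_map = {"name": "name", "region": "region"}

    def __init__(self, name=None, region=None):
        self.name = name
        self.region = region


def narrow(source_dict: dict) -> Target:
    # Drop any keys the target model does not declare before constructing it.
    allowed_keys = set(Target.attribute_map.keys())
    return Target(**{k: v for k, v in source_dict.items() if k in allowed_keys})


decorated = {"name": "my-resource", "region": "us-west-2", "extra_field": "ignored"}
target = narrow(decorated)
print(target.name, target.region)  # my-resource us-west-2
```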
anyscale/sdk/anyscale_client/models/session.py
CHANGED
@@ -61,6 +61,7 @@ class Session(object):
         'serve_metrics_dashboard_url': 'str',
         'serve_deployment_metrics_dashboard_url': 'str',
         'serve_llm_metrics_dashboard_url': 'str',
+        'supports_full_grafana_view': 'bool',
         'persistent_metrics_url': 'str',
         'connect_url': 'str',
         'jupyter_notebook_url': 'str',
@@ -116,6 +117,7 @@ class Session(object):
         'serve_metrics_dashboard_url': 'serve_metrics_dashboard_url',
         'serve_deployment_metrics_dashboard_url': 'serve_deployment_metrics_dashboard_url',
         'serve_llm_metrics_dashboard_url': 'serve_llm_metrics_dashboard_url',
+        'supports_full_grafana_view': 'supports_full_grafana_view',
         'persistent_metrics_url': 'persistent_metrics_url',
         'connect_url': 'connect_url',
         'jupyter_notebook_url': 'jupyter_notebook_url',
@@ -142,7 +144,7 @@ class Session(object):
         'ray_dashboard_snapshot_last_reported_at': 'ray_dashboard_snapshot_last_reported_at'
     }
 
-    def __init__(self, name=None, project_id=None, cloud_id=None, cluster_config=None, build_id=None, compute_template_id=None, idle_timeout=120, uses_app_config=False, allow_public_internet_traffic=False, user_service_access=None, user_service_token=None, ha_job_id=None, id=None, state=None, pending_state=None, state_data=None, status=None, status_details=None, creator_id=None, created_at=None, archived_at=None, webterminal_auth_url=None, metrics_dashboard_url=None, data_metrics_dashboard_url=None, train_metrics_dashboard_url=None, serve_metrics_dashboard_url=None, serve_deployment_metrics_dashboard_url=None, serve_llm_metrics_dashboard_url=None, persistent_metrics_url=None, connect_url=None, jupyter_notebook_url=None, ray_dashboard_url=None, access_token=None, service_proxy_url=None, tensorboard_available=None, cluster_config_last_modified_at=None, host_name=None, head_node_ip=None, ssh_authorized_keys=None, ssh_private_key=None, anyscaled_config=None, anyscaled_config_generated_at=None, default_build_id=None, idle_timeout_last_activity_at=None, ray_version=None, ray_version_last_updated_at=None, user_service_url=None, ray_component_activities_last_reported_at=None, activity_details=None, maximum_uptime_will_terminate_cluster_at=None, idle_termination_status=None, ray_dashboard_snapshot_last_reported_at=None, local_vars_configuration=None): # noqa: E501
+    def __init__(self, name=None, project_id=None, cloud_id=None, cluster_config=None, build_id=None, compute_template_id=None, idle_timeout=120, uses_app_config=False, allow_public_internet_traffic=False, user_service_access=None, user_service_token=None, ha_job_id=None, id=None, state=None, pending_state=None, state_data=None, status=None, status_details=None, creator_id=None, created_at=None, archived_at=None, webterminal_auth_url=None, metrics_dashboard_url=None, data_metrics_dashboard_url=None, train_metrics_dashboard_url=None, serve_metrics_dashboard_url=None, serve_deployment_metrics_dashboard_url=None, serve_llm_metrics_dashboard_url=None, supports_full_grafana_view=False, persistent_metrics_url=None, connect_url=None, jupyter_notebook_url=None, ray_dashboard_url=None, access_token=None, service_proxy_url=None, tensorboard_available=None, cluster_config_last_modified_at=None, host_name=None, head_node_ip=None, ssh_authorized_keys=None, ssh_private_key=None, anyscaled_config=None, anyscaled_config_generated_at=None, default_build_id=None, idle_timeout_last_activity_at=None, ray_version=None, ray_version_last_updated_at=None, user_service_url=None, ray_component_activities_last_reported_at=None, activity_details=None, maximum_uptime_will_terminate_cluster_at=None, idle_termination_status=None, ray_dashboard_snapshot_last_reported_at=None, local_vars_configuration=None): # noqa: E501
         """Session - a model defined in OpenAPI""" # noqa: E501
         if local_vars_configuration is None:
             local_vars_configuration = Configuration()
@@ -176,6 +178,7 @@ class Session(object):
         self._serve_metrics_dashboard_url = None
         self._serve_deployment_metrics_dashboard_url = None
         self._serve_llm_metrics_dashboard_url = None
+        self._supports_full_grafana_view = None
         self._persistent_metrics_url = None
         self._connect_url = None
         self._jupyter_notebook_url = None
@@ -250,6 +253,8 @@ class Session(object):
             self.serve_deployment_metrics_dashboard_url = serve_deployment_metrics_dashboard_url
         if serve_llm_metrics_dashboard_url is not None:
             self.serve_llm_metrics_dashboard_url = serve_llm_metrics_dashboard_url
+        if supports_full_grafana_view is not None:
+            self.supports_full_grafana_view = supports_full_grafana_view
         if persistent_metrics_url is not None:
             self.persistent_metrics_url = persistent_metrics_url
         if connect_url is not None:
@@ -954,6 +959,29 @@ class Session(object):
 
         self._serve_llm_metrics_dashboard_url = serve_llm_metrics_dashboard_url
 
+    @property
+    def supports_full_grafana_view(self):
+        """Gets the supports_full_grafana_view of this Session. # noqa: E501
+
+        Whether the session supports full Grafana embedding view. This field will only be populated after the Session finishes starting. # noqa: E501
+
+        :return: The supports_full_grafana_view of this Session. # noqa: E501
+        :rtype: bool
+        """
+        return self._supports_full_grafana_view
+
+    @supports_full_grafana_view.setter
+    def supports_full_grafana_view(self, supports_full_grafana_view):
+        """Sets the supports_full_grafana_view of this Session.
+
+        Whether the session supports full Grafana embedding view. This field will only be populated after the Session finishes starting. # noqa: E501
+
+        :param supports_full_grafana_view: The supports_full_grafana_view of this Session. # noqa: E501
+        :type: bool
+        """
+
+        self._supports_full_grafana_view = supports_full_grafana_view
+
     @property
     def persistent_metrics_url(self):
         """Gets the persistent_metrics_url of this Session. # noqa: E501
anyscale/service/__init__.py
CHANGED
@@ -39,9 +39,9 @@ from anyscale.service.models import (
     ServiceConfig,
     ServiceLogMode,
     ServiceSortField,
+    ServiceSortOrder,
     ServiceState,
     ServiceStatus,
-    SortOrder,
 )
 
 
@@ -162,6 +162,9 @@ class ServiceSDK:
         """
         return self._private_sdk.delete(id=id, name=name, cloud=cloud, project=project)
 
+    @sdk_docs(
+        doc_py_example=_LIST_EXAMPLE, arg_docstrings=_LIST_ARG_DOCSTRINGS,
+    )
     def list( # noqa: F811, A001
         self,
         *,
@@ -179,7 +182,7 @@ class ServiceSDK:
         page_size: Optional[int] = None,
         # Sorting
         sort_field: Optional[Union[str, ServiceSortField]] = None,
-        sort_order: Optional[Union[str,
+        sort_order: Optional[Union[str, ServiceSortOrder]] = None,
     ) -> ResultIterator[ServiceStatus]:
         """List services.
 
anyscale/service/_private/service_sdk.py
CHANGED
@@ -24,13 +24,13 @@ from anyscale.sdk.anyscale_client.models import (
     ServiceConfig as ExternalAPIServiceConfig,
     ServiceEventCurrentState,
     ServiceSortField,
-    SortOrder,
     TracingConfig as APITracingConfg,
 )
 from anyscale.service.models import (
     RayGCSExternalStorageConfig,
     ServiceConfig,
     ServiceLogMode,
+    ServiceSortOrder,
     ServiceState,
     ServiceStatus,
     ServiceVersionStatus,
@@ -252,7 +252,9 @@ class PrivateServiceSDK(WorkloadSDK):
                 "are ignored when performing an in_place update."
             )
 
-        existing_config: ProductionServiceV2VersionModel =
+        existing_config: ProductionServiceV2VersionModel = (
+            existing_service.primary_version
+        )
         query_auth_token_enabled = existing_service.auth_token is not None
         cloud_id = self.client.get_cloud_id(
             compute_config_id=existing_config.compute_config_id
@@ -437,10 +439,10 @@ class PrivateServiceSDK(WorkloadSDK):
             raise ValueError("max_surge_percent must be between 0 and 100.")
 
         name = config.name or self._get_default_name()
-        existing_service: Optional[
-
-
-
+        existing_service: Optional[DecoratedProductionServiceV2APIModel] = (
+            self.client.get_service(
+                name=name, cloud=config.cloud, project=config.project
+            )
         )
         if existing_service is None:
             self.logger.info(f"Starting new service '{name}'.")
@@ -585,7 +587,6 @@ class PrivateServiceSDK(WorkloadSDK):
         project_id: str,
         query_auth_token_enabled: bool,
     ) -> ServiceVersionStatus:
-
         image_uri, image_build, project, compute_config = await asyncio.gather(
             asyncio.to_thread(
                 self._image_sdk.get_image_uri_from_build_id, model.build_id
@@ -734,9 +735,8 @@ class PrivateServiceSDK(WorkloadSDK):
         page_size: Optional[int] = None, # Controls items fetched per API call
         # Sorting
         sort_field: Optional[Union[str, ServiceSortField]] = None,
-        sort_order: Optional[Union[str,
+        sort_order: Optional[Union[str, ServiceSortOrder]] = None,
     ) -> ResultIterator[ServiceStatus]:
-
         if page_size is not None and (page_size <= 0 or page_size > MAX_PAGE_SIZE):
             raise ValueError(
                 f"page_size must be between 1 and {MAX_PAGE_SIZE}, inclusive."
@@ -875,7 +875,7 @@ class PrivateServiceSDK(WorkloadSDK):
 
 
     def _normalize_state_filter(
-        states: Optional[Union[List[ServiceState], List[str]]]
+        states: Optional[Union[List[ServiceState], List[str]]],
     ) -> Optional[List[str]]:
         if states is None:
             return None