cgcsdk 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cgc/.env +1 -1
- cgc/CHANGELOG.md +37 -0
- cgc/commands/cgc_cmd.py +7 -0
- cgc/commands/cgc_helpers.py +45 -33
- cgc/commands/compute/compute_cmd.py +156 -29
- cgc/commands/compute/compute_models.py +7 -3
- cgc/commands/compute/compute_responses.py +17 -1
- cgc/commands/compute/compute_utills.py +99 -2
- cgc/commands/resource/resource_cmd.py +3 -3
- cgc/sdk/__init__.py +3 -0
- cgc/sdk/exceptions.py +7 -0
- cgc/sdk/resource.py +425 -0
- cgc/utils/consts/env_consts.py +1 -0
- cgc/utils/custom_exceptions.py +1 -1
- cgc/utils/response_utils.py +60 -0
- {cgcsdk-1.0.3.dist-info → cgcsdk-1.0.5.dist-info}/METADATA +1 -1
- {cgcsdk-1.0.3.dist-info → cgcsdk-1.0.5.dist-info}/RECORD +21 -19
- {cgcsdk-1.0.3.dist-info → cgcsdk-1.0.5.dist-info}/WHEEL +1 -1
- {cgcsdk-1.0.3.dist-info → cgcsdk-1.0.5.dist-info}/LICENSE +0 -0
- {cgcsdk-1.0.3.dist-info → cgcsdk-1.0.5.dist-info}/entry_points.txt +0 -0
- {cgcsdk-1.0.3.dist-info → cgcsdk-1.0.5.dist-info}/top_level.txt +0 -0
cgc/commands/compute/compute_utills.py
CHANGED
@@ -1,3 +1,25 @@
+from ast import main
+import cgc.utils.consts.env_consts as env_consts
+
+def list_get_mounted_volumes_paths(volume_list: list) -> str:
+    """Formats and returns list of PVC volumes mounted to an app.
+
+    :param volume_list: list of all volumes mounted to an app
+    :type volume_list: list
+    :return: list of volume paths
+    :rtype: str
+    """
+    volume_name_list = []
+    for volume in volume_list:
+        volume_type = volume.get("type")
+        if volume_type == "PVC":
+            volume_mount_path = volume.get("mount_path")
+            volume_name_list.append(volume_mount_path)
+    volumes_mounted = (
+        ", ".join(volume_name_list) if len(volume_name_list) != 0 else None
+    )
+    return volumes_mounted
+
 def list_get_mounted_volumes(volume_list: list) -> str:
     """Formats and returns list of PVC volumes mounted to an app.
 
@@ -17,6 +39,29 @@ def list_get_mounted_volumes(volume_list: list) -> str:
     )
     return volumes_mounted
 
+def get_app_mounts(pod_list:list) -> list:
+    output_data = []
+
+    for pod in pod_list:
+        try:
+            main_container_name = pod["labels"]["entity"]
+            try:
+                main_container = [x for x in pod["containers"] if x["name"] == main_container_name][0]
+            except IndexError:
+                raise Exception("Parser was unable to find main container in server output in container list")
+            volumes_mounted = list_get_mounted_volumes(main_container["mounts"])
+            volumes_paths = list_get_mounted_volumes_paths(main_container["mounts"])
+            pod_data = {
+                "name": pod["labels"]["app-name"],
+                "type": pod["labels"]["entity"],
+                "status": pod["status"],
+                "volumes_mounted": volumes_mounted,
+                "volumes_paths": volumes_paths,
+            }
+            output_data.append(pod_data)
+        except KeyError:
+            pass
+    return output_data
 
 def get_app_list(pod_list: list, detailed: bool) -> list:
     """Formats and returns list of apps to print.
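
A quick usage sketch of the new path helper on a hypothetical volume list; only entries whose type is "PVC" are collected and joined into a comma-separated string:

    from cgc.commands.compute.compute_utills import list_get_mounted_volumes_paths

    # hypothetical server output describing an app's mounts
    volumes = [
        {"type": "PVC", "mount_path": "/workspace/data"},
        {"type": "emptyDir", "mount_path": "/tmp/scratch"},  # skipped: not a PVC
    ]

    print(list_get_mounted_volumes_paths(volumes))  # -> "/workspace/data"
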
@@ -117,13 +162,23 @@ def compute_create_payload(
     cpu,
     memory,
     volumes: list,
+    volume_full_path: str,
     resource_data: list = [],
+    config_maps_data: list = [],
     gpu: int = 0,
     gpu_type: str = None,
+    shm_size: int = 0,
+    image_name: str = "",
+    startup_command: str = "",
+    repository_secret: str = "",
+    node_port_enabled: bool = False,
 ):
     """
     Create payload for app creation.
     """
+    shm_payload = {}
+    if shm_size is not None and shm_size != 0:
+        shm_payload = {"shared_memory": shm_size}
 
     payload = {
         "resource_data": {
@@ -133,16 +188,31 @@ def compute_create_payload(
             "gpu": gpu,
             "memory": memory,
             "gpu_type": gpu_type,
+            "full_mount_path": volume_full_path,
+            **shm_payload,
         }
     }
     try:
         if len(volumes) != 0:
-            payload["resource_data"]["pv_volume"] = volumes
+            if not volume_full_path:
+                payload["resource_data"]["pv_volume"] = volumes
+            elif volume_full_path and len(volumes) != 1:
+                raise Exception(
+                    "Volume full path can only be used with a single volume"
+                )
+            else:
+                payload["resource_data"]["pv_volume"] = volumes
     except TypeError:
         pass
     try:
+        resource_data_dict = {"resource_data": {}}
+        if node_port_enabled:
+            if not env_consts.ON_PREMISES:
+                raise Exception(
+                    "NodePort is supported in on-premises environments only."
+                )
+            resource_data_dict["resource_data"]["node_port_enabled"] = True
         if len(resource_data) != 0:
-            resource_data_dict = {"resource_data": {}}
             for resource in resource_data:
                 try:
                     key, value = resource.split("=")
@@ -151,9 +221,36 @@ def compute_create_payload(
                     raise Exception(
                         "Invalid resource data format. Use key=value format"
                     )
+        if image_name:
+            resource_data_dict["resource_data"]["custom_image"] = image_name
+        if startup_command:
+            resource_data_dict["resource_data"]["custom_command"] = startup_command
+        if repository_secret:
+            resource_data_dict["resource_data"][
+                "image_pull_secret_name"
+            ] = repository_secret
+        if resource_data_dict["resource_data"] != {}:
             payload["template_specific_data"] = resource_data_dict
     except TypeError:
         pass
+    try:
+        if len(config_maps_data) != 0:
+            config_maps_data_dict = {}
+            for config_map in config_maps_data:
+                try:
+                    key, value = config_map.split(
+                        "="
+                    )  # where key is name of config map and value is data
+                    config_maps_data_dict[key] = (
+                        value  # value is dict, ex.: {"key": "value"}
+                    )
+                except ValueError:
+                    raise Exception(
+                        "Invalid config map data format. Use key=value format"
+                    )
+            payload["config_maps_data"] = config_maps_data_dict
+    except TypeError:
+        pass
     return payload
 
 
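
For orientation, a hypothetical call exercising the parameters added in this release (all values are illustrative; payload fields not touched by this diff are left uncommented):

    from cgc.commands.compute.compute_utills import compute_create_payload

    payload = compute_create_payload(
        name="demo-app",                     # hypothetical app name
        entity="custom",
        cpu=2,
        memory=4,
        volumes=["my-volume"],
        volume_full_path="/workspace",       # sets "full_mount_path"; only valid with a single volume
        shm_size=1,                          # added as "shared_memory" under resource_data
        image_name="nvcr.io/nvidia/pytorch:24.01-py3",  # stored as "custom_image" in template_specific_data
        startup_command="python serve.py",              # stored as "custom_command"
        config_maps_data=['app-config={"LOG_LEVEL": "info"}'],  # key=value strings parsed into payload["config_maps_data"]
        node_port_enabled=False,             # True is rejected unless ON_PREMISES is set
    )
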
cgc/commands/resource/resource_cmd.py
CHANGED
@@ -1,7 +1,7 @@
 import click
 import json
 
-from cgc.commands.compute.compute_models import
+from cgc.commands.compute.compute_models import ComputesList, DatabasesList
 from cgc.commands.compute.compute_responses import (
     template_list_response,
     template_get_start_path_response,
@@ -22,7 +22,7 @@ from cgc.utils.click_group import CustomGroup, CustomCommand
 from cgc.utils.requests_helper import call_api, EndpointTypes
 
 
-@click.group(name="resource", cls=CustomGroup, hidden=
+@click.group(name="resource", cls=CustomGroup, hidden=False)
 def resource_group():
     """
     Management of templates.
@@ -45,7 +45,7 @@ def template_list():
 
 @resource_group.command("get_start_path", cls=CustomCommand)
 @click.argument(
-    "template", type=click.Choice([*
+    "template", type=click.Choice([*ComputesList.get_list(), *DatabasesList.get_list()])
 )
 def template_get_start_path(template: str):
     """Displays start path of specified template"""
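
Because the group is no longer hidden, the template commands become reachable from the command line. A hypothetical invocation (the cgc entry-point name and the template name are assumptions, not shown in this diff):

    cgc resource get_start_path some-template-name
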
cgc/sdk/__init__.py
CHANGED
@@ -1,3 +1,6 @@
 from cgc.sdk.mongodb import get_mongo_access as mongo_client
 from cgc.sdk.redis import get_redis_access as redis_client
 from cgc.sdk.postgresql import get_postgresql_access as postgresql_client
+
+import cgc.sdk.resource as resource
+import cgc.sdk.exceptions as exceptions
cgc/sdk/exceptions.py
ADDED
cgc/sdk/resource.py
ADDED
@@ -0,0 +1,425 @@
+import json as _json
+import re
+import cgc.sdk.exceptions as _exceptions
+import cgc.utils.prepare_headers as _prepare_headers
+import cgc.commands.compute.compute_utills as _compute_utills
+import cgc.utils.requests_helper as _requests_helper
+import cgc.utils.response_utils as _response_utils
+from enum import Enum as _Enum
+from cgc.commands.compute.compute_models import GPUsList
+
+
+def start_function_loop(function, infinite: bool = True, *args, **kwargs):
+    """
+    Starts a function in a loop until it gets response code 200.
+
+    Args:
+        function: The function to be executed.
+        infinite: A boolean indicating whether the loop should be infinite (default: True).
+        *args: Positional arguments to be passed to the function.
+        **kwargs: Keyword arguments to be passed to the function.
+
+    Raises:
+        _SDKException: If the app fails to start within 10 iterations (when infinite is False).
+
+    Returns:
+        None
+    """
+    from time import sleep
+
+    counter = 0
+    try:
+        response = function(*args, **kwargs)
+        if type(response) is bool:
+            while not response:
+                counter += 1
+                if not infinite and counter > 10:
+                    raise _exceptions.SDKException(-1, response)
+                sleep(5)
+                response = function(*args, **kwargs)
+        elif type(response) is dict:
+            while response["code"] != 200:
+                counter += 1
+                if not infinite and counter > 10:
+                    raise _exceptions.SDKException(-1, response)
+                sleep(5)
+                response = function(*args, **kwargs)
+        else:
+            raise _exceptions.SDKException(-1, response)
+    except _exceptions.SDKException as e:
+        import logging
+
+        if e.code == 409:
+            logging.warning(e)
+        else:
+            raise e
+    else:
+        return response
+
+
+def stop_function_loop(function, infinite: bool = True, *args, **kwargs):
+    """
+    Stop function loop, proceed on response code 200.
+
+    Args:
+        function: The function to be stopped.
+        infinite (bool): Flag indicating whether the loop should run infinitely or not.
+        *args: Variable length argument list to be passed to the function.
+        **kwargs: Arbitrary keyword arguments to be passed to the function.
+
+    Raises:
+        _SDKException: If the app fails to stop within 10 attempts (when infinite is False).
+
+    Returns:
+        None
+    """
+    from time import sleep
+
+    counter = 0
+    response = function(*args, **kwargs)
+    if type(response) is bool:
+        while not response:
+            counter += 1
+            if not infinite and counter > 10:
+                raise _exceptions.SDKException(-1, response)
+            sleep(5)
+            response = function(*args, **kwargs)
+    elif type(response) is dict:
+        while response["code"] != 200:
+            counter += 1
+            if not infinite and counter > 10:
+                raise _exceptions.SDKException(-1, response)
+            sleep(5)
+            response = function(*args, **kwargs)
+    else:
+        raise _exceptions.SDKException(-1, response)
+    return response
+
+
+def compute_list():
+    """
+    List all compute apps for user namespace.
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/list?resource_type=compute"
+    metric = "compute.list"
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.get,
+        url=url,
+        headers=headers,
+    )
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+class ResourceTypes(_Enum):
+    compute = "compute"
+    db = "db"
+
+
+def _resource_status_ready(name: str, response: dict):
+    """
+    Check if a resource is ready.
+
+    Args:
+        name: The name of the resource.
+        response: The response from the API call.
+
+    Returns:
+        The response from the API call.
+    """
+    for pod in response["details"]["pods_list"]:
+        try:
+            if pod["labels"]["app-name"] == name:
+                return pod["status"] == "Running"
+        except KeyError:
+            return False
+    return False
+
+
+def resource_ready(
+    name: str, resource_type: ResourceTypes = ResourceTypes.compute
+) -> bool:
+    """
+    Check if a resource is ready.
+
+    Args:
+        name: The name of the resource.
+        resource_type: The type of resource to check (default: ResourceTypes.compute).
+
+    Returns:
+        bool: A boolean indicating whether the resource is ready.
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/list?resource_type={resource_type.value}"
+    metric = "resource.list"
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.get,
+        url=url,
+        headers=headers,
+    )
+    validated_response = (
+        _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+            __res, metric
+        )
+    )
+    return _resource_status_ready(name, validated_response)
+
+
+def db_list():
+    """
+    List all db apps for user namespace.
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/list?resource_type=db"
+    metric = "compute.list"
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.get,
+        url=url,
+        headers=headers,
+    )
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+def compute_create_custom(
+    name: str,
+    image_name: str,
+    cpu: int = 1,
+    memory: int = 2,
+    shm_size: int = 0,
+    gpu: int = 0,
+    gpu_type: str = "A5000",
+    volumes: list = [],
+    volume_full_path: str = "",
+    startup_command: str = "",
+    repository_secret: str = "",
+    resource_data: list = [],
+    config_maps_data: list = [],
+    node_port_enabled: bool = False,
+):
+    """
+    Create a custom compute resource.
+
+    :param name: The name of the compute resource.
+    :type name: str
+    :param image_name: The name of the image to use for the compute resource.
+    :type image_name: str,
+    :param cpu: The number of CPUs for the compute resource, defaults to 1.
+    :type cpu: int, optional
+    :param memory: The amount of memory (in GB) for the compute resource, defaults to 2.
+    :type memory: int, optional
+    :param shm_size: The size of the shared memory (in GB) for the compute resource, defaults to 0.
+    :type shm_size: int, optional
+    :param gpu: The number of GPUs for the compute resource, defaults to 0.
+    :type gpu: int, optional
+    :param gpu_type: The type of GPU for the compute resource, defaults to "A5000".
+    :type gpu_type: str, optional
+    :param volumes: The list of volumes to attach to the compute resource, defaults to [].
+    :type volumes: list, optional
+    :param volume_full_path: The full path of the volume, defaults to "".
+    :type volume_full_path: str, optional
+    :param startup_command: The startup command for the compute resource, defaults to "".
+    :type startup_command: str, optional
+    :param repository_secret: The secret for accessing the repository, defaults to "".
+    :type repository_secret: str, optional
+    :param resource_data: The additional resource data, defaults to [].
+    :type resource_data: list, optional
+    :param config_maps_data: The additional config maps data, defaults to [].
+    :type config_maps_data: list, optional
+    :param node_port_enabled: A flag indicating whether the node port is enabled, defaults to False.
+    :type node_port_enabled: bool, optional
+    :raises _SDKException: If the image name is not provided.
+    :raises _SDKException: If an invalid GPU type is specified.
+    :return: The response from the API call.
+    :rtype: _type_
+    """
+    if not image_name:
+        raise _exceptions.SDKException(-2, "Image name is required")
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/create"
+    metric = "compute.create"
+    gpu_type = gpu_type.upper()
+    if gpu_type not in GPUsList.get_list():
+        raise _exceptions.SDKException(-3, f"Invalid GPU type: {gpu_type}")
+    __payload = _compute_utills.compute_create_payload(
+        name=name,
+        entity="custom",
+        cpu=cpu,
+        memory=memory,
+        gpu=gpu,
+        gpu_type=gpu_type,
+        volumes=volumes,
+        volume_full_path=volume_full_path,
+        resource_data=resource_data,
+        config_maps_data=config_maps_data,
+        shm_size=shm_size,
+        image_name=image_name,
+        startup_command=startup_command,
+        repository_secret=repository_secret,
+        node_port_enabled=node_port_enabled,
+    )
+
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.post,
+        url=url,
+        headers=headers,
+        data=_json.dumps(__payload),
+    )
+
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+def resource_update_port(
+    name: str, port_name: str, new_port: int, ingress: bool = True
+):
+    """
+    Update a port for an app using backend endpoint.
+
+    :param name: name of app to edit
+    :type name: str
+    :param port_name: name of port to edit
+    :type port_name: str
+    :param new_port: new port number
+    :type new_port: int
+    :return: response from the API call
+    :rtype: dict
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/ports?port_modification_mode=UPDATE"
+    metric = "resource.ports.update"
+    __payload = _compute_utills.port_modification_payload(
+        port_name=port_name,
+        port_number=new_port,
+        ingress=ingress,
+        app_name=name,
+    )
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.post,
+        url=url,
+        headers=headers,
+        data=_json.dumps(__payload),
+    )
+
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+def resource_add_port(name: str, port_name: str, new_port: int, ingress: bool = True):
+    """
+    Add a port for an app using backend endpoint.
+
+    :param name: name of app to edit
+    :type name: str
+    :param port_name: name of port to add
+    :type port_name: str
+    :param new_port: new port number
+    :type new_port: int
+    :return: response from the API call
+    :rtype: dict
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/ports?port_modification_mode=ADD"
+    metric = "resource.ports.add"
+    __payload = _compute_utills.port_modification_payload(
+        port_name=port_name,
+        port_number=new_port,
+        ingress=ingress,
+        app_name=name,
+    )
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.post,
+        url=url,
+        headers=headers,
+        data=_json.dumps(__payload),
+    )
+
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+def resource_delete_port(
+    name: str,
+    port_name: str,
+):
+    """
+    Add a port for an app using backend endpoint.
+
+    :param name: name of app to edit
+    :type name: str
+    :param port_name: name of port to delete
+    :type port_name: str
+    :return: response from the API call
+    :rtype: dict
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/ports?port_modification_mode=DELETE"
+    metric = "resource.ports.delete"
+    __payload = _compute_utills.port_delete_payload(
+        port_name=port_name,
+        app_name=name,
+    )
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.post,
+        url=url,
+        headers=headers,
+        data=_json.dumps(__payload),
+    )
+
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+def resource_list_ports(name: str):
+    """
+    List ports for an app using backend endpoint.
+
+    :param name: name of app to list ports for
+    :type name: str
+    :return: response from the API call
+    :rtype: dict
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/ports?resource_name={name}"
+    metric = "resource.ports.list"
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.get,
+        url=url,
+        headers=headers,
+    )
+
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
+
+
+def resource_delete(name: str):
+    """
+    Delete an app using backend endpoint.
+
+    :param name: name of app to delete
+    :type name: str
+    :return: response from the API call
+    :rtype: dict
+    """
+    api_url, headers = _prepare_headers.get_api_url_and_prepare_headers()
+    url = f"{api_url}/v1/api/resource/delete"
+    metric = "resource.delete"
+    __payload = _compute_utills.compute_delete_payload(name=name)
+    __res = _requests_helper.call_api(
+        request=_requests_helper.EndpointTypes.delete,
+        url=url,
+        headers=headers,
+        data=_json.dumps(__payload),
+    )
+
+    return _response_utils.retrieve_and_validate_response_send_metric_for_sdk(
+        __res, metric
+    )
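
A minimal end-to-end sketch of the new SDK surface; the app name, image, and sizing are hypothetical, and a configured cgc context with valid API credentials is assumed:

    import cgc.sdk.resource as resource

    # Create a custom compute app from a container image (hypothetical values).
    resource.compute_create_custom(
        name="demo-app",
        image_name="nvcr.io/nvidia/pytorch:24.01-py3",
        cpu=2,
        memory=4,
    )

    # Poll every 5 s until the app's pod reports status "Running"
    # (infinite=False gives up after 10 attempts and raises SDKException).
    resource.start_function_loop(resource.resource_ready, False, name="demo-app")

    print(resource.resource_list_ports("demo-app"))

    # Clean up.
    resource.resource_delete("demo-app")
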
cgc/utils/consts/env_consts.py
CHANGED
@@ -38,6 +38,7 @@ TMP_DIR = os.getenv("TMP_DIR")
 RELEASE = int(os.getenv("RELEASE"))
 MAJOR_VERSION = int(os.getenv("MAJOR_VERSION"))
 MINOR_VERSION = int(os.getenv("MINOR_VERSION"))
+ON_PREMISES = True if os.getenv("ON_PREMISES") == "1" else False
 
 
 def get_config_file_name():
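
The flag is read with os.getenv, so it reflects whatever the shipped cgc/.env (also updated in this release) or the process environment provides; only the exact string "1" enables it, and it is what permits node_port_enabled=True in compute_create_payload above. Illustrative check:

    import cgc.utils.consts.env_consts as env_consts

    # True only when the environment provides ON_PREMISES=1
    print(env_consts.ON_PREMISES)
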
cgc/utils/custom_exceptions.py
CHANGED
@@ -14,7 +14,7 @@ CUSTOM_EXCEPTIONS = {
     409: {
         "PVC_NAME_ALREADY_EXISTS": "Volume with this name already exists.",
         "PVC_DELETE_EXCEPTION": "Can't delete mounted volume, try with force",
-        "
+        "RESOURCE_TEMPLATE_NAME_ALREADY_EXISTS": "Template with this name already exists.",
     },
     404: {
         "PVC_CREATE_NO_SC": "Selected disk type and access mode unavailable",