jac-scale 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jac_scale/__init__.py +0 -0
- jac_scale/abstractions/config/app_config.jac +30 -0
- jac_scale/abstractions/config/base_config.jac +26 -0
- jac_scale/abstractions/database_provider.jac +51 -0
- jac_scale/abstractions/deployment_target.jac +64 -0
- jac_scale/abstractions/image_registry.jac +54 -0
- jac_scale/abstractions/logger.jac +20 -0
- jac_scale/abstractions/models/deployment_result.jac +27 -0
- jac_scale/abstractions/models/resource_status.jac +38 -0
- jac_scale/config_loader.jac +31 -0
- jac_scale/context.jac +14 -0
- jac_scale/factories/database_factory.jac +43 -0
- jac_scale/factories/deployment_factory.jac +43 -0
- jac_scale/factories/registry_factory.jac +32 -0
- jac_scale/factories/utility_factory.jac +34 -0
- jac_scale/impl/config_loader.impl.jac +131 -0
- jac_scale/impl/context.impl.jac +24 -0
- jac_scale/impl/memory_hierarchy.main.impl.jac +63 -0
- jac_scale/impl/memory_hierarchy.mongo.impl.jac +239 -0
- jac_scale/impl/memory_hierarchy.redis.impl.jac +186 -0
- jac_scale/impl/serve.impl.jac +1785 -0
- jac_scale/jserver/__init__.py +0 -0
- jac_scale/jserver/impl/jfast_api.impl.jac +731 -0
- jac_scale/jserver/impl/jserver.impl.jac +79 -0
- jac_scale/jserver/jfast_api.jac +162 -0
- jac_scale/jserver/jserver.jac +101 -0
- jac_scale/memory_hierarchy.jac +138 -0
- jac_scale/plugin.jac +218 -0
- jac_scale/plugin_config.jac +175 -0
- jac_scale/providers/database/kubernetes_mongo.jac +137 -0
- jac_scale/providers/database/kubernetes_redis.jac +110 -0
- jac_scale/providers/registry/dockerhub.jac +64 -0
- jac_scale/serve.jac +118 -0
- jac_scale/targets/kubernetes/kubernetes_config.jac +215 -0
- jac_scale/targets/kubernetes/kubernetes_target.jac +841 -0
- jac_scale/targets/kubernetes/utils/kubernetes_utils.impl.jac +519 -0
- jac_scale/targets/kubernetes/utils/kubernetes_utils.jac +85 -0
- jac_scale/tests/__init__.py +0 -0
- jac_scale/tests/conftest.py +29 -0
- jac_scale/tests/fixtures/test_api.jac +159 -0
- jac_scale/tests/fixtures/todo_app.jac +68 -0
- jac_scale/tests/test_abstractions.py +88 -0
- jac_scale/tests/test_deploy_k8s.py +265 -0
- jac_scale/tests/test_examples.py +484 -0
- jac_scale/tests/test_factories.py +149 -0
- jac_scale/tests/test_file_upload.py +444 -0
- jac_scale/tests/test_k8s_utils.py +156 -0
- jac_scale/tests/test_memory_hierarchy.py +247 -0
- jac_scale/tests/test_serve.py +1835 -0
- jac_scale/tests/test_sso.py +711 -0
- jac_scale/utilities/loggers/standard_logger.jac +40 -0
- jac_scale/utils.jac +16 -0
- jac_scale-0.1.1.dist-info/METADATA +658 -0
- jac_scale-0.1.1.dist-info/RECORD +57 -0
- jac_scale-0.1.1.dist-info/WHEEL +5 -0
- jac_scale-0.1.1.dist-info/entry_points.txt +3 -0
- jac_scale-0.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,519 @@
|
|
|
1
|
+
import os;
|
|
2
|
+
import pathlib;
|
|
3
|
+
|
|
4
|
+
# Convert a Kubernetes CPU quantity string (e.g. "500m", "2") to a float
# number of cores. Raises ValueError on empty or non-numeric input.
impl parse_cpu_quantity(quantity: str) -> float {
    trimmed = quantity.strip();
    if (trimmed == '') {
        raise ValueError('CPU quantity cannot be empty.') ;
    }
    lower = trimmed.lower();
    # An "m" suffix means millicores: "500m" -> 0.5 cores.
    if lower.endswith('m') {
        value = lower[:-1].strip();
        if (value == '') {
            raise ValueError(
                f"CPU quantity '{quantity}' is missing a numeric component."
            ) ;
        }
        return float(value) / 1000.0;
    }
    # No suffix: the value is already expressed in whole cores.
    return float(trimmed);
}
|
|
21
|
+
|
|
22
|
+
# Convert a Kubernetes memory quantity string (e.g. "512Mi", "2G", "1024")
# to a float number of bytes. Raises ValueError on empty or non-numeric input.
impl parse_memory_quantity(quantity: str) -> float {
    trimmed = quantity.strip();
    if (trimmed == '') {
        raise ValueError('Memory quantity cannot be empty.') ;
    }
    # Suffix table ordered longest-first so the binary units ("KI".."EI")
    # are matched before their single-letter decimal prefixes ("K".."E").
    suffixes = [
        ('EI', float(1024 ** 6)),
        ('PI', float(1024 ** 5)),
        ('TI', float(1024 ** 4)),
        ('GI', float(1024 ** 3)),
        ('MI', float(1024 ** 2)),
        ('KI', float(1024)),
        ('E', float(10 ** 18)),
        ('P', float(10 ** 15)),
        ('T', float(10 ** 12)),
        ('G', float(10 ** 9)),
        ('M', float(10 ** 6)),
        ('K', float(10 ** 3))
    ];
    # Suffix matching is case-insensitive; the numeric part is sliced from
    # the original (trimmed) string so its digits are untouched.
    upper = trimmed.upper();
    for (unit, multiplier) in suffixes {
        if upper.endswith(unit) {
            number_part = trimmed[:(-len(unit))].strip();
            if (number_part == '') {
                raise ValueError(
                    f"Memory quantity '{quantity}' is missing a numeric component."
                ) ;
            }
            return float(number_part) * multiplier;
        }
    }
    # No recognised suffix: interpret the value as plain bytes.
    return float(trimmed);
}
|
|
55
|
+
|
|
56
|
+
# Validate that resource limits are not lower than the corresponding requests.
# Each pair is only checked when BOTH values are provided (truthy); a missing
# request or limit is allowed. Raises ValueError on unparsable quantities or
# when a limit is below its request.
impl validate_resource_limits(
    cpu_request: (str | None),
    cpu_limit: (str | None),
    memory_request: (str | None),
    memory_limit: (str | None)
) -> None {
    if (cpu_request and cpu_limit) {
        try {
            cpu_request_value = parse_cpu_quantity(cpu_request);
            cpu_limit_value = parse_cpu_quantity(cpu_limit);
        } except ValueError as exc {
            # Re-raise with context so the caller knows which family failed.
            raise ValueError(f"Invalid CPU quantity: {exc}") ;
        }
        if (cpu_limit_value < cpu_request_value) {
            raise ValueError(
                f"K8s_CPU_LIMIT ({cpu_limit}) must not be lower than K8s_CPU_REQUEST ({cpu_request})."
            ) ;
        }
    }
    if (memory_request and memory_limit) {
        try {
            memory_request_value = parse_memory_quantity(memory_request);
            memory_limit_value = parse_memory_quantity(memory_limit);
        } except ValueError as exc {
            raise ValueError(f"Invalid memory quantity: {exc}") ;
        }
        if (memory_limit_value < memory_request_value) {
            raise ValueError(
                f"K8s_MEMORY_LIMIT ({memory_limit}) must not be lower than K8s_MEMORY_REQUEST ({memory_request})."
            ) ;
        }
    }
}
|
|
89
|
+
|
|
90
|
+
"""Stage the application code inside the PVC using a transient helper pod."""
|
|
91
|
+
impl sync_code_to_pvc(
    core_v1: client.CoreV1Api,
    namespace: str,
    pvc_name: str,
    code_folder: str,
    app_name: str,
    sync_image: str
) -> None {
    import tempfile;
    import from pathlib { Path }
    sync_pod_name = f"{app_name}-code-sync";
    same_directory_toml_path = os.path.join(code_folder, 'jac.toml');
    # Minimal throwaway pod that mounts the PVC at /data and just sleeps,
    # giving `kubectl exec`/`kubectl cp` a target to stage files into.
    # NOTE(review): assumes sync_image provides `sh` and `tar` — confirm.
    pod_body = {
        'apiVersion': 'v1',
        'kind': 'Pod',
        'metadata': {'name': sync_pod_name},
        'spec': {
            'restartPolicy': 'Never',
            'containers': [
                {
                    'name': 'sync',
                    'image': sync_image,
                    'command': ['sh', '-c', 'sleep 3600'],
                    'volumeMounts': [{'name': 'code', 'mountPath': '/data'}]
                }
            ],
            'volumes': [
                {'name': 'code', 'persistentVolumeClaim': {'claimName': pvc_name}}
            ]
        }
    };
    try {
        core_v1.create_namespaced_pod(namespace, pod_body);
    } except ApiException as exc {
        # 409 Conflict: a pod with this name survives from an earlier run —
        # delete it, wait for it to vanish, then recreate.
        if (exc.status == 409) {
            core_v1.delete_namespaced_pod(sync_pod_name, namespace);
            wait_for_pod_deletion(core_v1, namespace, sync_pod_name);
            core_v1.create_namespaced_pod(namespace, pod_body);
        } else {
            raise ;
        }
    }
    wait_for_pod_phase(core_v1, namespace, sync_pod_name, {'Running'});
    # Only reserve the temp file's name here; the tarball is written to it
    # afterwards by create_tarball (delete=False keeps the path valid).
    with tempfile.NamedTemporaryFile(suffix='.tar.gz', delete=False) as temp_tar {
        temp_tar_path = Path(temp_tar.name);
    }
    try {
        create_tarball(code_folder, str(temp_tar_path));
        # Wipe any stale contents of the volume before extracting.
        run_kubectl_command(
            [
                'exec',
                '-n',
                namespace,
                sync_pod_name,
                '--',
                'sh',
                '-c',
                'rm -rf /data/* && mkdir -p /data/workspace'
            ]
        );
        # `kubectl cp` is run from the temp file's directory so only the bare
        # filename needs to be passed on the source side.
        run_kubectl_command(
            [
                'cp',
                '-n',
                namespace,
                temp_tar_path.name,
                f"{sync_pod_name}:/tmp/jaseci-code.tar.gz"
            ],
            cwd=temp_tar_path.parent
        );
        run_kubectl_command(
            [
                'exec',
                '-n',
                namespace,
                sync_pod_name,
                '--',
                'sh',
                '-c',
                'tar -xzf /tmp/jaseci-code.tar.gz -C /data/workspace && rm -f /tmp/jaseci-code.tar.gz'
            ]
        );

        # Copy jac.toml separately when it sits next to the code folder's root.
        if os.path.isfile(same_directory_toml_path) {
            run_kubectl_command(
                [
                    'cp',
                    '-n',
                    namespace,
                    "jac.toml",
                    f"{sync_pod_name}:/data/workspace/jac.toml"
                ],
                cwd=code_folder
            );
        } else {
            print("jac.toml file not found");
        }
    } finally {
        # Best-effort cleanup: remove the local tarball and the helper pod
        # even if any staging step failed above.
        temp_tar_path.unlink(missing_ok=True);
        try {
            core_v1.delete_namespaced_pod(sync_pod_name, namespace);
            wait_for_pod_deletion(core_v1, namespace, sync_pod_name);
        } except ApiException as exc {
            # 404 means the pod is already gone — nothing to clean up.
            if (exc.status != 404) {
                raise ;
            }
        }
    }
}
|
|
200
|
+
|
|
201
|
+
"""Execute a kubectl command and surface useful error details."""
|
|
202
|
+
# Run `kubectl <args>` as a subprocess.
#
# Args:
#     args: kubectl arguments (without the leading 'kubectl').
#     cwd:  optional working directory for the subprocess.
#
# Raises:
#     RuntimeError: if kubectl is not installed, or the command exits non-zero
#         (the underlying CalledProcessError is attached as __cause__).
impl run_kubectl_command(args: list[str], cwd: str | Path | None = None) -> None {
    import shutil;
    import subprocess;
    if (shutil.which('kubectl') is None) {
        raise RuntimeError('kubectl is required to sync code to the PVC.') ;
    }
    try {
        subprocess.run(
            ['kubectl', *args], check=True, text=True, cwd=str(cwd) if cwd else None
        );
    } except subprocess.CalledProcessError as exc {
        # FIX: the original `raise exc from RuntimeError(...)` inverted the
        # exception chain — it re-raised the bare CalledProcessError and
        # discarded the informative message. Raise the RuntimeError and chain
        # the CalledProcessError as its cause instead.
        raise RuntimeError(
            f"kubectl command failed: {' '.join(['kubectl', *args])}"
        ) from exc ;
    }
}
|
|
218
|
+
|
|
219
|
+
"""Block until the pod disappears from the API."""
|
|
220
|
+
impl wait_for_pod_deletion(
    core_v1: client.CoreV1Api, namespace: str, pod_name: str, timeout: int = 120
) -> None {
    start_time = time.time();
    # Poll every 2 seconds; a 404 from the read means the pod is gone.
    while ((time.time() - start_time) < timeout) {
        try {
            core_v1.read_namespaced_pod(pod_name, namespace);
        } except ApiException as exc {
            if (exc.status == 404) {
                return;
            }
            # Any other API failure is unexpected — propagate it.
            raise ;
        }
        time.sleep(2);
    }
    raise TimeoutError(f"Timed out waiting for pod '{pod_name}' deletion.") ;
}
|
|
237
|
+
|
|
238
|
+
"""Poll the pod until it reaches one of the desired phases."""
|
|
239
|
+
impl wait_for_pod_phase(
    core_v1: client.CoreV1Api,
    namespace: str,
    pod_name: str,
    target_phases: set[str],
    timeout: int = 180
) -> None {
    start_time = time.time();
    while ((time.time() - start_time) < timeout) {
        try {
            pod = core_v1.read_namespaced_pod(pod_name, namespace);
        } except ApiException as exc {
            # 404 is tolerated here: the pod may not be visible in the API
            # yet right after creation, so keep polling.
            if (exc.status == 404) {
                time.sleep(2);
                continue;
            }
            raise ;
        }
        # status.phase can be None before scheduling; normalise to ''.
        phase = (pod.status.phase or '').strip();
        if (phase in target_phases) {
            return;
        }
        # Fail fast instead of polling a pod that can never recover.
        if (phase == 'Failed') {
            raise RuntimeError(f"Sync pod '{pod_name}' entered Failed state.") ;
        }
        time.sleep(2);
    }
    raise TimeoutError(
        f"Timed out while waiting for pod '{pod_name}' to reach phase {target_phases}."
    ) ;
}
|
|
270
|
+
|
|
271
|
+
"""Create a PersistentVolumeClaim if it does not already exist."""
|
|
272
|
+
impl ensure_pvc_exists(
    core_v1: client.CoreV1Api,
    namespace: str,
    pvc_name: str,
    storage_size: str,
    storage_class: (str | None) = None,
    access_mode: str = 'ReadWriteOnce'
) -> None {
    # Idempotency check: a successful read means the PVC already exists.
    try {
        core_v1.read_namespaced_persistent_volume_claim(pvc_name, namespace);
        return;
    } except ApiException as exc {
        # Anything other than 404 (not found) is a real API error.
        if (exc.status != 404) {
            raise ;
        }
    }
    import from typing { Any }
    pvc_body: dict[(str, Any)] = {
        'apiVersion': 'v1',
        'kind': 'PersistentVolumeClaim',
        'metadata': {'name': pvc_name},
        'spec': {
            'accessModes': [access_mode],
            'resources': {'requests': {'storage': storage_size}}
        }
    };
    # Omitting storageClassName lets the cluster's default class apply.
    if storage_class {
        pvc_body['spec']['storageClassName'] = storage_class;
    }
    core_v1.create_namespaced_persistent_volume_claim(namespace, pvc_body);
}
|
|
303
|
+
|
|
304
|
+
"""
|
|
305
|
+
Detect if the current connected cluster is AWS or local.
|
|
306
|
+
|
|
307
|
+
Returns:
|
|
308
|
+
'aws' for AWS EKS clusters, 'local' for local/other clusters.
|
|
309
|
+
"""
|
|
310
|
+
impl cluster_type -> str {
    # Any failure (no kubeconfig, unreachable API, ...) falls back to 'local'.
    try {
        import from kubernetes { client }
        v1 = client.CoreV1Api();
        nodes = v1.list_node();
        if not nodes.items {
            return 'local';
        }
        # Primary signal: AWS nodes carry a providerID of the form
        # aws://<az>/<instance-id>. (<>node escapes the Jac keyword 'node'.)
        for <>node in nodes.items {
            provider_id = <>node.spec.provider_id or '';
            if provider_id.startswith('aws://') {
                return 'aws';
            }
        }
        # Fallback heuristic: an AWS-looking region label plus either a
        # Karpenter provisioner label or the standard os/instance-type pair.
        # NOTE(review): region prefixes also match some non-AWS clouds —
        # this is best-effort, not authoritative.
        for <>node in nodes.items {
            labels = <>node.metadata.labels or {};
            if (
                ('topology.kubernetes.io/region' in labels)
                and <>any(
                    (region in labels.get('topology.kubernetes.io/region', ''))
                    for region in ['us-', 'eu-', 'ap-', 'ca-', 'sa-']
                )
                and (
                    ('karpenter.sh/provisioner-name' in labels)
                    or (
                        ('kubernetes.io/os' in labels)
                        and ('node.kubernetes.io/instance-type' in labels)
                    )
                )
            ) {
                return 'aws';
            }
        }
        return 'local';
    } except Exception {
        return 'local';
    }
}
|
|
348
|
+
|
|
349
|
+
"""
|
|
350
|
+
Wait for a service to become available.
|
|
351
|
+
|
|
352
|
+
Can work with either NodePort (localhost:node_port) or NLB (AWS Load Balancer URL).
|
|
353
|
+
If nlb_url is provided, it will be used. Otherwise, localhost:node_port will be used.
|
|
354
|
+
"""
|
|
355
|
+
impl check_deployment_status(
    node_port: int,
    path: str = '/docs',
    interval: int = 15,
    max_retries: int = 30,
    nlb_url: (str | None) = None
) -> bool {
    # Prefer the load-balancer URL when given; otherwise hit the NodePort
    # on localhost. Returns True once the endpoint answers HTTP 200,
    # False after max_retries attempts.
    if nlb_url {
        url = f"{nlb_url.rstrip('/')}{path}";
    } else {
        url = f"http://localhost:{node_port}{path}";
    }
    for attempt in range(1, (max_retries + 1)) {
        try {
            response = requests.get(url, timeout=10);
            if (response.status_code == 200) {
                print(f"Service is available at: {url}");
                return True;
            }
        } except RequestException as e {
            # Connection errors are expected while the service boots; only
            # surface the last one to avoid log spam.
            if (attempt == max_retries) {
                print(f"Failed to connect to {url}: {e}");
            }
        }
        # No sleep after the final attempt.
        if (attempt < max_retries) {
            time.sleep(interval);
        }
    }
    return False;
}
|
|
385
|
+
|
|
386
|
+
"""Create or update ConfigMap with binary tar.gz using Kubernetes API."""
|
|
387
|
+
impl create_or_update_configmap(
    namespace: str, configmap_name: str, tar_path: str
) -> None {
    config.load_kube_config();
    v1 = client.CoreV1Api();
    # binary_data values must be base64-encoded strings for the K8s API.
    with open(tar_path, 'rb') as f {
        encoded_data = base64.b64encode(f.read()).decode('utf-8');
    }
    body = client.V1ConfigMap(
        metadata=client.V1ObjectMeta(name=configmap_name),
        binary_data={'jaseci-code.tar.gz': encoded_data}
    );
    try {
        # Patch in place when the ConfigMap already exists; carrying over
        # resource_version satisfies the API's optimistic-concurrency check.
        existing = v1.read_namespaced_config_map(configmap_name, namespace);
        body.metadata.resource_version = existing.metadata.resource_version;
        v1.patch_namespaced_config_map(
            name=configmap_name, namespace=namespace, body=body
        );
    } except ApiException as e {
        # 404: first deployment — create the ConfigMap fresh.
        if (e.status == 404) {
            v1.create_namespaced_config_map(namespace, body);
        } else {
            raise ;
        }
    }
}
|
|
413
|
+
|
|
414
|
+
"""
|
|
415
|
+
Create a tar.gz archive of the source directory using the tarfile module.
|
|
416
|
+
"""
|
|
417
|
+
impl create_tarball(source_dir: str, tar_path: str) -> None {
    # Archive the whole source directory into a gzip'd tarball at tar_path
    # (uses the tarfile module, despite the older docstring above).
    if not os.path.exists(source_dir) {
        raise FileNotFoundError(f"Source directory not found: {source_dir}") ;
    }
    # Create the destination directory if needed; '.' covers bare filenames.
    os.makedirs((os.path.dirname(tar_path) or '.'), exist_ok=True);
    # arcname='.' keeps paths inside the archive relative to source_dir.
    with tarfile.open(tar_path, 'w:gz') as tar {
        tar.add(source_dir, arcname='.');
    }
}
|
|
426
|
+
|
|
427
|
+
"""
|
|
428
|
+
Ensure that a given namespace exists in the Kubernetes cluster.
|
|
429
|
+
If it doesn't exist and is not 'default', it will be created.
|
|
430
|
+
"""
|
|
431
|
+
impl ensure_namespace_exists(namespace: str) -> None {
    # 'default' always exists; skip the API round-trip entirely.
    if (namespace == 'default') {
        return;
    }
    try {
        config.load_kube_config();
        core_v1 = client.CoreV1Api();
        core_v1.read_namespace(name=namespace);
        print(f"Namespace '{namespace}' already exists.");
    } except ApiException as e {
        # 404: namespace is missing — create it; anything else is an error.
        if (e.status == 404) {
            print(f"Namespace '{namespace}' not found. Creating it...");
            core_v1.create_namespace(
                body={
                    'apiVersion': 'v1',
                    'kind': 'Namespace',
                    'metadata': {'name': namespace}
                }
            );
            print(f"Namespace '{namespace}' created successfully.");
        } else {
            raise ;
        }
    }
}
|
|
456
|
+
|
|
457
|
+
"""Delete a namespaced Kubernetes resource if it exists, ignoring 404s."""
|
|
458
|
+
impl delete_if_exists(
    delete_func: Callable, name: str, namespace: str, kind: str
) -> None {
    # Call the given namespaced delete API, treating 404 (already gone) as
    # success. NOTE(review): the `kind` parameter is currently unused —
    # presumably intended for logging; kept for interface compatibility.
    try {
        delete_func(name, namespace);
    } except ApiException as e {
        if (e.status == 404) {
            # Deliberate no-op: the resource does not exist.
            ;
        } else {
            raise ;
        }
    }
}
|
|
471
|
+
|
|
472
|
+
"""
|
|
473
|
+
Checks if Kubernetes config is configured and the K8s API server is reachable.
|
|
474
|
+
"""
|
|
475
|
+
# Verify that a Kubernetes config is available (local kubeconfig first, then
# in-cluster service-account config) and that the API server answers.
#
# Raises:
#     Exception: if no config can be loaded, or the API server is unreachable.
impl check_K8s_status -> None {
    try {
        config.load_kube_config();
    } except ConfigException {
        try {
            config.load_incluster_config();
        } except ConfigException {
            # FIX: the original `raise None from Exception(...)` was inverted —
            # raising None is a TypeError at runtime. Raise the Exception and
            # suppress the chained ConfigException context with `from None`.
            raise Exception('Kubernetes is not configured on this machine.') from None ;
        }
    }
    try {
        v1 = client.CoreV1Api();
        # Cheap, read-only call that proves the API server is reachable.
        v1.get_api_resources();
    } except (ApiException, urllib3.exceptions.HTTPError, OSError) {
        # Same inverted-raise fix as above, plus a cleaned-up message
        # (was: 'Unable to connect to kubernetes APi.Check ...').
        raise Exception(
            'Unable to connect to the Kubernetes API. Check whether the Kubernetes cluster is up.'
        ) from None ;
    }
}
|
|
494
|
+
|
|
495
|
+
"""Load env variables in .env to aws beanstalk environment."""
|
|
496
|
+
impl load_env_variables(code_folder: str) -> list {
    # Read code_folder/.env and return its entries as a list of
    # {'name': ..., 'value': ...} dicts (the K8s container env format).
    # Returns [] when the file is absent.
    env_file = os.path.join(code_folder, '.env');
    # dotenv_values returns {} for a missing file, so calling it before the
    # exists() check below is harmless.
    env_vars = dotenv_values(env_file);
    env_list = [];
    if os.path.exists(env_file) {
        for (key, value) in env_vars.items() {
            env_list.append({'name': key, 'value': value});
        }
    }
    return env_list;
}
|
|
507
|
+
|
|
508
|
+
"""
|
|
509
|
+
Print a statement only if debug_only is True.
|
|
510
|
+
|
|
511
|
+
Args:
|
|
512
|
+
statement (str): The message to print.
|
|
513
|
+
debug_only (bool): If True, print the statement; otherwise, do nothing.
|
|
514
|
+
"""
|
|
515
|
+
impl debug_print(statement: str, debug_only: bool = False) -> None {
    # Gate console output behind the debug_only flag (off by default).
    if debug_only {
        print(statement);
    }
}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""Kubernetes utility functions for deployment operations."""
|
|
2
|
+
import base64;
|
|
3
|
+
import os;
|
|
4
|
+
import tarfile;
|
|
5
|
+
import time;
|
|
6
|
+
import from collections.abc { Callable }
|
|
7
|
+
import from pathlib { Path }
|
|
8
|
+
import requests;
|
|
9
|
+
import urllib3;
|
|
10
|
+
import from dotenv { dotenv_values }
|
|
11
|
+
import from kubernetes { client, config }
|
|
12
|
+
import from kubernetes.client.rest { ApiException }
|
|
13
|
+
import from kubernetes.config.config_exception { ConfigException }
|
|
14
|
+
import from requests.exceptions { RequestException }
|
|
15
|
+
|
|
16
|
+
# --- Forward declarations; implementations live in
# --- targets/kubernetes/utils/kubernetes_utils.impl.jac.

# Conditional debug logging.
def debug_print(statement: str, debug_only: bool = False) -> None;

# Resource-quantity parsing and request/limit validation.
def parse_cpu_quantity(quantity: str) -> float;

def parse_memory_quantity(quantity: str) -> float;

def validate_resource_limits(
    cpu_request: (str | None),
    cpu_limit: (str | None),
    memory_request: (str | None),
    memory_limit: (str | None)
) -> None;

# .env ingestion as K8s container env entries.
def load_env_variables(code_folder: str) -> list;

# Connectivity and cluster inspection.
def check_K8s_status -> None;

def delete_if_exists(
    delete_func: Callable, name: str, namespace: str, kind: str
) -> None;

def ensure_namespace_exists(namespace: str) -> None;

# Code packaging and ConfigMap staging.
def create_tarball(source_dir: str, tar_path: str) -> None;

def create_or_update_configmap(
    namespace: str, configmap_name: str, tar_path: str
) -> None;

def check_deployment_status(
    node_port: int,
    path: str = '/docs',
    interval: int = 15,
    max_retries: int = 30,
    nlb_url: (str | None) = None
) -> bool;

def cluster_type -> str;

# PVC lifecycle and pod polling helpers.
def ensure_pvc_exists(
    core_v1: client.CoreV1Api,
    namespace: str,
    pvc_name: str,
    storage_size: str,
    storage_class: (str | None) = None,
    access_mode: str = 'ReadWriteOnce'
) -> None;

def wait_for_pod_phase(
    core_v1: client.CoreV1Api,
    namespace: str,
    pod_name: str,
    target_phases: set[str],
    timeout: int = 180
) -> None;

def wait_for_pod_deletion(
    core_v1: client.CoreV1Api, namespace: str, pod_name: str, timeout: int = 120
) -> None;

# kubectl-based code sync into the PVC.
def run_kubectl_command(args: list[str], cwd: str | Path | None = None) -> None;

def sync_code_to_pvc(
    core_v1: client.CoreV1Api,
    namespace: str,
    pvc_name: str,
    code_folder: str,
    app_name: str,
    sync_image: str
) -> None;
|
|
File without changes
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Pytest configuration for jac-scale tests."""
|
|
2
|
+
|
|
3
|
+
import contextlib
|
|
4
|
+
import glob
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
import pytest
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _remove_anchor_store_files() -> None:
|
|
11
|
+
"""Remove anchor_store.db files created by ShelfDB."""
|
|
12
|
+
for pattern in [
|
|
13
|
+
"anchor_store.db.dat",
|
|
14
|
+
"anchor_store.db.bak",
|
|
15
|
+
"anchor_store.db.dir",
|
|
16
|
+
]:
|
|
17
|
+
for file in glob.glob(pattern):
|
|
18
|
+
with contextlib.suppress(Exception):
|
|
19
|
+
Path(file).unlink()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def pytest_sessionstart(session: pytest.Session) -> None:
    """Purge stale anchor_store.db files before the test session begins."""
    _remove_anchor_store_files()
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def pytest_sessionfinish(session: pytest.Session, exitstatus: int) -> None:
    """Purge anchor_store.db files once the test session has completed."""
    _remove_anchor_store_files()
|