jac-scale 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. jac_scale/__init__.py +0 -0
  2. jac_scale/abstractions/config/app_config.jac +30 -0
  3. jac_scale/abstractions/config/base_config.jac +26 -0
  4. jac_scale/abstractions/database_provider.jac +51 -0
  5. jac_scale/abstractions/deployment_target.jac +64 -0
  6. jac_scale/abstractions/image_registry.jac +54 -0
  7. jac_scale/abstractions/logger.jac +20 -0
  8. jac_scale/abstractions/models/deployment_result.jac +27 -0
  9. jac_scale/abstractions/models/resource_status.jac +38 -0
  10. jac_scale/config_loader.jac +31 -0
  11. jac_scale/context.jac +14 -0
  12. jac_scale/factories/database_factory.jac +43 -0
  13. jac_scale/factories/deployment_factory.jac +43 -0
  14. jac_scale/factories/registry_factory.jac +32 -0
  15. jac_scale/factories/utility_factory.jac +34 -0
  16. jac_scale/impl/config_loader.impl.jac +131 -0
  17. jac_scale/impl/context.impl.jac +24 -0
  18. jac_scale/impl/memory_hierarchy.main.impl.jac +63 -0
  19. jac_scale/impl/memory_hierarchy.mongo.impl.jac +239 -0
  20. jac_scale/impl/memory_hierarchy.redis.impl.jac +186 -0
  21. jac_scale/impl/serve.impl.jac +1785 -0
  22. jac_scale/jserver/__init__.py +0 -0
  23. jac_scale/jserver/impl/jfast_api.impl.jac +731 -0
  24. jac_scale/jserver/impl/jserver.impl.jac +79 -0
  25. jac_scale/jserver/jfast_api.jac +162 -0
  26. jac_scale/jserver/jserver.jac +101 -0
  27. jac_scale/memory_hierarchy.jac +138 -0
  28. jac_scale/plugin.jac +218 -0
  29. jac_scale/plugin_config.jac +175 -0
  30. jac_scale/providers/database/kubernetes_mongo.jac +137 -0
  31. jac_scale/providers/database/kubernetes_redis.jac +110 -0
  32. jac_scale/providers/registry/dockerhub.jac +64 -0
  33. jac_scale/serve.jac +118 -0
  34. jac_scale/targets/kubernetes/kubernetes_config.jac +215 -0
  35. jac_scale/targets/kubernetes/kubernetes_target.jac +841 -0
  36. jac_scale/targets/kubernetes/utils/kubernetes_utils.impl.jac +519 -0
  37. jac_scale/targets/kubernetes/utils/kubernetes_utils.jac +85 -0
  38. jac_scale/tests/__init__.py +0 -0
  39. jac_scale/tests/conftest.py +29 -0
  40. jac_scale/tests/fixtures/test_api.jac +159 -0
  41. jac_scale/tests/fixtures/todo_app.jac +68 -0
  42. jac_scale/tests/test_abstractions.py +88 -0
  43. jac_scale/tests/test_deploy_k8s.py +265 -0
  44. jac_scale/tests/test_examples.py +484 -0
  45. jac_scale/tests/test_factories.py +149 -0
  46. jac_scale/tests/test_file_upload.py +444 -0
  47. jac_scale/tests/test_k8s_utils.py +156 -0
  48. jac_scale/tests/test_memory_hierarchy.py +247 -0
  49. jac_scale/tests/test_serve.py +1835 -0
  50. jac_scale/tests/test_sso.py +711 -0
  51. jac_scale/utilities/loggers/standard_logger.jac +40 -0
  52. jac_scale/utils.jac +16 -0
  53. jac_scale-0.1.1.dist-info/METADATA +658 -0
  54. jac_scale-0.1.1.dist-info/RECORD +57 -0
  55. jac_scale-0.1.1.dist-info/WHEEL +5 -0
  56. jac_scale-0.1.1.dist-info/entry_points.txt +3 -0
  57. jac_scale-0.1.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,841 @@
+ """Kubernetes deployment target implementation."""
+ import os;
+ import from typing { Any }
+ import from jac_scale.abstractions.deployment_target { DeploymentTarget }
+ import from jac_scale.abstractions.config.app_config { AppConfig }
+ import from jac_scale.abstractions.models.deployment_result { DeploymentResult }
+ import from jac_scale.abstractions.models.resource_status {
+     ResourceStatus,
+     ResourceStatusInfo
+ }
+ import from jac_scale.abstractions.logger { Logger }
+ import from jac_scale.targets.kubernetes.kubernetes_config { KubernetesConfig }
+ import from jac_scale.targets.kubernetes.utils.kubernetes_utils {
+     check_deployment_status,
+     cluster_type,
+     delete_if_exists,
+     check_K8s_status,
+     ensure_namespace_exists,
+     ensure_pvc_exists,
+     load_env_variables,
+     validate_resource_limits,
+     sync_code_to_pvc
+ }
+ import from jac_scale.providers.registry.dockerhub { DockerHubRegistry }
+ import from jac_scale.providers.database.kubernetes_mongo { KubernetesMongoProvider }
+ import from jac_scale.providers.database.kubernetes_redis { KubernetesRedisProvider }
+ import from jac_scale.factories.database_factory { DatabaseProviderFactory }
+ import from jac_scale.factories.registry_factory { ImageRegistryFactory }
+ import time;
+ import from kubernetes { client, config as k8s_config }
+ import from kubernetes.client.exceptions { ApiException }
+
+ """Kubernetes deployment target implementation."""
+ class KubernetesTarget(DeploymentTarget) {
+     has k8s_config: KubernetesConfig;
+
+     def init(
+         self: KubernetesTarget,
+         config: KubernetesConfig,
+         logger: (Logger | None) = None
+     ) -> None {
+         self.config = config;
+         self.k8s_config = config;
+         self.env_list = [];
+         if logger {
+             self.logger = logger;
+         }
+     }
+
+     """Get init containers from all enabled database providers."""
+     def _get_database_init_containers(
+         self: KubernetesTarget, app_name: str
+     ) -> list[dict[(str, Any)]] {
+         init_containers = [];
+         wait_image = self.k8s_config.wait_image;
+
+         if self.k8s_config.mongodb_enabled {
+             mongo_provider = DatabaseProviderFactory.create(
+                 'kubernetes_mongo', self, {'app_name': app_name}
+             );
+             init_container = mongo_provider.get_init_container(app_name, wait_image);
+             if init_container {
+                 init_containers.append(init_container);
+             }
+         }
+
+         if self.k8s_config.redis_enabled {
+             redis_provider = DatabaseProviderFactory.create(
+                 'kubernetes_redis', self, {'app_name': app_name}
+             );
+             init_container = redis_provider.get_init_container(app_name, wait_image);
+             if init_container {
+                 init_containers.append(init_container);
+             }
+         }
+
+         return init_containers;
+     }
+
+     """Build resource requests and limits configuration."""
+     def _build_resource_config(self: KubernetesTarget) -> dict[(str, Any)] {
+         resource_requests: dict[(str, str)] = {};
+         if self.k8s_config.cpu_request {
+             resource_requests['cpu'] = self.k8s_config.cpu_request;
+         }
+         if self.k8s_config.memory_request {
+             resource_requests['memory'] = self.k8s_config.memory_request;
+         }
+
+         resource_limits: dict[(str, str)] = {};
+         if self.k8s_config.cpu_limit {
+             resource_limits['cpu'] = self.k8s_config.cpu_limit;
+         }
+         if self.k8s_config.memory_limit {
+             resource_limits['memory'] = self.k8s_config.memory_limit;
+         }
+
+         resources: dict[(str, dict)] = {};
+         if resource_requests {
+             resources['requests'] = resource_requests;
+         }
+         if resource_limits {
+             resources['limits'] = resource_limits;
+         }
+
+         return resources;
+     }
+
+     """Build readiness and liveness probe configurations."""
+     def _build_probe_config(
+         self: KubernetesTarget, health_check_path: str
+     ) -> dict[(str, Any)] {
+         probe_config = {
+             'httpGet': {
+                 'path': health_check_path,
+                 'port': self.k8s_config.container_port,
+                 'scheme': 'HTTP'
+             },
+             'initialDelaySeconds': self.k8s_config.readiness_initial_delay,
+             'periodSeconds': self.k8s_config.readiness_period
+         };
+
+         liveness_probe = dict(probe_config);
+         liveness_probe['initialDelaySeconds'] = self.k8s_config.liveness_initial_delay;
+         liveness_probe['periodSeconds'] = self.k8s_config.liveness_period;
+         liveness_probe['failureThreshold'] = self.k8s_config.liveness_failure_threshold;
+
+         return {'readiness': probe_config, 'liveness': liveness_probe};
+     }
+
+     """Build Kubernetes service configuration."""
+     def _build_service_config(
+         self: KubernetesTarget,
+         app_name: str,
+         is_aws: bool,
+         health_check_path: str,
+         service_type: str
+     ) -> dict[(str, Any)] {
+         service_port = 80 if is_aws else self.k8s_config.container_port;
+         port_config: dict[(str, Any)] = {
+             'protocol': 'TCP',
+             'port': service_port,
+             'targetPort': self.k8s_config.container_port
+         };
+         if not is_aws {
+             port_config['nodePort'] = self.k8s_config.node_port;
+         }
+
+         service_metadata: dict[(str, Any)] = {
+             'name': f"{app_name}-service",
+             'namespace': self.k8s_config.namespace,
+             'labels': {'app': app_name}
+         };
+
+         if is_aws {
+             service_metadata['annotations'] = {
+                 'service.beta.kubernetes.io/aws-load-balancer-type': 'nlb',
+                 'service.beta.kubernetes.io/aws-load-balancer-scheme': 'internet-facing',
+                 'service.beta.kubernetes.io/aws-load-balancer-healthcheck-path': health_check_path
+             };
+         }
+
+         service = {
+             'apiVersion': 'v1',
+             'kind': 'Service',
+             'metadata': service_metadata,
+             'spec': {
+                 'type': service_type,
+                 'ports': [port_config],
+                 'selector': {'app': app_name}
+             }
+         };
+
+         return service;
+     }
+
+     """Build the bash command for setting up the Jaseci runtime environment.
+
+     This replaces the hard-coded command with a configurable version.
+     """
+     def _build_runtime_setup_command(
+         self: KubernetesTarget, app_config: AppConfig
+     ) -> list[str] {
+         config = self.k8s_config;
+         commands = [];
+
+         # Base setup
+         commands.append('export DEBIAN_FRONTEND=noninteractive');
+         commands.append('apt-get update');
+
+         # Install base packages
+         base_packages = ['git', 'npm', 'nodejs'];
+         if config.additional_packages {
+             base_packages.extend(config.additional_packages);
+         }
+         commands.append(f"apt-get install -y {' '.join(base_packages)}");
+
+         # Clone and setup Jaseci if enabled
+         if config.install_jaseci {
+             commands.append('rm -rf jaseci');
+             # Clone repository
+             clone_cmd = f"git clone --branch {config.jaseci_branch} --single-branch {config.jaseci_repo_url}";
+             commands.append(clone_cmd);
+             commands.append('cd ./jaseci');
+             # Checkout specific commit if provided
+             if config.jaseci_commit {
+                 commands.append(f"git checkout {config.jaseci_commit}");
+             }
+             # Update submodules
+             commands.append('git submodule update --init --recursive');
+             # Setup virtual environment
+             commands.append('python -m venv venv');
+             commands.append('source venv/bin/activate');
+             # Install Jaseci components
+             commands.append('pip install pluggy');
+             commands.append('pip install -e ./jac');
+             commands.append('pip install -e ./jac-scale');
+             commands.append('pip install -e ./jac-client');
+             commands.append('pip install -e ./jac-byllm');
+             commands.append('cd ..');
+         }
+
+         # Change to app directory (project is already copied there via volume mount)
+         commands.append(f"cd {config.app_mount_path}");
+
+         # Install Python requirements
+         commands.append('jac add');
+
+         # Install Node requirements
+         commands.append('jac add --npm');
+
+         # Start the application
+         commands.append(f"jac start {app_config.file_name}");
+
+         # Join all commands
+         full_command = ' && '.join(commands);
+
+         return ['bash', '-c', full_command];
+     }
+
+     """Build container configuration."""
+     def _build_container_config(
+         self: KubernetesTarget,
+         app_name: str,
+         image: str,
+         probe_configs: dict[(str, Any)],
+         resources_config: dict[(str, Any)],
+         command: (list[str] | None) = None,
+         working_dir: (str | None) = None,
+         volume_mounts: (list[dict[(str, Any)]] | None) = None
+     ) -> dict[(str, Any)] {
+         container_config = {
+             'name': app_name,
+             'image': image,
+             'ports': [{'containerPort': self.k8s_config.container_port}],
+             'env': self.env_list,
+             'readinessProbe': probe_configs['readiness'],
+             'livenessProbe': probe_configs['liveness']
+         };
+
+         if command {
+             container_config['command'] = command;
+         }
+         if working_dir {
+             container_config['workingDir'] = working_dir;
+         }
+         if volume_mounts {
+             container_config['volumeMounts'] = volume_mounts;
+         }
+         if resources_config {
+             container_config['resources'] = resources_config;
+         }
+
+         return container_config;
+     }
+
+     """Build volumes and the build-container configuration for code sync."""
+     def _build_volumes_config(
+         self: KubernetesTarget,
+         app_name: str,
+         app_config: AppConfig,
+         namespace: str,
+         core_v1: Any
+     ) -> dict[str, Any] {
+         pvc_name = f"{app_name}-code-pvc";
+         pvc_size = self.k8s_config.pvc_size;
+         sync_image = self.k8s_config.busybox_image;
+
+         ensure_pvc_exists(core_v1, namespace, pvc_name, pvc_size);
+         if self.logger {
+             self.logger.info('Syncing application code to PVC');
+         }
+         sync_code_to_pvc(
+             core_v1, namespace, pvc_name, app_config.code_folder, app_name, sync_image
+         );
+
+         build_container = {
+             'name': 'build-app',
+             'image': self.k8s_config.python_image,
+             'command': [
+                 'sh',
+                 '-c',
+                 f"apt-get update && apt-get install -y rsync && mkdir -p {self.k8s_config.app_mount_path} && rm -rf {self.k8s_config.app_mount_path}/* && rsync -av --exclude='.jac' {self.k8s_config.workspace_path}/ {self.k8s_config.app_mount_path}/"
+             ],
+             'volumeMounts': [
+                 {'name': 'app-code', 'mountPath': self.k8s_config.app_mount_path},
+                 {'name': 'code-source', 'mountPath': self.k8s_config.code_mount_path}
+             ]
+         };
+
+         volumes = [
+             {'name': 'app-code', 'emptyDir': {}},
+             {'name': 'code-source', 'persistentVolumeClaim': {'claimName': pvc_name}}
+         ];
+
+         return {'volumes': volumes, 'build_container': build_container};
+     }
+
+     """Deploy all enabled databases."""
+     def _deploy_databases(
+         self: KubernetesTarget,
+         app_name: str,
+         namespace: str,
+         apps_v1: Any,
+         core_v1: Any
+     ) -> None {
+         if self.k8s_config.mongodb_enabled {
+             mongodb_name = f"{app_name}-mongodb";
+             mongodb_service_name = f"{mongodb_name}-service";
+             mongo_provider = DatabaseProviderFactory.create(
+                 'kubernetes_mongo', self, {'app_name': app_name}
+             );
+             mongo_result = mongo_provider.deploy({});
+             self.env_list.append(
+                 {'name': 'MONGODB_URI', 'value': mongo_result['connection_string']}
+             );
+             try {
+                 apps_v1.read_namespaced_stateful_set(
+                     name=mongodb_name, namespace=namespace
+                 );
+             } except ApiException as e {
+                 if (e.status == 404) {
+                     apps_v1.create_namespaced_stateful_set(
+                         namespace=namespace, body=mongo_result['deployment']
+                     );
+                 } else {
+                     raise;
+                 }
+             }
+             try {
+                 core_v1.read_namespaced_service(
+                     name=mongodb_service_name, namespace=namespace
+                 );
+             } except ApiException as e {
+                 if (e.status == 404) {
+                     core_v1.create_namespaced_service(
+                         namespace=namespace, body=mongo_result['service']
+                     );
+                 } else {
+                     raise;
+                 }
+             }
+         }
+
+         if self.k8s_config.redis_enabled {
+             redis_name = f"{app_name}-redis";
+             redis_service_name = f"{redis_name}-service";
+             redis_provider = DatabaseProviderFactory.create(
+                 'kubernetes_redis', self, {'app_name': app_name}
+             );
+             redis_result = redis_provider.deploy({});
+             self.env_list.append(
+                 {'name': 'REDIS_URL', 'value': redis_result['connection_string']}
+             );
+             try {
+                 apps_v1.read_namespaced_deployment(
+                     name=redis_name, namespace=namespace
+                 );
+             } except ApiException as e {
+                 if (e.status == 404) {
+                     apps_v1.create_namespaced_deployment(
+                         namespace=namespace, body=redis_result['deployment']
+                     );
+                 } else {
+                     raise;
+                 }
+             }
+             try {
+                 core_v1.read_namespaced_service(
+                     name=redis_service_name, namespace=namespace
+                 );
+             } except ApiException as e {
+                 if (e.status == 404) {
+                     core_v1.create_namespaced_service(
+                         namespace=namespace, body=redis_result['service']
+                     );
+                 } else {
+                     raise;
+                 }
+             }
+         }
+     }
+
+     """Wait for deployment to be ready and check status."""
+     def _wait_for_deployment(
+         self: KubernetesTarget,
+         app_name: str,
+         namespace: str,
+         health_check_path: str,
+         is_aws: bool
+     ) -> None {
+         path = health_check_path;
+         if is_aws {
+             time.sleep(self.k8s_config.aws_nlb_wait);
+             nlb_url = None;
+             try {
+                 k8s_config.load_kube_config();
+                 core_v1 = client.CoreV1Api();
+                 service_obj = core_v1.read_namespaced_service(
+                     f"{app_name}-service", namespace
+                 );
+                 nlb_ingress = service_obj.status.load_balancer.ingress;
+                 if (nlb_ingress and (len(nlb_ingress) > 0)) {
+                     endpoint = nlb_ingress[0].hostname or nlb_ingress[0].ip;
+                     nlb_url = f"http://{endpoint}";
+                 } else {
+                     if self.logger {
+                         self.logger.info(
+                             f"NLB is being provisioned. Run 'kubectl get svc {app_name}-service -n {namespace}' to get the endpoint."
+                         );
+                     }
+                 }
+             } except Exception as e {
+                 if self.logger {
+                     self.logger.warn(f"Could not retrieve NLB endpoint: {e}");
+                 }
+             }
+             if (nlb_url) {
+                 deployment_status = check_deployment_status(
+                     self.k8s_config.node_port, path, nlb_url=nlb_url
+                 );
+                 if not (deployment_status) {
+                     raise Exception("Deployment failed: Application failed to deploy.");
+                 }
+                 if self.logger {
+                     self.logger.info(
+                         f"Deployment complete! Access Jaseci-app at {nlb_url}{path}"
+                     );
+                 }
+             }
+         } else {
+             deployment_status = check_deployment_status(
+                 self.k8s_config.node_port, path
+             );
+             if deployment_status is False {
+                 raise Exception("Deployment failed: Application failed to deploy.");
+             }
+             if self.logger {
+                 self.logger.info(
+                     f"Deployment complete! Access Jaseci-app at http://localhost:{self.k8s_config.node_port}{path}"
+                 );
+             }
+         }
+     }
+
+     """Handle image registry if build is requested."""
+     def _handle_image_registry(
+         self: KubernetesTarget, app_name: str, app_config: AppConfig
+     ) -> str {
+         image_name = self.k8s_config.docker_image_name or f"{app_name}:latest";
+         repository_name: str;
+         if app_config.build and self.image_registry {
+             # Build and push image
+             full_image = self.image_registry.build_and_push(
+                 app_config.code_folder, image_name
+             );
+             repository_name = full_image;
+         } elif app_config.build {
+             # Create registry if not set
+             registry_config = {
+                 'app_name': app_name,
+                 'docker_username': self.k8s_config.docker_username,
+                 'docker_password': self.k8s_config.docker_password,
+                 'docker_image_name': image_name
+             };
+             image_registry = ImageRegistryFactory.create('dockerhub', registry_config);
+             full_image = image_registry.build_and_push(
+                 app_config.code_folder, image_name
+             );
+             repository_name = full_image;
+         } else {
+             repository_name = self.k8s_config.python_image;
+         }
+         return repository_name;
+     }
+
+     def deploy(self: KubernetesTarget, app_config: AppConfig) -> DeploymentResult {
+         # 1. Initialize and validate
+         app_name = app_config.app_name or self.k8s_config.app_name;
+         namespace = os.getenv('K8s_NAMESPACE') or self.k8s_config.namespace;
+
+         if self.logger {
+             self.logger.info(
+                 f"Deploying application '{app_name}' to Kubernetes",
+                 {'namespace': namespace, 'build': app_config.build}
+             );
+         }
+
+         validate_resource_limits(
+             self.k8s_config.cpu_request,
+             self.k8s_config.cpu_limit,
+             self.k8s_config.memory_request,
+             self.k8s_config.memory_limit
+         );
+
+         # 2. Initialize Kubernetes clients
+         k8s_config.load_kube_config();
+         apps_v1 = client.AppsV1Api();
+         core_v1 = client.CoreV1Api();
+         check_K8s_status();
+         ensure_namespace_exists(namespace);
+
+         # 3. Load environment variables
+         self.env_list = load_env_variables(app_config.code_folder);
+
+         # 4. Determine cluster type
+         cluster_env = cluster_type();
+         is_aws = cluster_env == 'aws';
+         service_type = 'LoadBalancer' if is_aws else 'NodePort';
+
+         # 5. Handle image registry if build is requested
+         repository_name = self._handle_image_registry(app_name, app_config);
+
+         # 6. Get health check path
+         health_check_path = os.getenv(
+             'K8s_HEALTHCHECK_PATH', self.k8s_config.health_check_path
+         );
+
+         # 7. Get database init containers
+         init_containers = self._get_database_init_containers(app_name);
+
+         # 8. Build configurations
+         volumes = [];
+         resources_config = self._build_resource_config();
+         probe_configs = self._build_probe_config(health_check_path);
+
+         # 9. Build container configuration
+         container_config = self._build_container_config(
+             app_name, repository_name, probe_configs, resources_config
+         );
+
+         # 10. Handle volumes and build container if not building image
+         if not app_config.build {
+             volumes_config = self._build_volumes_config(
+                 app_name, app_config, namespace, core_v1
+             );
+             volumes = volumes_config['volumes'];
+             build_container = volumes_config['build_container'];
+             init_containers.append(build_container);
+             # Build runtime setup command
+             command = self._build_runtime_setup_command(app_config);
+             container_config = self._build_container_config(
+                 app_name,
+                 self.k8s_config.python_image,
+                 probe_configs,
+                 resources_config,
+                 command=command,
+                 working_dir=self.k8s_config.app_mount_path,
+                 volume_mounts=[
+                     {'name': 'app-code', 'mountPath': self.k8s_config.app_mount_path}
+                 ]
+             );
+         }
+
+         # 11. Build service and deployment
+         service = self._build_service_config(
+             app_name, is_aws, health_check_path, service_type
+         );
+         deployment = {
+             'apiVersion': 'apps/v1',
+             'kind': 'Deployment',
+             'metadata': {
+                 'name': app_name,
+                 'namespace': namespace,
+                 'labels': {'app': app_name}
+             },
+             'spec': {
+                 'replicas': 1,
+                 'selector': {'matchLabels': {'app': app_name}},
+                 'template': {
+                     'metadata': {'labels': {'app': app_name}},
+                     'spec': {
+                         'initContainers': init_containers,
+                         'containers': [container_config],
+                         'volumes': volumes
+                     }
+                 }
+             }
+         };
+
+         # 12. Delete existing resources if they exist
+         delete_if_exists(
+             apps_v1.delete_namespaced_deployment, app_name, namespace, 'Deployment'
+         );
+         time.sleep(self.k8s_config.resource_deletion_wait);
+
+         # 13. Deploy databases
+         self._deploy_databases(app_name, namespace, apps_v1, core_v1);
+
+         # 14. Deploy main application
+         if self.logger {
+             self.logger.info('Deploying Jaseci-app...');
+         }
+         apps_v1.create_namespaced_deployment(namespace=namespace, body=deployment);
+         try {
+             core_v1.read_namespaced_service(
+                 name=f"{app_name}-service", namespace=namespace
+             );
+         } except ApiException as e {
+             if (e.status == 404) {
+                 core_v1.create_namespaced_service(namespace=namespace, body=service);
+             } else {
+                 raise;
+             }
+         }
+
+         # 15. Wait for deployment and get service URL
+         self._wait_for_deployment(app_name, namespace, health_check_path, is_aws);
+         service_url = self.get_service_url(app_name);
+
+         if self.logger {
+             self.logger.info(
+                 f"Deployment complete for '{app_name}'", {'service_url': service_url}
+             );
+         }
+
+         return DeploymentResult(
+             success=True,
+             service_url=service_url,
+             message=f"Application '{app_name}' deployed successfully"
+         );
+     }
+
+     """Destroy all enabled databases."""
+     def _destroy_databases(
+         self: KubernetesTarget,
+         app_name: str,
+         namespace: str,
+         apps_v1: Any,
+         core_v1: Any
+     ) -> None {
+         if self.k8s_config.mongodb_enabled {
+             mongodb_name = f"{app_name}-mongodb";
+             delete_if_exists(
+                 apps_v1.delete_namespaced_stateful_set,
+                 mongodb_name,
+                 namespace,
+                 'StatefulSet'
+             );
+             delete_if_exists(
+                 core_v1.delete_namespaced_service,
+                 f"{mongodb_name}-service",
+                 namespace,
+                 'Service'
+             );
+         }
+
+         if self.k8s_config.redis_enabled {
+             redis_name = f"{app_name}-redis";
+             delete_if_exists(
+                 apps_v1.delete_namespaced_deployment,
+                 redis_name,
+                 namespace,
+                 'Deployment'
+             );
+             delete_if_exists(
+                 core_v1.delete_namespaced_service,
+                 f"{redis_name}-service",
+                 namespace,
+                 'Service'
+             );
+         }
+     }
+
+     def destroy(self: KubernetesTarget, app_name: str) -> None {
+         if self.logger {
+             self.logger.info(f"Destroying application '{app_name}' from Kubernetes");
+         }
+
+         try {
+             k8s_config.load_kube_config();
+             apps_v1 = client.AppsV1Api();
+             core_v1 = client.CoreV1Api();
+             namespace = self.k8s_config.namespace;
+
+             # Delete main deployment and service
+             delete_if_exists(
+                 apps_v1.delete_namespaced_deployment, app_name, namespace, 'Deployment'
+             );
+             delete_if_exists(
+                 core_v1.delete_namespaced_service,
+                 f"{app_name}-service",
+                 namespace,
+                 'Service'
+             );
+
+             # Delete databases
+             self._destroy_databases(app_name, namespace, apps_v1, core_v1);
+
+             # Delete code sync pod and PVCs
+             delete_if_exists(
+                 core_v1.delete_namespaced_pod,
+                 f"{app_name}-code-sync",
+                 namespace,
+                 'Pod'
+             );
+
+             # Delete PVCs
+             pvcs = core_v1.list_namespaced_persistent_volume_claim(namespace);
+             for pvc in pvcs.items {
+                 if pvc.metadata.name.startswith(app_name) {
+                     try {
+                         core_v1.delete_namespaced_persistent_volume_claim(
+                             name=pvc.metadata.name, namespace=namespace
+                         );
+                     } except Exception { }
+                 }
+             }
+
+             if self.logger {
+                 self.logger.info(f"Application '{app_name}' destroyed successfully");
+             }
+         } except Exception as e {
+             if self.logger {
+                 self.logger.error(f"Error destroying application '{app_name}': {e}");
+             }
+             raise;
+         }
+     }
+
+     def get_status(self: KubernetesTarget, app_name: str) -> ResourceStatusInfo {
+         try {
+             k8s_config.load_kube_config();
+             apps_v1 = client.AppsV1Api();
+             core_v1 = client.CoreV1Api();
+             namespace = self.k8s_config.namespace;
+
+             deployment = apps_v1.read_namespaced_deployment(
+                 name=app_name, namespace=namespace
+             );
+
+             replicas = deployment.spec.replicas or 0;
+             ready_replicas = deployment.status.ready_replicas or 0;
+
+             if ready_replicas == replicas and replicas > 0 {
+                 status = ResourceStatus.RUNNING;
+             } elif ready_replicas > 0 {
+                 status = ResourceStatus.PENDING;
+             } else {
+                 status = ResourceStatus.FAILED;
+             }
+
+             return ResourceStatusInfo(
+                 status=status, replicas=replicas, ready_replicas=ready_replicas
+             );
+         } except Exception as e {
+             if self.logger {
+                 self.logger.error(f"Failed to get status for '{app_name}': {e}");
+             }
+             return ResourceStatusInfo(status=ResourceStatus.UNKNOWN, message=str(e));
+         }
+     }
+
+     def scale(self: KubernetesTarget, app_name: str, replicas: int) -> None {
+         if self.logger {
+             self.logger.info(
+                 f"Scaling application '{app_name}' to {replicas} replicas"
+             );
+         }
+
+         try {
+             k8s_config.load_kube_config();
+             apps_v1 = client.AppsV1Api();
+             namespace = self.k8s_config.namespace;
+
+             deployment = apps_v1.read_namespaced_deployment(
+                 name=app_name, namespace=namespace
+             );
+             deployment.spec.replicas = replicas;
+
+             apps_v1.patch_namespaced_deployment(
+                 name=app_name, namespace=namespace, body=deployment
+             );
+
+             if self.logger {
+                 self.logger.info(
+                     f"Successfully scaled '{app_name}' to {replicas} replicas"
+                 );
+             }
+         } except Exception as e {
+             if self.logger {
+                 self.logger.error(f"Failed to scale '{app_name}': {e}");
+             }
+             raise;
+         }
+     }
+
+     def get_service_url(self: KubernetesTarget, app_name: str) -> (str | None) {
+         try {
+             k8s_config.load_kube_config();
+             core_v1 = client.CoreV1Api();
+             namespace = self.k8s_config.namespace;
+             service_name = f"{app_name}-service";
+
+             service = core_v1.read_namespaced_service(
+                 name=service_name, namespace=namespace
+             );
+
+             cluster_env = cluster_type();
+             is_aws = cluster_env == 'aws';
+
+             if is_aws {
+                 # AWS LoadBalancer
+                 ingress = service.status.load_balancer.ingress;
+                 if ingress and len(ingress) > 0 {
+                     endpoint = ingress[0].hostname or ingress[0].ip;
+                     return f"http://{endpoint}";
+                 }
+             } else {
+                 # NodePort
+                 node_port = self.k8s_config.node_port;
+                 return f"http://localhost:{node_port}";
+             }
+         } except Exception as e {
+             if self.logger {
+                 self.logger.debug(f"Could not get service URL for '{app_name}': {e}");
+             }
+         }
+         return None;
+     }
+ }
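
For orientation, a minimal usage sketch of the class above. The field names and constructor calls are assumptions drawn from how this file reads KubernetesConfig, AppConfig, and DeploymentResult; they are not a documented public API of the package.

import from jac_scale.targets.kubernetes.kubernetes_config { KubernetesConfig }
import from jac_scale.targets.kubernetes.kubernetes_target { KubernetesTarget }
import from jac_scale.abstractions.config.app_config { AppConfig }

with entry {
    # Assumed keyword construction; field names (namespace, node_port) appear in the diff above.
    config = KubernetesConfig(namespace='jac-apps', node_port=30080);
    target = KubernetesTarget(config);

    # AppConfig fields app_name, code_folder, file_name, and build are all read by deploy().
    app = AppConfig(
        app_name='todo-app',
        code_folder='./todo_app',
        file_name='todo_app.jac',
        build=False
    );

    result = target.deploy(app);
    if result.success {
        print(f"Deployed at {result.service_url}");
    }

    # Later lifecycle calls, per the methods above:
    # target.scale('todo-app', 3);
    # target.destroy('todo-app');
}

With build=False, deploy() skips the image build path and instead syncs the code folder to a PVC and bootstraps the runtime in an init container, as step 10 of deploy() shows.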