jac-scale 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. jac_scale/__init__.py +0 -0
  2. jac_scale/abstractions/config/app_config.jac +30 -0
  3. jac_scale/abstractions/config/base_config.jac +26 -0
  4. jac_scale/abstractions/database_provider.jac +51 -0
  5. jac_scale/abstractions/deployment_target.jac +64 -0
  6. jac_scale/abstractions/image_registry.jac +54 -0
  7. jac_scale/abstractions/logger.jac +20 -0
  8. jac_scale/abstractions/models/deployment_result.jac +27 -0
  9. jac_scale/abstractions/models/resource_status.jac +38 -0
  10. jac_scale/config_loader.jac +31 -0
  11. jac_scale/context.jac +14 -0
  12. jac_scale/factories/database_factory.jac +43 -0
  13. jac_scale/factories/deployment_factory.jac +43 -0
  14. jac_scale/factories/registry_factory.jac +32 -0
  15. jac_scale/factories/utility_factory.jac +34 -0
  16. jac_scale/impl/config_loader.impl.jac +131 -0
  17. jac_scale/impl/context.impl.jac +24 -0
  18. jac_scale/impl/memory_hierarchy.main.impl.jac +63 -0
  19. jac_scale/impl/memory_hierarchy.mongo.impl.jac +239 -0
  20. jac_scale/impl/memory_hierarchy.redis.impl.jac +186 -0
  21. jac_scale/impl/serve.impl.jac +1785 -0
  22. jac_scale/jserver/__init__.py +0 -0
  23. jac_scale/jserver/impl/jfast_api.impl.jac +731 -0
  24. jac_scale/jserver/impl/jserver.impl.jac +79 -0
  25. jac_scale/jserver/jfast_api.jac +162 -0
  26. jac_scale/jserver/jserver.jac +101 -0
  27. jac_scale/memory_hierarchy.jac +138 -0
  28. jac_scale/plugin.jac +218 -0
  29. jac_scale/plugin_config.jac +175 -0
  30. jac_scale/providers/database/kubernetes_mongo.jac +137 -0
  31. jac_scale/providers/database/kubernetes_redis.jac +110 -0
  32. jac_scale/providers/registry/dockerhub.jac +64 -0
  33. jac_scale/serve.jac +118 -0
  34. jac_scale/targets/kubernetes/kubernetes_config.jac +215 -0
  35. jac_scale/targets/kubernetes/kubernetes_target.jac +841 -0
  36. jac_scale/targets/kubernetes/utils/kubernetes_utils.impl.jac +519 -0
  37. jac_scale/targets/kubernetes/utils/kubernetes_utils.jac +85 -0
  38. jac_scale/tests/__init__.py +0 -0
  39. jac_scale/tests/conftest.py +29 -0
  40. jac_scale/tests/fixtures/test_api.jac +159 -0
  41. jac_scale/tests/fixtures/todo_app.jac +68 -0
  42. jac_scale/tests/test_abstractions.py +88 -0
  43. jac_scale/tests/test_deploy_k8s.py +265 -0
  44. jac_scale/tests/test_examples.py +484 -0
  45. jac_scale/tests/test_factories.py +149 -0
  46. jac_scale/tests/test_file_upload.py +444 -0
  47. jac_scale/tests/test_k8s_utils.py +156 -0
  48. jac_scale/tests/test_memory_hierarchy.py +247 -0
  49. jac_scale/tests/test_serve.py +1835 -0
  50. jac_scale/tests/test_sso.py +711 -0
  51. jac_scale/utilities/loggers/standard_logger.jac +40 -0
  52. jac_scale/utils.jac +16 -0
  53. jac_scale-0.1.1.dist-info/METADATA +658 -0
  54. jac_scale-0.1.1.dist-info/RECORD +57 -0
  55. jac_scale-0.1.1.dist-info/WHEEL +5 -0
  56. jac_scale-0.1.1.dist-info/entry_points.txt +3 -0
  57. jac_scale-0.1.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,63 @@
1
+ """ScaleTieredMemory Implementation - Multi-tier storage coordination.
2
+
3
+ Storage configuration comes from environment variables or jac.toml.
4
+ """
5
+ import logging;
6
+ import from jaclang.runtimelib.memory { SqliteMemory }
7
+
8
+ glob logger = logging.getLogger(__name__);
9
+
10
+ """Initialize ScaleTieredMemory with distributed backends."""
11
+ impl ScaleTieredMemory.init(use_cache: bool = True) -> None {
12
+ # L1: Initialize volatile memory (inherited from VolatileMemory via TieredMemory)
13
+ self.__mem__ = {};
14
+ self.__gc__ = set();
15
+ # L2: Try to initialize Redis cache (replaces LocalCacheMemory)
16
+ redis_backend = RedisBackend();
17
+ self._cache_available = redis_backend.is_available();
18
+ if self._cache_available and use_cache {
19
+ self.l2 = redis_backend;
20
+ logger.debug("Redis cache backend initialized");
21
+ } else {
22
+ self.l2 = None;
23
+ logger.debug("Redis not available, running without distributed cache");
24
+ }
25
+ # L3: Try MongoDB first (replaces SqliteMemory), fall back to SqliteMemory
26
+ mongo_backend = MongoBackend();
27
+ if mongo_backend.is_available() {
28
+ self.l3 = mongo_backend;
29
+ self._persistence_type = PersistenceType.MONGODB;
30
+ logger.debug("MongoDB persistence backend initialized");
31
+ # Show subtle message (optional - uncomment to enable)
32
+ try {
33
+ import from jaclang.cli.console { console }
34
+ logger.debug(" ✔ Using MongoDB for persistence", style="muted");
35
+ } except Exception { }
36
+ } else {
37
+ # Fall back to jaclang's SqliteMemory using configured path
38
+ self.l3 = SqliteMemory(path=_db_config['shelf_db_path']);
39
+ self._persistence_type = PersistenceType.SQLITE;
40
+ logger.debug("MongoDB not available, using SqliteMemory for persistence");
41
+ # Show subtle message
42
+ try {
43
+ import from jaclang.cli.console { console }
44
+ logger.debug(" ✔ Using SQLite for persistence", style="muted");
45
+ } except Exception { }
46
+ }
47
+ }
48
+
49
+ """Close all backends."""
50
+ impl ScaleTieredMemory.close -> None {
51
+ # Sync and close L3 persistence
52
+ if self.l3 {
53
+ self.l3.sync();
54
+ self.l3.close();
55
+ }
56
+ # Close L2 cache
57
+ if self.l2 {
58
+ self.l2.close();
59
+ }
60
+ # Clear L1 (inherited from VolatileMemory)
61
+ self.__mem__.clear();
62
+ self.__gc__.clear();
63
+ }
@@ -0,0 +1,239 @@
1
+ """MongoDB PersistenceBackend Implementation."""
2
+ import logging;
3
+ import from collections.abc { Callable, Generator, Iterable }
4
+ import from pickle { dumps, loads }
5
+ import from typing { Any, cast }
6
+ import from uuid { UUID }
7
+ import from pymongo { MongoClient, UpdateOne }
8
+ import from pymongo.errors { ConnectionFailure }
9
+ import from jaclang.pycore.archetype { Anchor, NodeAnchor, Root }
10
+ import from jaclang.runtimelib.utils { storage_key, to_uuid }
11
+
12
+ glob logger = logging.getLogger(__name__);
13
+
14
+ """Post-initialization: connect to MongoDB."""
15
+ impl MongoBackend.postinit -> None {
16
+ if self.client is None and self.mongo_url {
17
+ try {
18
+ self.client = MongoClient(self.mongo_url);
19
+ self.db = self.client[self.db_name];
20
+ self.collection = self.db[self.collection_name];
21
+ } except Exception as e {
22
+ logger.debug(f"MongoDB connection failed: {e}");
23
+ self.client = None;
24
+ }
25
+ }
26
+ }
27
+
28
+ """Check if MongoDB is available and connected."""
29
+ impl MongoBackend.is_available -> bool {
30
+ if not self.mongo_url {
31
+ return False;
32
+ }
33
+ client = None;
34
+ try {
35
+ client = MongoClient(self.mongo_url);
36
+ client.admin.command('ping', maxTimeMS=100);
37
+ return True;
38
+ } except ConnectionFailure {
39
+ return False;
40
+ } except Exception as e {
41
+ logger.debug(f"MongoDB availability check failed: {e}");
42
+ return False;
43
+ } finally {
44
+ if client {
45
+ try {
46
+ client.close();
47
+ } except Exception { }
48
+ }
49
+ }
50
+ }
51
+
52
+ """Get anchor by UUID from MongoDB."""
53
+ impl MongoBackend.get(id: UUID) -> (Anchor | None) {
54
+ if self.client is None {
55
+ return None;
56
+ }
57
+ _id = to_uuid(id);
58
+ try {
59
+ db_obj = self.collection.find_one({'_id': str(_id)});
60
+ if db_obj {
61
+ return self._load_anchor(db_obj);
62
+ }
63
+ } except Exception as e {
64
+ logger.debug(f"MongoDB get failed: {e}");
65
+ }
66
+ return None;
67
+ }
68
+
69
+ """Store anchor in MongoDB."""
70
+ impl MongoBackend.put(anchor: Anchor) -> None {
71
+ if self.client is None or not anchor.persistent {
72
+ return;
73
+ }
74
+ _id = to_uuid(anchor.id);
75
+ try {
76
+ data_blob = dumps(anchor);
77
+ self.collection.update_one(
78
+ {'_id': str(_id)},
79
+ {'$set': {'data': data_blob, 'type': type(anchor).__name__}},
80
+ upsert=True
81
+ );
82
+ } except Exception as e {
83
+ logger.debug(f"MongoDB put failed: {e}");
84
+ }
85
+ }
86
+
87
+ """Delete anchor from MongoDB."""
88
+ impl MongoBackend.delete(id: UUID) -> None {
89
+ if self.client is None {
90
+ return;
91
+ }
92
+ _id = to_uuid(id);
93
+ try {
94
+ self.collection.delete_one({'_id': str(_id)});
95
+ } except Exception as e {
96
+ logger.debug(f"MongoDB delete failed: {e}");
97
+ }
98
+ }
99
+
100
+ """Close MongoDB connection."""
101
+ impl MongoBackend.close -> None {
102
+ if self.client {
103
+ try {
104
+ self.client.close();
105
+ } except Exception as e {
106
+ logger.warning(f"Error closing MongoDB connection: {e}");
107
+ }
108
+ self.client = None;
109
+ }
110
+ }
111
+
112
+ """Check if an anchor exists in MongoDB."""
113
+ impl MongoBackend.has(id: UUID) -> bool {
114
+ if self.client is None {
115
+ return False;
116
+ }
117
+ _id = to_uuid(id);
118
+ try {
119
+ return self.collection.count_documents({'_id': str(_id)}, limit=1) > 0;
120
+ } except Exception {
121
+ return False;
122
+ }
123
+ }
124
+
125
+ """Query all anchors with optional filter."""
126
+ impl MongoBackend.query(
127
+ filter: (Callable[[Anchor], bool] | None) = None
128
+ ) -> Generator[Anchor, None, None] {
129
+ if self.client is None {
130
+ return;
131
+ }
132
+ try {
133
+ for doc in self.collection.find() {
134
+ if (anchor := self._load_anchor(doc)) {
135
+ if filter is None or filter(anchor) {
136
+ yield anchor;
137
+ }
138
+ }
139
+ }
140
+ } except Exception as e {
141
+ logger.debug(f"MongoDB query failed: {e}");
142
+ }
143
+ }
144
+
145
+ """Get all root anchors."""
146
+ impl MongoBackend.get_roots -> Generator[Root, None, None] {
147
+ for anchor in self.query() {
148
+ if isinstance(anchor.archetype, Root) {
149
+ yield cast(Root, anchor.archetype);
150
+ }
151
+ }
152
+ }
153
+
154
+ """Find anchors by IDs with optional filter."""
155
+ impl MongoBackend.find(
156
+ ids: (UUID | Iterable[UUID]), filter: (Callable[[Anchor], Anchor] | None) = None
157
+ ) -> Generator[Anchor, None, None] {
158
+ id_list = [ids] if isinstance(ids, UUID) else list(ids);
159
+ for id in id_list {
160
+ if (anchor := self.get(id)) {
161
+ if filter is None or filter(anchor) {
162
+ yield anchor;
163
+ }
164
+ }
165
+ }
166
+ }
167
+
168
+ """Find one anchor by ID(s) with optional filter."""
169
+ impl MongoBackend.find_one(
170
+ ids: (UUID | Iterable[UUID]), filter: (Callable[[Anchor], Anchor] | None) = None
171
+ ) -> (Anchor | None) {
172
+ id_list = [ids] if isinstance(ids, UUID) else list(ids);
173
+ for id in id_list {
174
+ if (anchor := self.get(id)) {
175
+ if filter is None or filter(anchor) {
176
+ return anchor;
177
+ }
178
+ }
179
+ }
180
+ return None;
181
+ }
182
+
183
+ """Commit - no-op for MongoDB (writes are immediate)."""
184
+ impl MongoBackend.commit(anchor: (Anchor | None) = None) -> None {
185
+ # No-op: MongoDB writes are immediate
186
+ }
187
+
188
# PersistentMemory-specific methods
"""Sync - no-op for MongoDB (writes are immediate)."""
impl MongoBackend.sync -> None {
    # Intentionally empty: MongoDB writes need no explicit flush.
}
193
+
194
+ """Bulk store multiple anchors."""
195
+ impl MongoBackend.bulk_put(anchors: Iterable[Anchor]) -> None {
196
+ if self.client is None {
197
+ return;
198
+ }
199
+ ops: list = [];
200
+ for anchor in anchors {
201
+ if not anchor.persistent {
202
+ continue;
203
+ }
204
+ _id = to_uuid(anchor.id);
205
+ try {
206
+ data_blob = dumps(anchor);
207
+ ops.append(
208
+ UpdateOne(
209
+ {'_id': str(_id)},
210
+ {'$set': {'data': data_blob, 'type': type(anchor).__name__}},
211
+ upsert=True
212
+ )
213
+ );
214
+ } except Exception as e {
215
+ logger.debug(f"MongoDB bulk_put serialization failed: {e}");
216
+ }
217
+ }
218
+ if ops {
219
+ try {
220
+ self.collection.bulk_write(ops);
221
+ } except Exception as e {
222
+ logger.debug(f"MongoDB bulk_write failed: {e}");
223
+ }
224
+ }
225
+ }
226
+
227
+ """Load anchor from raw MongoDB document."""
228
+ impl MongoBackend._load_anchor(raw: dict[(str, Any)]) -> (Anchor | None) {
229
+ if 'data' not in raw {
230
+ return None;
231
+ }
232
+ try {
233
+ data: bytes = raw['data'];
234
+ return loads(data);
235
+ } except Exception as e {
236
+ logger.debug(f"MongoDB _load_anchor failed: {e}");
237
+ return None;
238
+ }
239
+ }
@@ -0,0 +1,186 @@
1
+ """Redis CacheBackend Implementation."""
2
+ import redis;
3
+ import logging;
4
+ import from collections.abc { Callable, Generator, Iterable }
5
+ import from pickle { dumps, loads }
6
+ import from uuid { UUID }
7
+ import from jaclang.pycore.archetype { Anchor, Root }
8
+ import from jaclang.runtimelib.utils { storage_key, to_uuid }
9
+
10
+ glob logger = logging.getLogger(__name__);
11
+
12
+ """Post-initialization: connect to Redis."""
13
+ impl RedisBackend.postinit -> None {
14
+ if self.redis_url and self.redis_client is None {
15
+ try {
16
+ self.redis_client = redis.from_url(self.redis_url);
17
+ } except Exception as e {
18
+ logger.debug(f"Redis connection failed: {e}");
19
+ self.redis_client = None;
20
+ }
21
+ }
22
+ }
23
+
24
+ """Check if Redis is available and connected."""
25
+ impl RedisBackend.is_available -> bool {
26
+ if not self.redis_url {
27
+ return False;
28
+ }
29
+ client = None;
30
+ try {
31
+ client = redis.from_url(self.redis_url);
32
+ client.ping();
33
+ return True;
34
+ } except Exception as e {
35
+ logger.debug(f"Redis availability check failed: {e}");
36
+ return False;
37
+ } finally {
38
+ if client {
39
+ try {
40
+ client.close();
41
+ } except Exception { }
42
+ }
43
+ }
44
+ }
45
+
46
+ """Get anchor by UUID from Redis cache."""
47
+ impl RedisBackend.get(id: UUID) -> (Anchor | None) {
48
+ if self.redis_client is None {
49
+ return None;
50
+ }
51
+ key = storage_key(to_uuid(id));
52
+ try {
53
+ raw = self.redis_client.get(key);
54
+ if not raw {
55
+ return None;
56
+ }
57
+ return loads(raw);
58
+ } except Exception as e {
59
+ logger.debug(f"Redis get failed: {e}");
60
+ return None;
61
+ }
62
+ }
63
+
64
+ """Store anchor in Redis cache."""
65
+ impl RedisBackend.put(anchor: Anchor) -> None {
66
+ if self.redis_client is None {
67
+ return;
68
+ }
69
+ try {
70
+ data = dumps(anchor);
71
+ key = storage_key(anchor.id);
72
+ self.redis_client.set(key, data);
73
+ } except Exception as e {
74
+ logger.debug(f"Redis put failed: {e}");
75
+ }
76
+ }
77
+
78
+ """Delete anchor from Redis cache."""
79
+ impl RedisBackend.delete(id: UUID) -> None {
80
+ if self.redis_client is None {
81
+ return;
82
+ }
83
+ try {
84
+ key = storage_key(to_uuid(id));
85
+ self.redis_client.delete(key);
86
+ } except Exception as e {
87
+ logger.debug(f"Redis delete failed: {e}");
88
+ }
89
+ }
90
+
91
+ """Close Redis connection."""
92
+ impl RedisBackend.close -> None {
93
+ if self.redis_client {
94
+ try {
95
+ self.redis_client.close();
96
+ } except Exception as e {
97
+ logger.warning(f"Error closing Redis connection: {e}");
98
+ }
99
+ self.redis_client = None;
100
+ }
101
+ }
102
+
103
+ """Check if an anchor is in the cache."""
104
+ impl RedisBackend.has(id: UUID) -> bool {
105
+ return self.exists(id);
106
+ }
107
+
108
+ """Query all anchors - not supported for distributed cache."""
109
+ impl RedisBackend.query(
110
+ filter: (Callable[[Anchor], bool] | None) = None
111
+ ) -> Generator[Anchor, None, None] {
112
+ # Redis doesn't support iteration efficiently
113
+ return;
114
+ yield ;
115
+ }
116
+
117
+ """Get all root anchors - not supported for distributed cache."""
118
+ impl RedisBackend.get_roots -> Generator[Root, None, None] {
119
+ return;
120
+ yield ;
121
+ }
122
+
123
+ """Find anchors by IDs with optional filter."""
124
+ impl RedisBackend.find(
125
+ ids: (UUID | Iterable[UUID]), filter: (Callable[[Anchor], Anchor] | None) = None
126
+ ) -> Generator[Anchor, None, None] {
127
+ id_list = [ids] if isinstance(ids, UUID) else list(ids);
128
+ for id in id_list {
129
+ if (anchor := self.get(id)) {
130
+ if filter is None or filter(anchor) {
131
+ yield anchor;
132
+ }
133
+ }
134
+ }
135
+ }
136
+
137
+ """Find one anchor by ID(s) with optional filter."""
138
+ impl RedisBackend.find_one(
139
+ ids: (UUID | Iterable[UUID]), filter: (Callable[[Anchor], Anchor] | None) = None
140
+ ) -> (Anchor | None) {
141
+ id_list = [ids] if isinstance(ids, UUID) else list(ids);
142
+ for id in id_list {
143
+ if (anchor := self.get(id)) {
144
+ if filter is None or filter(anchor) {
145
+ return anchor;
146
+ }
147
+ }
148
+ }
149
+ return None;
150
+ }
151
+
152
+ """Commit - no-op for cache."""
153
+ impl RedisBackend.commit(anchor: (Anchor | None) = None) -> None {
154
+ # No-op: cache doesn't need commit
155
+ }
156
+
157
# CacheMemory-specific methods
"""Check if key exists in cache.

Returns False when unconnected or on any backend error.
"""
impl RedisBackend.exists(id: UUID) -> bool {
    if self.redis_client is None {
        return False;
    }
    try {
        cache_key = storage_key(to_uuid(id));
        return bool(self.redis_client.exists(cache_key));
    } except Exception {
        return False;
    }
}
170
+
171
+ """Store anchor only if it already exists in cache."""
172
+ impl RedisBackend.put_if_exists(anchor: Anchor) -> bool {
173
+ if self.redis_client is None {
174
+ return False;
175
+ }
176
+ if not self.exists(anchor.id) {
177
+ return False;
178
+ }
179
+ self.put(anchor);
180
+ return True;
181
+ }
182
+
183
+ """Remove an entry from cache."""
184
+ impl RedisBackend.invalidate(id: UUID) -> None {
185
+ self.delete(id);
186
+ }