chuk-artifacts 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_artifacts/admin.py +93 -17
- chuk_artifacts/base.py +9 -3
- chuk_artifacts/batch.py +44 -29
- chuk_artifacts/core.py +18 -17
- chuk_artifacts/metadata.py +24 -22
- chuk_artifacts/presigned.py +24 -23
- chuk_artifacts/store.py +52 -12
- {chuk_artifacts-0.1.3.dist-info → chuk_artifacts-0.1.5.dist-info}/METADATA +2 -2
- chuk_artifacts-0.1.5.dist-info/RECORD +23 -0
- chuk_artifacts/session/__init__.py +0 -0
- chuk_artifacts/session/session_manager.py +0 -196
- chuk_artifacts/session/session_operations.py +0 -366
- chuk_artifacts-0.1.3.dist-info/RECORD +0 -26
- {chuk_artifacts-0.1.3.dist-info → chuk_artifacts-0.1.5.dist-info}/WHEEL +0 -0
- {chuk_artifacts-0.1.3.dist-info → chuk_artifacts-0.1.5.dist-info}/licenses/LICENSE +0 -0
- {chuk_artifacts-0.1.3.dist-info → chuk_artifacts-0.1.5.dist-info}/top_level.txt +0 -0
chuk_artifacts/admin.py
CHANGED
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # chuk_artifacts/admin.py
 """
-Administrative and debugging operations
+Administrative and debugging operations.
+Now includes chuk_sessions integration.
 """

 from __future__ import annotations
@@ -20,6 +21,10 @@ class AdminOperations:
     """Handles administrative and debugging operations."""

     def __init__(self, artifact_store: 'ArtifactStore'):
+        # canonical reference
+        self.artifact_store = artifact_store
+
+        # backward-compat/consistency with other ops modules
         self.store = artifact_store

     async def validate_configuration(self) -> Dict[str, Any]:
@@ -28,7 +33,7 @@ class AdminOperations:

         # Test session provider
         try:
-            session_ctx_mgr = self.
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 # Test basic operations
                 test_key = f"test_{uuid.uuid4().hex}"
@@ -38,48 +43,119 @@ class AdminOperations:
                 if value == "test_value":
                     results["session"] = {
                         "status": "ok",
-                        "provider": self.
+                        "provider": self.artifact_store._session_provider_name
                     }
                 else:
                     results["session"] = {
                         "status": "error",
                         "message": "Session store test failed",
-                        "provider": self.
+                        "provider": self.artifact_store._session_provider_name
                     }
         except Exception as e:
             results["session"] = {
                 "status": "error",
                 "message": str(e),
-                "provider": self.
+                "provider": self.artifact_store._session_provider_name
             }

         # Test storage provider
         try:
-            storage_ctx_mgr = self.
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
-                await s3.head_bucket(Bucket=self.
+                await s3.head_bucket(Bucket=self.artifact_store.bucket)
             results["storage"] = {
                 "status": "ok",
-                "bucket": self.
-                "provider": self.
+                "bucket": self.artifact_store.bucket,
+                "provider": self.artifact_store._storage_provider_name
             }
         except Exception as e:
             results["storage"] = {
                 "status": "error",
                 "message": str(e),
-                "provider": self.
+                "provider": self.artifact_store._storage_provider_name
+            }
+
+        # Test session manager (chuk_sessions)
+        try:
+            # Try to allocate a test session
+            test_session = await self.artifact_store._session_manager.allocate_session(
+                user_id="test_admin_user"
+            )
+            # Validate it
+            is_valid = await self.artifact_store._session_manager.validate_session(test_session)
+            # Clean up
+            await self.artifact_store._session_manager.delete_session(test_session)
+
+            if is_valid:
+                results["session_manager"] = {
+                    "status": "ok",
+                    "sandbox_id": self.artifact_store.sandbox_id,
+                    "test_session": test_session
+                }
+            else:
+                results["session_manager"] = {
+                    "status": "error",
+                    "message": "Session validation failed"
+                }
+        except Exception as e:
+            results["session_manager"] = {
+                "status": "error",
+                "message": str(e)
             }

         return results

     async def get_stats(self) -> Dict[str, Any]:
         """Get storage statistics."""
+        base_stats = {
+            "storage_provider": self.artifact_store._storage_provider_name,
+            "session_provider": self.artifact_store._session_provider_name,
+            "bucket": self.artifact_store.bucket,
+            "max_retries": self.artifact_store.max_retries,
+            "closed": self.artifact_store._closed,
+            "sandbox_id": self.artifact_store.sandbox_id,
+            "session_ttl_hours": self.artifact_store.session_ttl_hours,
+        }
+
+        # Add session manager stats from chuk_sessions
+        try:
+            session_stats = self.artifact_store._session_manager.get_cache_stats()
+            base_stats["session_manager"] = session_stats
+        except Exception as e:
+            base_stats["session_manager"] = {
+                "error": str(e),
+                "status": "unavailable"
+            }
+
+        return base_stats
+
+    async def cleanup_all_expired(self) -> Dict[str, int]:
+        """Clean up all expired resources."""
+        results = {"timestamp": datetime.utcnow().isoformat() + "Z"}
+
+        # Clean up expired sessions using chuk_sessions
+        try:
+            expired_sessions = await self.artifact_store._session_manager.cleanup_expired_sessions()
+            results["expired_sessions_cleaned"] = expired_sessions
+        except Exception as e:
+            results["session_cleanup_error"] = str(e)
+            results["expired_sessions_cleaned"] = 0
+
+        # TODO: Add artifact cleanup based on TTL
+        # This would require scanning metadata to find expired artifacts
+        results["expired_artifacts_cleaned"] = 0  # Placeholder
+
+        return results
+
+    async def get_sandbox_info(self) -> Dict[str, Any]:
+        """Get information about the current sandbox."""
         return {
-            "
-            "
-            "
-
-
-
-
+            "sandbox_id": self.artifact_store.sandbox_id,
+            "session_prefix_pattern": self.artifact_store.get_session_prefix_pattern(),
+            "grid_architecture": {
+                "enabled": True,
+                "pattern": "grid/{sandbox_id}/{session_id}/{artifact_id}",
+                "mandatory_sessions": True,
+                "federation_ready": True
+            }
         }
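For orientation, here is a minimal usage sketch (not from the package docs) that exercises the AdminOperations surface added or reworked in 0.1.5. It assumes `store` is an already-configured ArtifactStore instance; the store's constructor and provider setup are not shown in this diff.

# Hedged sketch: calling the AdminOperations methods visible in the hunks above.
# `store` is assumed to be a configured chuk_artifacts ArtifactStore.
from chuk_artifacts.admin import AdminOperations

async def run_admin_checks(store) -> None:
    admin = AdminOperations(store)                # constructor shown in the diff
    print(await admin.validate_configuration())   # session / storage / session_manager checks
    print(await admin.get_stats())                # now includes chuk_sessions cache stats
    print(await admin.cleanup_all_expired())      # expired-session cleanup counters
    print(await admin.get_sandbox_info())         # grid architecture details

Run the coroutine from whatever event loop the application already uses (for example via asyncio.run with a real store instance).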
chuk_artifacts/base.py
CHANGED
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # chuk_artifacts/base.py
 """
-
+Base class for operation modules.
+Updated to work with chuk_sessions integration.
 """

 from __future__ import annotations
@@ -17,10 +18,10 @@ logger = logging.getLogger(__name__)


 class BaseOperations:
-    """
+    """Base class for all operation modules."""

     def __init__(self, store: 'ArtifactStore'):
-        #
+        # Store reference to artifact store
         self._artifact_store = store

     @property
@@ -47,6 +48,11 @@ class BaseOperations:
     def max_retries(self) -> int:
         return self._artifact_store.max_retries

+    @property
+    def session_manager(self):
+        """Access to chuk_sessions SessionManager."""
+        return self._artifact_store._session_manager
+
     def _check_closed(self):
         """Check if store is closed and raise error if so."""
         if self._artifact_store._closed:
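A small illustrative sketch of how an operations module can lean on the new session_manager property. The subclass and helper method below are hypothetical; _check_closed comes from this file's hunks and allocate_session(user_id=...) mirrors the call visible in the admin.py hunks.

# Hypothetical subclass demonstrating BaseOperations.session_manager.
from chuk_artifacts.base import BaseOperations

class ExampleOperations(BaseOperations):
    async def allocate_for_user(self, user_id: str) -> str:
        self._check_closed()  # raises when the underlying store is closed
        # session_manager proxies to the store's chuk_sessions SessionManager
        return await self.session_manager.allocate_session(user_id=user_id)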
chuk_artifacts/batch.py
CHANGED
@@ -1,28 +1,32 @@
-#
-# chuk_artifacts/batch.py
-# ===========================================================================
+# -*- coding: utf-8 -*-
+# chuk_artifacts/batch.py
 """
 Batch operations for multiple artifacts.
+Now uses chuk_sessions for session management.
 """

 from __future__ import annotations

-import uuid, hashlib, json, logging
+import uuid, hashlib, json, logging, asyncio
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, TYPE_CHECKING

-
-from .
+if TYPE_CHECKING:
+    from .store import ArtifactStore
+
+from .exceptions import ArtifactStoreError, ProviderError, SessionError

 logger = logging.getLogger(__name__)

-_ANON_PREFIX = "anon"
 _DEFAULT_TTL = 900


-class BatchOperations
+class BatchOperations:
     """Handles batch operations for multiple artifacts."""

+    def __init__(self, artifact_store: 'ArtifactStore'):
+        self.artifact_store = artifact_store
+
     async def store_batch(
         self,
         items: List[Dict[str, Any]],
@@ -30,7 +34,14 @@ class BatchOperations(BaseOperations):
         ttl: int = _DEFAULT_TTL,
     ) -> List[str]:
         """Store multiple artifacts in a batch operation."""
-        self.
+        if self.artifact_store._closed:
+            raise ArtifactStoreError("Store is closed")
+
+        # Ensure session is allocated using chuk_sessions
+        if session_id is None:
+            session_id = await self.artifact_store._session_manager.allocate_session()
+        else:
+            session_id = await self.artifact_store._session_manager.allocate_session(session_id=session_id)

         artifact_ids = []
         failed_items = []
@@ -38,18 +49,19 @@ class BatchOperations(BaseOperations):
         for i, item in enumerate(items):
             try:
                 artifact_id = uuid.uuid4().hex
-
-                key = f"sess/{scope}/{artifact_id}"
+                key = self.artifact_store.generate_artifact_key(session_id, artifact_id)

                 # Store in object storage
                 await self._store_with_retry(
                     item["data"], key, item["mime"],
-                    item.get("filename"),
+                    item.get("filename"), session_id
                 )

                 # Prepare metadata record
                 record = {
-                    "
+                    "artifact_id": artifact_id,
+                    "session_id": session_id,
+                    "sandbox_id": self.artifact_store.sandbox_id,
                     "key": key,
                     "mime": item["mime"],
                     "summary": item["summary"],
@@ -57,14 +69,16 @@ class BatchOperations(BaseOperations):
                     "filename": item.get("filename"),
                     "bytes": len(item["data"]),
                     "sha256": hashlib.sha256(item["data"]).hexdigest(),
-                    "stored_at": datetime.utcnow().isoformat(
+                    "stored_at": datetime.utcnow().isoformat() + "Z",
                     "ttl": ttl,
-                    "storage_provider": self.
-                    "session_provider": self.
+                    "storage_provider": self.artifact_store._storage_provider_name,
+                    "session_provider": self.artifact_store._session_provider_name,
+                    "batch_operation": True,
+                    "batch_index": i,
                 }

                 # Store metadata via session provider
-                session_ctx_mgr = self.
+                session_ctx_mgr = self.artifact_store._session_factory()
                 async with session_ctx_mgr as session:
                     await session.setex(artifact_id, ttl, json.dumps(record))

@@ -80,28 +94,30 @@ class BatchOperations(BaseOperations):

         return artifact_ids

-    async def _store_with_retry(self, data: bytes, key: str, mime: str, filename: str,
+    async def _store_with_retry(self, data: bytes, key: str, mime: str, filename: str, session_id: str):
         """Store data with retry logic (copied from core for batch operations)."""
-        import asyncio
-
         last_exception = None

-        for attempt in range(self.max_retries):
+        for attempt in range(self.artifact_store.max_retries):
             try:
-                storage_ctx_mgr = self.
+                storage_ctx_mgr = self.artifact_store._s3_factory()
                 async with storage_ctx_mgr as s3:
                     await s3.put_object(
-                        Bucket=self.bucket,
+                        Bucket=self.artifact_store.bucket,
                         Key=key,
                         Body=data,
                         ContentType=mime,
-                        Metadata={
+                        Metadata={
+                            "filename": filename or "",
+                            "session_id": session_id,
+                            "sandbox_id": self.artifact_store.sandbox_id,
+                        },
                     )
                     return  # Success

             except Exception as e:
                 last_exception = e
-                if attempt < self.max_retries - 1:
+                if attempt < self.artifact_store.max_retries - 1:
                     wait_time = 2 ** attempt  # Exponential backoff
                     logger.warning(
                         f"Batch storage attempt {attempt + 1} failed, retrying in {wait_time}s",
@@ -109,7 +125,6 @@ class BatchOperations(BaseOperations):
                     )
                     await asyncio.sleep(wait_time)
                 else:
-                    logger.error(f"All {self.max_retries} batch storage attempts failed")
+                    logger.error(f"All {self.artifact_store.max_retries} batch storage attempts failed")

-        raise last_exception
-
+        raise last_exception
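As a rough illustration of the new call shape, the sketch below stores two items through BatchOperations. The item fields (data, mime, summary, filename) are the ones the hunks above read; the session_id and ttl keyword names are taken from the visible code, and the meta field is included defensively because the record line that would consume it is not shown in this diff. Passing session_id=None relies on the new chuk_sessions allocation path.

# Hedged sketch of BatchOperations.store_batch in 0.1.5.
# `store` is assumed to be a configured ArtifactStore.
from chuk_artifacts.batch import BatchOperations

async def upload_two_reports(store) -> list:
    batch = BatchOperations(store)
    items = [
        {"data": b"alpha", "mime": "text/plain", "summary": "first report",
         "filename": "a.txt", "meta": {}},   # meta included defensively; not shown in this diff
        {"data": b"beta", "mime": "text/plain", "summary": "second report",
         "filename": "b.txt", "meta": {}},
    ]
    # With session_id=None, a session is allocated via chuk_sessions before storing.
    return await batch.store_batch(items, session_id=None, ttl=900)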
chuk_artifacts/core.py
CHANGED
@@ -2,6 +2,7 @@
 # chuk_artifacts/core.py
 """
 Clean core storage operations - grid architecture only.
+Now uses chuk_sessions for session management.
 """

 from __future__ import annotations
@@ -29,7 +30,7 @@ class CoreStorageOperations:
     """Clean core storage operations with grid architecture."""

     def __init__(self, artifact_store: 'ArtifactStore'):
-        self.
+        self.artifact_store = artifact_store

     async def store(
         self,
@@ -43,14 +44,14 @@ class CoreStorageOperations:
         ttl: int = _DEFAULT_TTL,
     ) -> str:
         """Store artifact with grid key generation."""
-        if self.
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")

         start_time = time.time()
         artifact_id = uuid.uuid4().hex

-        # Generate grid key
-        key = self.
+        # Generate grid key using chuk_sessions
+        key = self.artifact_store.generate_artifact_key(session_id, artifact_id)

         try:
             # Store in object storage
@@ -60,7 +61,7 @@ class CoreStorageOperations:
             record = {
                 "artifact_id": artifact_id,
                 "session_id": session_id,
-                "sandbox_id": self.
+                "sandbox_id": self.artifact_store.sandbox_id,
                 "key": key,
                 "mime": mime,
                 "summary": summary,
@@ -70,12 +71,12 @@ class CoreStorageOperations:
                 "sha256": hashlib.sha256(data).hexdigest(),
                 "stored_at": datetime.utcnow().isoformat() + "Z",
                 "ttl": ttl,
-                "storage_provider": self.
-                "session_provider": self.
+                "storage_provider": self.artifact_store._storage_provider_name,
+                "session_provider": self.artifact_store._session_provider_name,
             }

             # Store metadata
-            session_ctx_mgr = self.
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 await session.setex(artifact_id, ttl, json.dumps(record))

@@ -102,16 +103,16 @@ class CoreStorageOperations:

     async def retrieve(self, artifact_id: str) -> bytes:
         """Retrieve artifact data."""
-        if self.
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")

         try:
             record = await self._get_record(artifact_id)

-            storage_ctx_mgr = self.
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 response = await s3.get_object(
-                    Bucket=self.
+                    Bucket=self.artifact_store.bucket,
                     Key=record["key"]
                 )

@@ -146,26 +147,26 @@ class CoreStorageOperations:
         """Store with retry logic."""
         last_exception = None

-        for attempt in range(self.
+        for attempt in range(self.artifact_store.max_retries):
             try:
-                storage_ctx_mgr = self.
+                storage_ctx_mgr = self.artifact_store._s3_factory()
                 async with storage_ctx_mgr as s3:
                     await s3.put_object(
-                        Bucket=self.
+                        Bucket=self.artifact_store.bucket,
                         Key=key,
                         Body=data,
                         ContentType=mime,
                         Metadata={
                             "filename": filename or "",
                             "session_id": session_id,
-                            "sandbox_id": self.
+                            "sandbox_id": self.artifact_store.sandbox_id,
                         },
                     )
                     return  # Success

             except Exception as e:
                 last_exception = e
-                if attempt < self.
+                if attempt < self.artifact_store.max_retries - 1:
                     wait_time = 2 ** attempt
                     await asyncio.sleep(wait_time)

@@ -174,7 +175,7 @@ class CoreStorageOperations:
     async def _get_record(self, artifact_id: str) -> Dict[str, Any]:
         """Get artifact metadata."""
         try:
-            session_ctx_mgr = self.
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 raw = await session.get(artifact_id)
         except Exception as e:
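The store/retrieve round trip below sketches the core operations as they look after this change. CoreStorageOperations(store) is shown in the diff; the keyword names passed to store() are inferred from the fields its body and metadata record use, so treat them as assumptions rather than a published API.

# Hedged sketch of a store/retrieve round trip with CoreStorageOperations.
# `store` is a configured ArtifactStore and `session_id` an allocated session.
from chuk_artifacts.core import CoreStorageOperations

async def roundtrip(store, session_id: str) -> bytes:
    core = CoreStorageOperations(store)
    artifact_id = await core.store(
        data=b"hello grid",          # keyword names inferred from this diff
        mime="text/plain",
        summary="demo artifact",
        filename="hello.txt",
        session_id=session_id,
        ttl=900,
    )
    # retrieve() looks up the metadata record, then reads the grid key from object storage.
    return await core.retrieve(artifact_id)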
chuk_artifacts/metadata.py
CHANGED
@@ -2,6 +2,7 @@
 # chuk_artifacts/metadata.py
 """
 Clean metadata operations for grid architecture.
+Now uses chuk_sessions for session management.
 """

 from __future__ import annotations
@@ -13,16 +14,16 @@ from typing import Any, Dict, List, Optional, TYPE_CHECKING
 if TYPE_CHECKING:
     from .store import ArtifactStore

-from .exceptions import ProviderError, SessionError
+from .exceptions import ProviderError, SessionError, ArtifactNotFoundError

 logger = logging.getLogger(__name__)


 class MetadataOperations:
-    """Clean metadata operations for grid architecture."""
+    """Clean metadata operations for grid architecture using chuk_sessions."""

     def __init__(self, artifact_store: 'ArtifactStore'):
-        self.
+        self.artifact_store = artifact_store

     async def get_metadata(self, artifact_id: str) -> Dict[str, Any]:
         """Get artifact metadata."""
@@ -42,15 +43,15 @@ class MetadataOperations:
             record = await self._get_record(artifact_id)

             # Delete from storage
-            storage_ctx_mgr = self.
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 await s3.delete_object(
-                    Bucket=self.
+                    Bucket=self.artifact_store.bucket,
                     Key=record["key"]
                 )

-            # Delete metadata
-            session_ctx_mgr = self.
+            # Delete metadata from session provider
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 if hasattr(session, 'delete'):
                     await session.delete(artifact_id)
@@ -63,26 +64,27 @@ class MetadataOperations:
             return False

     async def list_by_session(self, session_id: str, limit: int = 100) -> List[Dict[str, Any]]:
-        """List artifacts in a session using grid prefix."""
+        """List artifacts in a session using grid prefix from chuk_sessions."""
         try:
             artifacts = []
-            prefix
+            # Use the session manager's canonical prefix instead of building our own
+            prefix = self.artifact_store._session_manager.get_canonical_prefix(session_id)

-            storage_ctx_mgr = self.
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 if hasattr(s3, 'list_objects_v2'):
                     response = await s3.list_objects_v2(
-                        Bucket=self.
+                        Bucket=self.artifact_store.bucket,
                         Prefix=prefix,
                         MaxKeys=limit
                     )

                     for obj in response.get('Contents', []):
                         key = obj['Key']
-                        #
-
-                        if
-                        artifact_id =
+                        # Parse the grid key using chuk_sessions
+                        parsed = self.artifact_store._session_manager.parse_grid_key(key)
+                        if parsed and parsed.get('artifact_id'):
+                            artifact_id = parsed['artifact_id']
                             try:
                                 record = await self._get_record(artifact_id)
                                 artifacts.append(record)
@@ -155,8 +157,8 @@ class MetadataOperations:
             if key not in ["summary", "meta"] and value is not None:
                 record[key] = value

-        # Store updated record
-        session_ctx_mgr = self.
+        # Store updated record using session provider
+        session_ctx_mgr = self.artifact_store._session_factory()
         async with session_ctx_mgr as session:
             await session.setex(artifact_id, record.get("ttl", 900), json.dumps(record))

@@ -181,8 +183,8 @@ class MetadataOperations:
         new_ttl = current_ttl + additional_seconds
         record["ttl"] = new_ttl

-        # Store updated record with new TTL
-        session_ctx_mgr = self.
+        # Store updated record with new TTL using session provider
+        session_ctx_mgr = self.artifact_store._session_factory()
         async with session_ctx_mgr as session:
             await session.setex(artifact_id, new_ttl, json.dumps(record))

@@ -193,16 +195,16 @@ class MetadataOperations:
             raise ProviderError(f"TTL extension failed: {e}") from e

     async def _get_record(self, artifact_id: str) -> Dict[str, Any]:
-        """Get artifact metadata record."""
+        """Get artifact metadata record from session provider."""
         try:
-            session_ctx_mgr = self.
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 raw = await session.get(artifact_id)
         except Exception as e:
             raise SessionError(f"Session error for {artifact_id}: {e}") from e

         if raw is None:
-            raise
+            raise ArtifactNotFoundError(f"Artifact {artifact_id} not found")

         try:
             return json.loads(raw)
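Finally, a sketch of the metadata surface using only the signatures visible above: MetadataOperations(artifact_store), get_metadata(artifact_id), and list_by_session(session_id, limit). The record field names come from the records written in core.py and batch.py; the surrounding store object is assumed to be configured.

# Hedged sketch of MetadataOperations after the chuk_sessions change.
# Only methods whose signatures are visible in this diff are called.
from chuk_artifacts.metadata import MetadataOperations

async def summarize_session(store, session_id: str) -> list:
    meta = MetadataOperations(store)
    records = await meta.list_by_session(session_id, limit=100)
    # Each record carries the grid key, mime, summary, sha256, and ttl written at store time.
    return [f"{r['artifact_id']}: {r.get('summary', '')}" for r in records]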