chuk-artifacts 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chuk_artifacts/admin.py CHANGED
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # chuk_artifacts/admin.py
 """
-Administrative and debugging operations
+Administrative and debugging operations.
+Now includes chuk_sessions integration.
 """
 
 from __future__ import annotations
@@ -20,7 +21,7 @@ class AdminOperations:
     """Handles administrative and debugging operations."""
 
     def __init__(self, artifact_store: 'ArtifactStore'):
-        self.store = artifact_store
+        self.artifact_store = artifact_store
 
     async def validate_configuration(self) -> Dict[str, Any]:
         """Validate store configuration and connectivity."""
@@ -70,11 +71,39 @@ class AdminOperations:
                 "provider": self.store._storage_provider_name
             }
 
+        # Test session manager (chuk_sessions)
+        try:
+            # Try to allocate a test session
+            test_session = await self.store._session_manager.allocate_session(
+                user_id="test_admin_user"
+            )
+            # Validate it
+            is_valid = await self.store._session_manager.validate_session(test_session)
+            # Clean up
+            await self.store._session_manager.delete_session(test_session)
+
+            if is_valid:
+                results["session_manager"] = {
+                    "status": "ok",
+                    "sandbox_id": self.store.sandbox_id,
+                    "test_session": test_session
+                }
+            else:
+                results["session_manager"] = {
+                    "status": "error",
+                    "message": "Session validation failed"
+                }
+        except Exception as e:
+            results["session_manager"] = {
+                "status": "error",
+                "message": str(e)
+            }
+
         return results
 
     async def get_stats(self) -> Dict[str, Any]:
         """Get storage statistics."""
-        return {
+        base_stats = {
             "storage_provider": self.store._storage_provider_name,
             "session_provider": self.store._session_provider_name,
             "bucket": self.store.bucket,
@@ -82,4 +111,47 @@ class AdminOperations:
             "closed": self.store._closed,
             "sandbox_id": self.store.sandbox_id,
             "session_ttl_hours": self.store.session_ttl_hours,
+        }
+
+        # Add session manager stats from chuk_sessions
+        try:
+            session_stats = self.store._session_manager.get_cache_stats()
+            base_stats["session_manager"] = session_stats
+        except Exception as e:
+            base_stats["session_manager"] = {
+                "error": str(e),
+                "status": "unavailable"
+            }
+
+        return base_stats
+
+    async def cleanup_all_expired(self) -> Dict[str, int]:
+        """Clean up all expired resources."""
+        results = {"timestamp": datetime.utcnow().isoformat() + "Z"}
+
+        # Clean up expired sessions using chuk_sessions
+        try:
+            expired_sessions = await self.store._session_manager.cleanup_expired_sessions()
+            results["expired_sessions_cleaned"] = expired_sessions
+        except Exception as e:
+            results["session_cleanup_error"] = str(e)
+            results["expired_sessions_cleaned"] = 0
+
+        # TODO: Add artifact cleanup based on TTL
+        # This would require scanning metadata to find expired artifacts
+        results["expired_artifacts_cleaned"] = 0  # Placeholder
+
+        return results
+
+    async def get_sandbox_info(self) -> Dict[str, Any]:
+        """Get information about the current sandbox."""
+        return {
+            "sandbox_id": self.store.sandbox_id,
+            "session_prefix_pattern": self.store.get_session_prefix_pattern(),
+            "grid_architecture": {
+                "enabled": True,
+                "pattern": "grid/{sandbox_id}/{session_id}/{artifact_id}",
+                "mandatory_sessions": True,
+                "federation_ready": True
+            }
         }
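
For orientation, here is a minimal sketch of how the new AdminOperations helpers shown above (validate_configuration, get_stats, cleanup_all_expired, get_sandbox_info) might be exercised. The zero-argument ArtifactStore() construction and the chuk_artifacts.store import path are assumptions for illustration; neither is shown in this diff.

# Illustrative sketch only; ArtifactStore() construction is assumed, not shown in this diff.
import asyncio

from chuk_artifacts.admin import AdminOperations
from chuk_artifacts.store import ArtifactStore  # import path assumed from TYPE_CHECKING hints elsewhere in the diff


async def main() -> None:
    store = ArtifactStore()          # assumed default construction
    admin = AdminOperations(store)

    # validate_configuration() now also probes chuk_sessions by allocating,
    # validating, and deleting a throwaway session.
    print(await admin.validate_configuration())

    # get_stats() returns the base stats plus a "session_manager" block.
    print(await admin.get_stats())

    # cleanup_all_expired() reports expired sessions cleaned; artifact-level
    # cleanup is still a TODO in this release.
    print(await admin.cleanup_all_expired())

    # get_sandbox_info() exposes the grid key pattern for this sandbox.
    print(await admin.get_sandbox_info())


if __name__ == "__main__":
    asyncio.run(main())
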
chuk_artifacts/base.py CHANGED
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # chuk_artifacts/base.py
 """
-base class for operation modules
+Base class for operation modules.
+Updated to work with chuk_sessions integration.
 """
 
 from __future__ import annotations
@@ -17,10 +18,10 @@ logger = logging.getLogger(__name__)
 
 
 class BaseOperations:
-    """Fixed base class for all operation modules."""
+    """Base class for all operation modules."""
 
     def __init__(self, store: 'ArtifactStore'):
-        # FIXED: Renamed from self.store to self._artifact_store to avoid method name conflicts
+        # Store reference to artifact store
         self._artifact_store = store
 
     @property
@@ -47,6 +48,11 @@ class BaseOperations:
     def max_retries(self) -> int:
         return self._artifact_store.max_retries
 
+    @property
+    def session_manager(self):
+        """Access to chuk_sessions SessionManager."""
+        return self._artifact_store._session_manager
+
     def _check_closed(self):
         """Check if store is closed and raise error if so."""
         if self._artifact_store._closed:
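
The new session_manager property gives every operations module direct access to the store's chuk_sessions SessionManager. A brief sketch of how a subclass might use it; CustomOperations is hypothetical, and only BaseOperations, _check_closed, and the property itself come from this diff.

# Hypothetical subclass for illustration; only BaseOperations and its
# session_manager property are part of this diff.
from typing import Optional

from chuk_artifacts.base import BaseOperations


class CustomOperations(BaseOperations):
    async def ensure_session(self, session_id: Optional[str] = None) -> str:
        """Allocate (or re-validate) a session via chuk_sessions."""
        self._check_closed()
        if session_id is None:
            return await self.session_manager.allocate_session()
        return await self.session_manager.allocate_session(session_id=session_id)
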
chuk_artifacts/batch.py CHANGED
@@ -1,28 +1,32 @@
-# ===========================================================================
-# chuk_artifacts/batch.py - Batch operations
-# ===========================================================================
+# -*- coding: utf-8 -*-
+# chuk_artifacts/batch.py
 """
 Batch operations for multiple artifacts.
+Now uses chuk_sessions for session management.
 """
 
 from __future__ import annotations
 
-import uuid, hashlib, json, logging
+import uuid, hashlib, json, logging, asyncio
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
 
-from .base import BaseOperations
-from .exceptions import ArtifactStoreError
+if TYPE_CHECKING:
+    from .store import ArtifactStore
+
+from .exceptions import ArtifactStoreError, ProviderError, SessionError
 
 logger = logging.getLogger(__name__)
 
-_ANON_PREFIX = "anon"
 _DEFAULT_TTL = 900
 
 
-class BatchOperations(BaseOperations):
+class BatchOperations:
     """Handles batch operations for multiple artifacts."""
 
+    def __init__(self, artifact_store: 'ArtifactStore'):
+        self.artifact_store = artifact_store
+
     async def store_batch(
         self,
         items: List[Dict[str, Any]],
@@ -30,7 +34,14 @@ class BatchOperations(BaseOperations):
         ttl: int = _DEFAULT_TTL,
     ) -> List[str]:
         """Store multiple artifacts in a batch operation."""
-        self._check_closed()
+        if self.artifact_store._closed:
+            raise ArtifactStoreError("Store is closed")
+
+        # Ensure session is allocated using chuk_sessions
+        if session_id is None:
+            session_id = await self.artifact_store._session_manager.allocate_session()
+        else:
+            session_id = await self.artifact_store._session_manager.allocate_session(session_id=session_id)
 
         artifact_ids = []
         failed_items = []
@@ -38,18 +49,19 @@ class BatchOperations(BaseOperations):
         for i, item in enumerate(items):
             try:
                 artifact_id = uuid.uuid4().hex
-                scope = session_id or f"{_ANON_PREFIX}_{artifact_id}"
-                key = f"sess/{scope}/{artifact_id}"
+                key = self.artifact_store.generate_artifact_key(session_id, artifact_id)
 
                 # Store in object storage
                 await self._store_with_retry(
                     item["data"], key, item["mime"],
-                    item.get("filename"), scope
+                    item.get("filename"), session_id
                 )
 
                 # Prepare metadata record
                 record = {
-                    "scope": scope,
+                    "artifact_id": artifact_id,
+                    "session_id": session_id,
+                    "sandbox_id": self.artifact_store.sandbox_id,
                     "key": key,
                     "mime": item["mime"],
                     "summary": item["summary"],
@@ -57,14 +69,16 @@ class BatchOperations(BaseOperations):
                    "filename": item.get("filename"),
                    "bytes": len(item["data"]),
                    "sha256": hashlib.sha256(item["data"]).hexdigest(),
-                    "stored_at": datetime.utcnow().isoformat(timespec="seconds") + "Z",
+                    "stored_at": datetime.utcnow().isoformat() + "Z",
                     "ttl": ttl,
-                    "storage_provider": self.storage_provider_name,
-                    "session_provider": self.session_provider_name,
+                    "storage_provider": self.artifact_store._storage_provider_name,
+                    "session_provider": self.artifact_store._session_provider_name,
+                    "batch_operation": True,
+                    "batch_index": i,
                 }
 
                 # Store metadata via session provider
-                session_ctx_mgr = self.session_factory()
+                session_ctx_mgr = self.artifact_store._session_factory()
                 async with session_ctx_mgr as session:
                     await session.setex(artifact_id, ttl, json.dumps(record))
 
@@ -80,28 +94,30 @@ class BatchOperations(BaseOperations):
 
         return artifact_ids
 
-    async def _store_with_retry(self, data: bytes, key: str, mime: str, filename: str, scope: str):
+    async def _store_with_retry(self, data: bytes, key: str, mime: str, filename: str, session_id: str):
         """Store data with retry logic (copied from core for batch operations)."""
-        import asyncio
-
         last_exception = None
 
-        for attempt in range(self.max_retries):
+        for attempt in range(self.artifact_store.max_retries):
             try:
-                storage_ctx_mgr = self.s3_factory()
+                storage_ctx_mgr = self.artifact_store._s3_factory()
                 async with storage_ctx_mgr as s3:
                     await s3.put_object(
-                        Bucket=self.bucket,
+                        Bucket=self.artifact_store.bucket,
                         Key=key,
                         Body=data,
                         ContentType=mime,
-                        Metadata={"filename": filename or "", "scope": scope},
+                        Metadata={
+                            "filename": filename or "",
+                            "session_id": session_id,
+                            "sandbox_id": self.artifact_store.sandbox_id,
+                        },
                     )
                 return  # Success
 
             except Exception as e:
                 last_exception = e
-                if attempt < self.max_retries - 1:
+                if attempt < self.artifact_store.max_retries - 1:
                     wait_time = 2 ** attempt  # Exponential backoff
                     logger.warning(
                         f"Batch storage attempt {attempt + 1} failed, retrying in {wait_time}s",
@@ -109,7 +125,6 @@ class BatchOperations(BaseOperations):
                     )
                     await asyncio.sleep(wait_time)
                 else:
-                    logger.error(f"All {self.max_retries} batch storage attempts failed")
+                    logger.error(f"All {self.artifact_store.max_retries} batch storage attempts failed")
 
-        raise last_exception
-
+        raise last_exception
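
As a usage sketch: store_batch now allocates (or re-validates) a session through chuk_sessions before writing, so callers may pass session_id=None. The item keys below (data, mime, summary, filename) mirror the keys read in the loop above; any further keys (for example a per-item meta dict) fall outside the visible hunks, and the ArtifactStore() construction is again an assumption.

# Illustrative sketch only; ArtifactStore() construction is assumed.
import asyncio

from chuk_artifacts.batch import BatchOperations
from chuk_artifacts.store import ArtifactStore


async def main() -> None:
    store = ArtifactStore()  # assumed default construction
    batch = BatchOperations(store)

    items = [
        {"data": b"hello", "mime": "text/plain", "summary": "greeting", "filename": "hello.txt"},
        {"data": b"{}", "mime": "application/json", "summary": "empty doc", "filename": "doc.json"},
    ]

    # With session_id=None a session is allocated via chuk_sessions and every
    # item is written under grid/{sandbox_id}/{session_id}/{artifact_id}.
    artifact_ids = await batch.store_batch(items, session_id=None)
    print(artifact_ids)


if __name__ == "__main__":
    asyncio.run(main())
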
chuk_artifacts/core.py CHANGED
@@ -2,6 +2,7 @@
 # chuk_artifacts/core.py
 """
 Clean core storage operations - grid architecture only.
+Now uses chuk_sessions for session management.
 """
 
 from __future__ import annotations
@@ -29,7 +30,7 @@ class CoreStorageOperations:
     """Clean core storage operations with grid architecture."""
 
     def __init__(self, artifact_store: 'ArtifactStore'):
-        self.store = artifact_store
+        self.artifact_store = artifact_store
 
     async def store(
         self,
@@ -43,14 +44,14 @@ class CoreStorageOperations:
         ttl: int = _DEFAULT_TTL,
     ) -> str:
         """Store artifact with grid key generation."""
-        if self.store._closed:
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")
 
         start_time = time.time()
         artifact_id = uuid.uuid4().hex
 
-        # Generate grid key
-        key = self.store.generate_artifact_key(session_id, artifact_id)
+        # Generate grid key using chuk_sessions
+        key = self.artifact_store.generate_artifact_key(session_id, artifact_id)
 
         try:
             # Store in object storage
@@ -60,7 +61,7 @@ class CoreStorageOperations:
             record = {
                 "artifact_id": artifact_id,
                 "session_id": session_id,
-                "sandbox_id": self.store.sandbox_id,
+                "sandbox_id": self.artifact_store.sandbox_id,
                 "key": key,
                 "mime": mime,
                 "summary": summary,
@@ -70,12 +71,12 @@ class CoreStorageOperations:
                 "sha256": hashlib.sha256(data).hexdigest(),
                 "stored_at": datetime.utcnow().isoformat() + "Z",
                 "ttl": ttl,
-                "storage_provider": self.store._storage_provider_name,
-                "session_provider": self.store._session_provider_name,
+                "storage_provider": self.artifact_store._storage_provider_name,
+                "session_provider": self.artifact_store._session_provider_name,
             }
 
             # Store metadata
-            session_ctx_mgr = self.store._session_factory()
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 await session.setex(artifact_id, ttl, json.dumps(record))
 
@@ -102,16 +103,16 @@ class CoreStorageOperations:
 
     async def retrieve(self, artifact_id: str) -> bytes:
         """Retrieve artifact data."""
-        if self.store._closed:
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")
 
         try:
             record = await self._get_record(artifact_id)
 
-            storage_ctx_mgr = self.store._s3_factory()
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 response = await s3.get_object(
-                    Bucket=self.store.bucket,
+                    Bucket=self.artifact_store.bucket,
                     Key=record["key"]
                 )
 
@@ -146,26 +147,26 @@ class CoreStorageOperations:
         """Store with retry logic."""
         last_exception = None
 
-        for attempt in range(self.store.max_retries):
+        for attempt in range(self.artifact_store.max_retries):
             try:
-                storage_ctx_mgr = self.store._s3_factory()
+                storage_ctx_mgr = self.artifact_store._s3_factory()
                 async with storage_ctx_mgr as s3:
                     await s3.put_object(
-                        Bucket=self.store.bucket,
+                        Bucket=self.artifact_store.bucket,
                         Key=key,
                         Body=data,
                         ContentType=mime,
                         Metadata={
                             "filename": filename or "",
                             "session_id": session_id,
-                            "sandbox_id": self.store.sandbox_id,
+                            "sandbox_id": self.artifact_store.sandbox_id,
                         },
                     )
                 return  # Success
 
             except Exception as e:
                 last_exception = e
-                if attempt < self.store.max_retries - 1:
+                if attempt < self.artifact_store.max_retries - 1:
                     wait_time = 2 ** attempt
                     await asyncio.sleep(wait_time)
 
@@ -174,7 +175,7 @@ class CoreStorageOperations:
     async def _get_record(self, artifact_id: str) -> Dict[str, Any]:
         """Get artifact metadata."""
         try:
-            session_ctx_mgr = self.store._session_factory()
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 raw = await session.get(artifact_id)
         except Exception as e:
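
The grid key layout that store() relies on is the one reported by get_sandbox_info() in admin.py: grid/{sandbox_id}/{session_id}/{artifact_id}. The shipped code delegates key generation to ArtifactStore.generate_artifact_key and chuk_sessions; the standalone helpers below merely restate that layout for illustration.

# Illustrative helpers restating the documented grid pattern; the package
# itself delegates to ArtifactStore.generate_artifact_key / chuk_sessions.
from typing import Optional


def grid_key(sandbox_id: str, session_id: str, artifact_id: str) -> str:
    return f"grid/{sandbox_id}/{session_id}/{artifact_id}"


def parse_grid_key(key: str) -> Optional[dict]:
    """Best-effort inverse of grid_key(); returns None for non-grid keys."""
    parts = key.split("/")
    if len(parts) < 4 or parts[0] != "grid":
        return None
    return {"sandbox_id": parts[1], "session_id": parts[2], "artifact_id": parts[3]}


assert parse_grid_key(grid_key("sandbox-a", "sess-1", "abc123")) == {
    "sandbox_id": "sandbox-a",
    "session_id": "sess-1",
    "artifact_id": "abc123",
}
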
chuk_artifacts/metadata.py CHANGED
@@ -2,6 +2,7 @@
 # chuk_artifacts/metadata.py
 """
 Clean metadata operations for grid architecture.
+Now uses chuk_sessions for session management.
 """
 
 from __future__ import annotations
@@ -13,13 +14,13 @@ from typing import Any, Dict, List, Optional, TYPE_CHECKING
 if TYPE_CHECKING:
     from .store import ArtifactStore
 
-from .exceptions import ProviderError, SessionError
+from .exceptions import ProviderError, SessionError, ArtifactNotFoundError
 
 logger = logging.getLogger(__name__)
 
 
 class MetadataOperations:
-    """Clean metadata operations for grid architecture."""
+    """Clean metadata operations for grid architecture using chuk_sessions."""
 
     def __init__(self, artifact_store: 'ArtifactStore'):
         self.store = artifact_store
@@ -49,7 +50,7 @@ class MetadataOperations:
                     Key=record["key"]
                 )
 
-            # Delete metadata
+            # Delete metadata from session provider
             session_ctx_mgr = self.store._session_factory()
             async with session_ctx_mgr as session:
                 if hasattr(session, 'delete'):
@@ -63,10 +64,11 @@ class MetadataOperations:
             return False
 
     async def list_by_session(self, session_id: str, limit: int = 100) -> List[Dict[str, Any]]:
-        """List artifacts in a session using grid prefix."""
+        """List artifacts in a session using grid prefix from chuk_sessions."""
        try:
            artifacts = []
-            prefix = f"grid/{self.store.sandbox_id}/{session_id}/"
+            # Use the session manager's canonical prefix instead of building our own
+            prefix = self.store._session_manager.get_canonical_prefix(session_id)
 
            storage_ctx_mgr = self.store._s3_factory()
            async with storage_ctx_mgr as s3:
@@ -79,10 +81,10 @@ class MetadataOperations:
 
                 for obj in response.get('Contents', []):
                     key = obj['Key']
-                    # Extract artifact ID from key
-                    parts = key.split('/')
-                    if len(parts) >= 4:  # grid/sandbox/session/artifact_id
-                        artifact_id = parts[3]
+                    # Parse the grid key using chuk_sessions
+                    parsed = self.store._session_manager.parse_grid_key(key)
+                    if parsed and parsed.get('artifact_id'):
+                        artifact_id = parsed['artifact_id']
                         try:
                             record = await self._get_record(artifact_id)
                             artifacts.append(record)
@@ -155,7 +157,7 @@ class MetadataOperations:
                 if key not in ["summary", "meta"] and value is not None:
                     record[key] = value
 
-            # Store updated record
+            # Store updated record using session provider
             session_ctx_mgr = self.store._session_factory()
             async with session_ctx_mgr as session:
                 await session.setex(artifact_id, record.get("ttl", 900), json.dumps(record))
@@ -181,7 +183,7 @@ class MetadataOperations:
             new_ttl = current_ttl + additional_seconds
             record["ttl"] = new_ttl
 
-            # Store updated record with new TTL
+            # Store updated record with new TTL using session provider
             session_ctx_mgr = self.store._session_factory()
             async with session_ctx_mgr as session:
                 await session.setex(artifact_id, new_ttl, json.dumps(record))
@@ -193,7 +195,7 @@ class MetadataOperations:
             raise ProviderError(f"TTL extension failed: {e}") from e
 
     async def _get_record(self, artifact_id: str) -> Dict[str, Any]:
-        """Get artifact metadata record."""
+        """Get artifact metadata record from session provider."""
         try:
             session_ctx_mgr = self.store._session_factory()
             async with session_ctx_mgr as session:
@@ -202,7 +204,7 @@ class MetadataOperations:
             raise SessionError(f"Session error for {artifact_id}: {e}") from e
 
         if raw is None:
-            raise ProviderError(f"Artifact {artifact_id} not found")
+            raise ArtifactNotFoundError(f"Artifact {artifact_id} not found")
 
         try:
             return json.loads(raw)
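
Two caller-visible effects of the metadata changes: list_by_session now derives its listing prefix from the chuk_sessions SessionManager instead of hand-building it, and a missing artifact surfaces as ArtifactNotFoundError rather than ProviderError. A minimal sketch; the helper functions are hypothetical, and _get_record is a private helper used here purely for illustration.

# Hypothetical callers; MetadataOperations, list_by_session, _get_record and the
# exception types come from the diff, everything else is illustrative.
from typing import List, Optional

from chuk_artifacts.exceptions import ArtifactNotFoundError
from chuk_artifacts.metadata import MetadataOperations


async def session_summaries(meta_ops: MetadataOperations, session_id: str) -> List[str]:
    # Prefix resolution now goes through chuk_sessions' canonical prefix.
    records = await meta_ops.list_by_session(session_id, limit=100)
    return [record.get("summary", "") for record in records]


async def safe_lookup(meta_ops: MetadataOperations, artifact_id: str) -> Optional[dict]:
    try:
        return await meta_ops._get_record(artifact_id)  # private helper, shown above
    except ArtifactNotFoundError:
        return None  # was ProviderError before 0.1.4
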
chuk_artifacts/presigned.py CHANGED
@@ -2,6 +2,7 @@
 # chuk_artifacts/presigned.py
 """
 Presigned URL operations: download URLs, upload URLs, and upload registration.
+Now uses chuk_sessions for session management.
 """
 
 from __future__ import annotations
@@ -28,11 +29,11 @@ class PresignedURLOperations:
     """Handles all presigned URL operations."""
 
     def __init__(self, artifact_store: 'ArtifactStore'):
-        self.store = artifact_store
+        self.artifact_store = artifact_store
 
     async def presign(self, artifact_id: str, expires: int = _DEFAULT_PRESIGN_EXPIRES) -> str:
         """Generate a presigned URL for artifact download."""
-        if self.store._closed:
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")
 
         start_time = time.time()
@@ -40,11 +41,11 @@ class PresignedURLOperations:
         try:
             record = await self._get_record(artifact_id)
 
-            storage_ctx_mgr = self.store._s3_factory()
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 url = await s3.generate_presigned_url(
                     "get_object",
-                    Params={"Bucket": self.store.bucket, "Key": record["key"]},
+                    Params={"Bucket": self.artifact_store.bucket, "Key": record["key"]},
                     ExpiresIn=expires,
                 )
 
@@ -101,28 +102,28 @@ class PresignedURLOperations:
         expires: int = _DEFAULT_PRESIGN_EXPIRES
     ) -> tuple[str, str]:
         """Generate a presigned URL for uploading a new artifact."""
-        if self.store._closed:
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")
 
         start_time = time.time()
 
-        # Ensure session is allocated
+        # Ensure session is allocated using chuk_sessions
         if session_id is None:
-            session_id = await self.store._session_manager.allocate_session()
+            session_id = await self.artifact_store._session_manager.allocate_session()
         else:
-            session_id = await self.store._session_manager.allocate_session(session_id=session_id)
+            session_id = await self.artifact_store._session_manager.allocate_session(session_id=session_id)
 
         # Generate artifact ID and key path
         artifact_id = uuid.uuid4().hex
-        key = self.store.generate_artifact_key(session_id, artifact_id)
+        key = self.artifact_store.generate_artifact_key(session_id, artifact_id)
 
         try:
-            storage_ctx_mgr = self.store._s3_factory()
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 url = await s3.generate_presigned_url(
                     "put_object",
                     Params={
-                        "Bucket": self.store.bucket,
+                        "Bucket": self.artifact_store.bucket,
                         "Key": key,
                         "ContentType": mime_type
                     },
@@ -174,26 +175,26 @@ class PresignedURLOperations:
         ttl: int = _DEFAULT_TTL,
     ) -> bool:
         """Register metadata for an artifact uploaded via presigned URL."""
-        if self.store._closed:
+        if self.artifact_store._closed:
             raise ArtifactStoreError("Store is closed")
 
         start_time = time.time()
 
-        # Ensure session is allocated
+        # Ensure session is allocated using chuk_sessions
         if session_id is None:
-            session_id = await self.store._session_manager.allocate_session()
+            session_id = await self.artifact_store._session_manager.allocate_session()
         else:
-            session_id = await self.store._session_manager.allocate_session(session_id=session_id)
+            session_id = await self.artifact_store._session_manager.allocate_session(session_id=session_id)
 
         # Reconstruct the key path
-        key = self.store.generate_artifact_key(session_id, artifact_id)
+        key = self.artifact_store.generate_artifact_key(session_id, artifact_id)
 
         try:
             # Verify the object exists and get its size
-            storage_ctx_mgr = self.store._s3_factory()
+            storage_ctx_mgr = self.artifact_store._s3_factory()
             async with storage_ctx_mgr as s3:
                 try:
-                    response = await s3.head_object(Bucket=self.store.bucket, Key=key)
+                    response = await s3.head_object(Bucket=self.artifact_store.bucket, Key=key)
                     file_size = response.get('ContentLength', 0)
                 except Exception:
                     logger.warning(f"Artifact {artifact_id} not found in storage")
@@ -203,7 +204,7 @@ class PresignedURLOperations:
             record = {
                 "artifact_id": artifact_id,
                 "session_id": session_id,
-                "sandbox_id": self.store.sandbox_id,
+                "sandbox_id": self.artifact_store.sandbox_id,
                 "key": key,
                 "mime": mime,
                 "summary": summary,
@@ -213,13 +214,13 @@ class PresignedURLOperations:
                 "sha256": None,  # We don't have the hash since we didn't upload it directly
                 "stored_at": datetime.utcnow().isoformat() + "Z",
                 "ttl": ttl,
-                "storage_provider": self.store._storage_provider_name,
-                "session_provider": self.store._session_provider_name,
+                "storage_provider": self.artifact_store._storage_provider_name,
+                "session_provider": self.artifact_store._session_provider_name,
                 "uploaded_via_presigned": True,  # Flag to indicate upload method
             }
 
             # Cache metadata using session provider
-            session_ctx_mgr = self.store._session_factory()
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 await session.setex(artifact_id, ttl, json.dumps(record))
 
@@ -288,7 +289,7 @@ class PresignedURLOperations:
     async def _get_record(self, artifact_id: str) -> Dict[str, Any]:
         """Get artifact metadata record."""
         try:
-            session_ctx_mgr = self.store._session_factory()
+            session_ctx_mgr = self.artifact_store._session_factory()
             async with session_ctx_mgr as session:
                 raw = await session.get(artifact_id)
         except Exception as e:
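
On the download side, which is fully visible above, presign() looks up the artifact's metadata record and mints a time-limited GET URL for its grid key. A short sketch; the upload-URL and registration methods are only partially shown in this diff (their names fall outside the hunks), so they are omitted here, and the ArtifactStore() construction is an assumption.

# Illustrative sketch only; ArtifactStore() construction is assumed.
import asyncio

from chuk_artifacts.presigned import PresignedURLOperations
from chuk_artifacts.store import ArtifactStore


async def share_artifact(artifact_id: str) -> str:
    store = ArtifactStore()  # assumed default construction
    urls = PresignedURLOperations(store)
    # Mints a presigned GET URL, valid for one hour, against the artifact's grid key.
    return await urls.presign(artifact_id, expires=3600)


if __name__ == "__main__":
    print(asyncio.run(share_artifact("0123456789abcdef0123456789abcdef")))
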