altcodepro-polydb-python 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. altcodepro_polydb_python-2.1.0.dist-info/METADATA +378 -0
  2. altcodepro_polydb_python-2.1.0.dist-info/RECORD +51 -0
  3. altcodepro_polydb_python-2.1.0.dist-info/WHEEL +5 -0
  4. altcodepro_polydb_python-2.1.0.dist-info/licenses/LICENSE +21 -0
  5. altcodepro_polydb_python-2.1.0.dist-info/top_level.txt +1 -0
  6. polydb/__init__.py +64 -0
  7. polydb/adapters/AzureBlobStorageAdapter.py +77 -0
  8. polydb/adapters/AzureFileStorageAdapter.py +79 -0
  9. polydb/adapters/AzureQueueAdapter.py +61 -0
  10. polydb/adapters/AzureTableStorageAdapter.py +182 -0
  11. polydb/adapters/DynamoDBAdapter.py +216 -0
  12. polydb/adapters/EFSAdapter.py +50 -0
  13. polydb/adapters/FirestoreAdapter.py +193 -0
  14. polydb/adapters/GCPStorageAdapter.py +81 -0
  15. polydb/adapters/MongoDBAdapter.py +136 -0
  16. polydb/adapters/PostgreSQLAdapter.py +453 -0
  17. polydb/adapters/PubSubAdapter.py +83 -0
  18. polydb/adapters/S3Adapter.py +86 -0
  19. polydb/adapters/S3CompatibleAdapter.py +90 -0
  20. polydb/adapters/SQSAdapter.py +84 -0
  21. polydb/adapters/VercelKVAdapter.py +327 -0
  22. polydb/adapters/__init__.py +0 -0
  23. polydb/advanced_query.py +147 -0
  24. polydb/audit/AuditStorage.py +136 -0
  25. polydb/audit/__init__.py +7 -0
  26. polydb/audit/context.py +53 -0
  27. polydb/audit/manager.py +47 -0
  28. polydb/audit/models.py +86 -0
  29. polydb/base/NoSQLKVAdapter.py +301 -0
  30. polydb/base/ObjectStorageAdapter.py +42 -0
  31. polydb/base/QueueAdapter.py +27 -0
  32. polydb/base/SharedFilesAdapter.py +32 -0
  33. polydb/base/__init__.py +0 -0
  34. polydb/batch.py +163 -0
  35. polydb/cache.py +204 -0
  36. polydb/databaseFactory.py +748 -0
  37. polydb/decorators.py +21 -0
  38. polydb/errors.py +82 -0
  39. polydb/factory.py +107 -0
  40. polydb/models.py +39 -0
  41. polydb/monitoring.py +313 -0
  42. polydb/multitenancy.py +197 -0
  43. polydb/py.typed +0 -0
  44. polydb/query.py +150 -0
  45. polydb/registry.py +71 -0
  46. polydb/retry.py +76 -0
  47. polydb/schema.py +205 -0
  48. polydb/security.py +458 -0
  49. polydb/types.py +127 -0
  50. polydb/utils.py +61 -0
  51. polydb/validation.py +131 -0
@@ -0,0 +1,453 @@
1
+ # src/polydb/adapters/postgres.py
2
+ import os
3
+ import threading
4
+ from typing import Any, List, Optional, Tuple, Union
5
+ import hashlib
6
+ from contextlib import contextmanager
7
+
8
+ from ..errors import DatabaseError, ConnectionError
9
+ from ..retry import retry
10
+ from ..utils import validate_table_name, validate_column_name
11
+ from ..query import QueryBuilder
12
+ from ..types import JsonDict, Lookup
13
+
14
+
15
class PostgreSQLAdapter:
    """PostgreSQL adapter with LINQ-style querying and thread-safe connection pooling.

    Connections come from a ``psycopg2`` ``ThreadedConnectionPool`` configured via
    environment variables:

    * ``POSTGRES_CONNECTION_STRING`` (falls back to ``POSTGRES_URL``) -- DSN
    * ``POSTGRES_MIN_CONNECTIONS`` / ``POSTGRES_MAX_CONNECTIONS`` -- pool bounds

    All identifiers (table/column names) are validated before interpolation into
    SQL; all values are passed as bound parameters.
    """

    def __init__(self):
        from ..utils import setup_logger

        self.logger = setup_logger(__name__)
        # Prefer POSTGRES_CONNECTION_STRING, then POSTGRES_URL, then a
        # localhost development default.
        self.connection_string = os.getenv(
            "POSTGRES_CONNECTION_STRING",
            os.getenv("POSTGRES_URL", "postgresql://user:password@localhost:5432/database"),
        )
        self._pool = None
        self._lock = threading.Lock()
        self._initialize_pool()

    def _initialize_pool(self):
        """Create the shared connection pool once (checked under a lock).

        Raises:
            ConnectionError: if psycopg2 is unavailable or the pool cannot be built.
        """
        try:
            import psycopg2.pool

            with self._lock:
                if not self._pool:
                    self._pool = psycopg2.pool.ThreadedConnectionPool(
                        minconn=int(os.getenv("POSTGRES_MIN_CONNECTIONS", "2")),
                        maxconn=int(os.getenv("POSTGRES_MAX_CONNECTIONS", "20")),
                        dsn=self.connection_string,
                    )
                    self.logger.info("PostgreSQL pool initialized")
        except Exception as e:
            raise ConnectionError(f"Failed to initialize PostgreSQL pool: {str(e)}")

    def _get_connection(self):
        """Borrow a connection from the pool, (re)creating the pool if needed."""
        if not self._pool:
            self._initialize_pool()
        return self._pool.getconn()  # type: ignore

    def _return_connection(self, conn):
        """Return a borrowed connection to the pool (no-op if pool is gone)."""
        if self._pool and conn:
            self._pool.putconn(conn)

    @staticmethod
    def _row_to_dict(cursor, row) -> JsonDict:
        """Map a fetched row onto its column names using cursor.description."""
        columns = [desc[0] for desc in cursor.description]
        return dict(zip(columns, row))

    @staticmethod
    def _id_where(entity_id: Union[Any, Lookup], params: List[Any]) -> str:
        """Build a WHERE fragment for a scalar id or a column->value lookup dict.

        Column names are validated; bound values are appended to ``params``.
        A non-dict ``entity_id`` is matched against the conventional ``id`` column.
        """
        if isinstance(entity_id, dict):
            parts = []
            for k, v in entity_id.items():
                validate_column_name(k)
                parts.append(f"{k} = %s")
                params.append(v)
            return " AND ".join(parts)
        params.append(entity_id)
        return "id = %s"

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def insert(self, table: str, data: JsonDict) -> JsonDict:
        """Insert ``data`` into ``table`` and return the stored row.

        Raises:
            DatabaseError: on any failure; the transaction is rolled back.
        """
        table = validate_table_name(table)
        for k in data.keys():
            validate_column_name(k)

        conn = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            # Identifiers validated above; values always bound via placeholders.
            columns = ", ".join(data.keys())
            placeholders = ", ".join(["%s"] * len(data))
            query = f"INSERT INTO {table} ({columns}) VALUES ({placeholders}) RETURNING *"

            cursor.execute(query, list(data.values()))
            result = self._row_to_dict(cursor, cursor.fetchone())

            conn.commit()
            cursor.close()
            return result
        except Exception as e:
            if conn:
                conn.rollback()
            raise DatabaseError(f"Insert failed: {str(e)}")
        finally:
            if conn:
                self._return_connection(conn)

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def select(
        self,
        table: str,
        query: Optional[Lookup] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> List[JsonDict]:
        """Select rows from ``table``.

        Args:
            query: optional equality/IN lookup (``{col: value}`` or ``{col: [v, ...]}``).
            limit/offset: applied when not None (0 is honored, unlike truthiness checks).

        Returns:
            List of rows as dicts keyed by column name.
        """
        table = validate_table_name(table)
        conn = None

        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            sql = f"SELECT * FROM {table}"
            params: List[Any] = []

            if query:
                where_parts = []
                for k, v in query.items():
                    validate_column_name(k)
                    if isinstance(v, (list, tuple)):
                        # Sequence value -> IN (...) with one placeholder per element.
                        placeholders = ",".join(["%s"] * len(v))
                        where_parts.append(f"{k} IN ({placeholders})")
                        params.extend(v)
                    else:
                        where_parts.append(f"{k} = %s")
                        params.append(v)

                if where_parts:
                    sql += " WHERE " + " AND ".join(where_parts)

            # `is not None` so an explicit limit/offset of 0 is respected.
            if limit is not None:
                sql += " LIMIT %s"
                params.append(limit)
            if offset is not None:
                sql += " OFFSET %s"
                params.append(offset)

            cursor.execute(sql, params)
            results = [self._row_to_dict(cursor, row) for row in cursor.fetchall()]
            cursor.close()

            return results
        except Exception as e:
            raise DatabaseError(f"Select failed: {str(e)}")
        finally:
            if conn:
                self._return_connection(conn)

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def select_page(
        self, table: str, query: Lookup, page_size: int, continuation_token: Optional[str] = None
    ) -> Tuple[List[JsonDict], Optional[str]]:
        """Select one page of results using an offset-based continuation token.

        Fetches page_size + 1 rows to detect whether more pages exist.
        Returns (rows, next_token) where next_token is None on the last page.
        """
        offset = int(continuation_token) if continuation_token else 0
        results = self.select(table, query, limit=page_size + 1, offset=offset)

        has_more = len(results) > page_size
        if has_more:
            results = results[:page_size]

        next_token = str(offset + page_size) if has_more else None
        return results, next_token

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def update(self, table: str, entity_id: Union[Any, Lookup], data: JsonDict) -> JsonDict:
        """Update rows matching ``entity_id`` (scalar id or lookup dict) with ``data``.

        Returns the first updated row.

        Raises:
            DatabaseError: if no rows matched, or on any database failure.
        """
        table = validate_table_name(table)
        for k in data.keys():
            validate_column_name(k)

        conn = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            set_clause = ", ".join([f"{k} = %s" for k in data.keys()])
            params = list(data.values())
            where_clause = self._id_where(entity_id, params)

            query = f"UPDATE {table} SET {set_clause} WHERE {where_clause} RETURNING *"
            cursor.execute(query, params)

            result_row = cursor.fetchone()
            if not result_row:
                raise DatabaseError("No rows updated")

            result = self._row_to_dict(cursor, result_row)

            conn.commit()
            cursor.close()
            return result
        except DatabaseError:
            # Already a domain error (e.g. "No rows updated") -- don't re-wrap.
            if conn:
                conn.rollback()
            raise
        except Exception as e:
            if conn:
                conn.rollback()
            raise DatabaseError(f"Update failed: {str(e)}")
        finally:
            if conn:
                self._return_connection(conn)

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def upsert(self, table: str, data: JsonDict) -> JsonDict:
        """Insert ``data`` or update on conflict, returning the stored row.

        Conflict target is the ``id`` column when present, otherwise the first
        key of ``data`` -- the table must have a unique constraint on it.
        """
        table = validate_table_name(table)
        for k in data.keys():
            validate_column_name(k)

        conn = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            columns = ", ".join(data.keys())
            placeholders = ", ".join(["%s"] * len(data))

            # Heuristic conflict target: prefer "id", else the first column.
            conflict_columns = ["id"] if "id" in data else list(data.keys())[:1]
            update_clause = ", ".join(
                [f"{k} = EXCLUDED.{k}" for k in data.keys() if k not in conflict_columns]
            )

            query = f"""
                INSERT INTO {table} ({columns})
                VALUES ({placeholders})
                ON CONFLICT ({', '.join(conflict_columns)})
                DO UPDATE SET {update_clause}
                RETURNING *
            """

            cursor.execute(query, list(data.values()))
            result = self._row_to_dict(cursor, cursor.fetchone())

            conn.commit()
            cursor.close()
            return result
        except Exception as e:
            if conn:
                conn.rollback()
            raise DatabaseError(f"Upsert failed: {str(e)}")
        finally:
            if conn:
                self._return_connection(conn)

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def delete(self, table: str, entity_id: Union[Any, Lookup]) -> JsonDict:
        """Delete rows matching ``entity_id`` (scalar id or lookup dict).

        Returns the first deleted row.

        Raises:
            DatabaseError: if no rows matched, or on any database failure.
        """
        table = validate_table_name(table)
        conn = None

        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            params: List[Any] = []
            where_clause = self._id_where(entity_id, params)

            query = f"DELETE FROM {table} WHERE {where_clause} RETURNING *"
            cursor.execute(query, params)
            result_row = cursor.fetchone()

            if not result_row:
                raise DatabaseError("No rows deleted")

            result = self._row_to_dict(cursor, result_row)

            conn.commit()
            cursor.close()
            return result
        except DatabaseError:
            # Already a domain error (e.g. "No rows deleted") -- don't re-wrap.
            if conn:
                conn.rollback()
            raise
        except Exception as e:
            if conn:
                conn.rollback()
            raise DatabaseError(f"Delete failed: {str(e)}")
        finally:
            if conn:
                self._return_connection(conn)

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def query_linq(self, table: str, builder: QueryBuilder) -> Union[List[JsonDict], int]:
        """Execute a QueryBuilder against ``table``.

        Supports COUNT, SELECT field lists (with DISTINCT), WHERE, GROUP BY,
        ORDER BY, LIMIT and OFFSET. Returns an int for count-only queries,
        otherwise a list of row dicts.
        """
        table = validate_table_name(table)
        conn = None

        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            if builder.count_only:
                sql = f"SELECT COUNT(*) FROM {table}"
            elif builder.select_fields:
                for f in builder.select_fields:
                    validate_column_name(f)
                fields = ", ".join(builder.select_fields)
                if builder.distinct:
                    sql = f"SELECT DISTINCT {fields} FROM {table}"
                else:
                    sql = f"SELECT {fields} FROM {table}"
            else:
                sql = f"SELECT * FROM {table}"

            params: List[Any] = []

            # WHERE -- the builder emits the clause and its bound parameters.
            where_clause, where_params = builder.to_sql_where()
            if where_clause:
                sql += f" WHERE {where_clause}"
                params.extend(where_params)

            # GROUP BY
            if builder.group_by_fields:
                for f in builder.group_by_fields:
                    validate_column_name(f)
                sql += f" GROUP BY {', '.join(builder.group_by_fields)}"

            # ORDER BY
            if builder.order_by_fields:
                order_parts = []
                for field, desc in builder.order_by_fields:
                    validate_column_name(field)
                    direction = "DESC" if desc else "ASC"
                    order_parts.append(f"{field} {direction}")
                sql += f" ORDER BY {', '.join(order_parts)}"

            # LIMIT / OFFSET
            if builder.take_count:
                sql += " LIMIT %s"
                params.append(builder.take_count)

            if builder.skip_count:
                sql += " OFFSET %s"
                params.append(builder.skip_count)

            cursor.execute(sql, params)

            if builder.count_only:
                count = cursor.fetchone()[0]
                cursor.close()  # close on this early-return path too
                return count

            results = [self._row_to_dict(cursor, row) for row in cursor.fetchall()]
            cursor.close()

            return results
        except Exception as e:
            raise DatabaseError(f"LINQ query failed: {str(e)}")
        finally:
            if conn:
                self._return_connection(conn)

    def __del__(self):
        # __init__ may have raised before _pool was assigned; getattr avoids
        # an AttributeError during interpreter teardown.
        pool = getattr(self, "_pool", None)
        if pool:
            try:
                pool.closeall()
            except Exception:
                pass

    @retry(max_attempts=3, delay=1.0, exceptions=(DatabaseError,))
    def execute(
        self,
        sql: str,
        params: Optional[List[Any]] = None,
        *,
        fetch: bool = False,
        fetch_one: bool = False,
    ) -> Union[None, JsonDict, List[JsonDict]]:
        """Execute raw SQL with optional result fetching.

        Args:
            sql: statement to execute (caller is responsible for its safety).
            params: bound parameters, if any.
            fetch: return all rows as a list of dicts.
            fetch_one: return the first row as a dict (or None).

        Returns:
            None for plain DDL/DML, a dict for ``fetch_one``, a list for ``fetch``.
        """
        conn = None
        cursor = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            self.logger.debug("Executing raw SQL: %s", sql)
            cursor.execute(sql, params or [])

            if fetch_one:
                row = cursor.fetchone()
                result = None
                if row:
                    result = self._row_to_dict(cursor, row)
                conn.commit()
                return result

            if fetch:
                results = [self._row_to_dict(cursor, r) for r in cursor.fetchall()]
                conn.commit()
                return results

            # Non-fetch execution (DDL/DML)
            conn.commit()
            return None

        except Exception as e:
            if conn:
                try:
                    conn.rollback()
                except Exception:
                    pass
            raise DatabaseError(f"Execute failed: {str(e)}")

        finally:
            if cursor:
                try:
                    cursor.close()
                except Exception:
                    pass
            if conn:
                self._return_connection(conn)

    @contextmanager
    def distributed_lock(self, lock_name: str):
        """Cluster-wide distributed lock via a PostgreSQL advisory lock.

        Safe across multiple pods/containers/instances. The lock name is hashed
        into the 63-bit key space pg_advisory_lock expects. The lock is released
        even if the guarded body raises -- advisory locks are session-scoped, so
        returning a still-locked connection to the pool would leak the lock.

        Raises:
            DatabaseError: on lock acquisition/release failure, or wrapping any
                exception raised by the guarded body.
        """
        conn = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            # Fold the name into a signed 64-bit advisory-lock key.
            lock_id = int(hashlib.sha256(lock_name.encode()).hexdigest(), 16) % (2**63)

            cursor.execute("SELECT pg_advisory_lock(%s);", (lock_id,))
            self.logger.debug(f"Acquired distributed lock: {lock_name}")

            try:
                yield
            finally:
                # Always release before the connection goes back to the pool.
                cursor.execute("SELECT pg_advisory_unlock(%s);", (lock_id,))
                self.logger.debug(f"Released distributed lock: {lock_name}")
                cursor.close()
                conn.commit()

        except Exception as e:
            if conn:
                conn.rollback()
            raise DatabaseError(f"Distributed lock failed: {str(e)}")

        finally:
            if conn:
                self._return_connection(conn)
@@ -0,0 +1,83 @@
1
+ # src/polydb/adapters/PubSubAdapter.py
2
+
3
+ from polydb.base.QueueAdapter import QueueAdapter
4
+ from polydb.errors import ConnectionError, QueueError
5
+ from polydb.retry import retry
6
+ import os
7
+ import threading
8
+ from typing import Any, Dict, List
9
+
10
class PubSubAdapter(QueueAdapter):
    """GCP Pub/Sub queue adapter with client reuse.

    Publisher and subscriber clients are created once and shared. Configuration:

    * ``GOOGLE_CLOUD_PROJECT`` -- project id
    * ``PUBSUB_TOPIC`` -- default topic/subscription name

    Delivery is at-most-once from the caller's perspective: ``receive``
    acknowledges messages immediately after pulling them.
    """

    def __init__(self):
        super().__init__()
        self.project_id = os.getenv("GOOGLE_CLOUD_PROJECT") or ""
        self.topic_name = os.getenv("PUBSUB_TOPIC", "default")
        self._publisher = None
        self._subscriber = None
        self._lock = threading.Lock()
        self._initialize_clients()

    def _initialize_clients(self):
        """Initialize Pub/Sub clients once (checked under a lock).

        Raises:
            ConnectionError: if the library is unavailable or clients fail to build.
        """
        try:
            from google.cloud import pubsub_v1

            with self._lock:
                if not self._publisher:
                    self._publisher = pubsub_v1.PublisherClient()
                    self._subscriber = pubsub_v1.SubscriberClient()
                    self.logger.info("Initialized Pub/Sub clients")
        except Exception as e:
            raise ConnectionError(f"Failed to initialize Pub/Sub: {str(e)}")

    @retry(max_attempts=3, delay=1.0, exceptions=(QueueError,))
    def send(self, message: Dict[str, Any], queue_name: str = "default") -> str:
        """Publish ``message`` (JSON-encoded) to the topic.

        Returns the published message id, or "" if no publisher is available.
        """
        try:
            import json

            # Lazily re-initialize if a client is missing (consistent with
            # the other adapters in this package).
            if not self._publisher:
                self._initialize_clients()
            if self._publisher:
                topic_path = self._publisher.topic_path(
                    self.project_id, queue_name or self.topic_name
                )
                data = json.dumps(message).encode("utf-8")
                future = self._publisher.publish(topic_path, data)
                return future.result()
            return ""
        except Exception as e:
            raise QueueError(f"Pub/Sub send failed: {str(e)}")

    @retry(max_attempts=3, delay=1.0, exceptions=(QueueError,))
    def receive(self, queue_name: str = "default", max_messages: int = 1) -> List[Dict[str, Any]]:
        """Pull up to ``max_messages`` from the subscription.

        Messages are acknowledged immediately after the pull, so a crash after
        this call returns means those messages are not redelivered.
        """
        try:
            import json

            if not self._subscriber:
                self._initialize_clients()
            if self._subscriber:
                subscription_path = self._subscriber.subscription_path(
                    self.project_id, queue_name or self.topic_name
                )

                response = self._subscriber.pull(
                    subscription=subscription_path, max_messages=max_messages, timeout=5.0
                )

                messages = [
                    json.loads(msg.message.data.decode()) for msg in response.received_messages
                ]

                # Acknowledge messages
                if response.received_messages:
                    ack_ids = [msg.ack_id for msg in response.received_messages]
                    self._subscriber.acknowledge(subscription=subscription_path, ack_ids=ack_ids)

                return messages
            return []
        except Exception as e:
            raise QueueError(f"Pub/Sub receive failed: {str(e)}")

    def delete(self, message_id: str, queue_name: str = "default") -> bool:
        """No-op: messages are acknowledged upon receipt (see ``receive``)."""
        return True
@@ -0,0 +1,86 @@
1
+ # src/polydb/adapters/S3Adapter.py
2
+ """
3
+ S3 adapter
4
+ """
5
+
6
+ import os
7
+ import threading
8
+ from typing import List, Optional
9
+ from ..base.ObjectStorageAdapter import ObjectStorageAdapter
10
+ from ..errors import StorageError, ConnectionError
11
+ from ..retry import retry
12
class S3Adapter(ObjectStorageAdapter):
    """AWS S3 object-storage adapter with client reuse.

    A single boto3 client is created once (credentials/region come from the
    standard boto3 resolution chain). Target bucket: ``S3_BUCKET_NAME`` env var.
    """

    def __init__(self):
        super().__init__()
        self.bucket_name = os.getenv("S3_BUCKET_NAME", "default")
        self._client = None
        self._lock = threading.Lock()
        self._initialize_client()

    def _initialize_client(self):
        """Initialize the S3 client once (checked under a lock).

        Raises:
            ConnectionError: if boto3 is unavailable or the client fails to build.
        """
        try:
            import boto3

            with self._lock:
                if not self._client:
                    self._client = boto3.client("s3")
                    self.logger.info("Initialized S3 client")
        except Exception as e:
            raise ConnectionError(f"Failed to initialize S3 client: {str(e)}")

    @retry(max_attempts=3, delay=1.0, exceptions=(StorageError,))
    def _put_raw(self, key: str, data: bytes) -> str:
        """Store ``data`` under ``key`` and return the key.

        Raises:
            StorageError: on upload failure or if no client is available
                (the original fell through and returned None despite the
                declared ``str`` return type).
        """
        try:
            if not self._client:
                self._initialize_client()
            if not self._client:
                raise StorageError("S3 client unavailable")
            self._client.put_object(Bucket=self.bucket_name, Key=key, Body=data)
            self.logger.debug(f"Uploaded to S3: {key}")
            return key
        except StorageError:
            raise
        except Exception as e:
            raise StorageError(f"S3 put failed: {str(e)}")

    @retry(max_attempts=3, delay=1.0, exceptions=(StorageError,))
    def get(self, key: str) -> bytes | None:
        """Fetch the object stored under ``key``; None if no client is available."""
        try:
            if not self._client:
                self._initialize_client()
            if self._client:
                response = self._client.get_object(Bucket=self.bucket_name, Key=key)
                return response["Body"].read()
            return None
        except Exception as e:
            raise StorageError(f"S3 get failed: {str(e)}")

    @retry(max_attempts=3, delay=1.0, exceptions=(StorageError,))
    def delete(self, key: str) -> bool:
        """Delete the object under ``key``; False if no client is available."""
        try:
            if not self._client:
                self._initialize_client()
            if self._client:
                self._client.delete_object(Bucket=self.bucket_name, Key=key)
                return True
            return False
        except Exception as e:
            raise StorageError(f"S3 delete failed: {str(e)}")

    @retry(max_attempts=3, delay=1.0, exceptions=(StorageError,))
    def list(self, prefix: str = "") -> List[str]:
        """List object keys starting with ``prefix`` (first page only, per
        list_objects_v2 defaults)."""
        try:
            if not self._client:
                self._initialize_client()
            if self._client:
                response = self._client.list_objects_v2(Bucket=self.bucket_name, Prefix=prefix)
                return [obj["Key"] for obj in response.get("Contents", [])]
            return []
        except Exception as e:
            raise StorageError(f"S3 list failed: {str(e)}")