altcodepro-polydb-python 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. altcodepro_polydb_python-2.1.0.dist-info/METADATA +378 -0
  2. altcodepro_polydb_python-2.1.0.dist-info/RECORD +51 -0
  3. altcodepro_polydb_python-2.1.0.dist-info/WHEEL +5 -0
  4. altcodepro_polydb_python-2.1.0.dist-info/licenses/LICENSE +21 -0
  5. altcodepro_polydb_python-2.1.0.dist-info/top_level.txt +1 -0
  6. polydb/__init__.py +64 -0
  7. polydb/adapters/AzureBlobStorageAdapter.py +77 -0
  8. polydb/adapters/AzureFileStorageAdapter.py +79 -0
  9. polydb/adapters/AzureQueueAdapter.py +61 -0
  10. polydb/adapters/AzureTableStorageAdapter.py +182 -0
  11. polydb/adapters/DynamoDBAdapter.py +216 -0
  12. polydb/adapters/EFSAdapter.py +50 -0
  13. polydb/adapters/FirestoreAdapter.py +193 -0
  14. polydb/adapters/GCPStorageAdapter.py +81 -0
  15. polydb/adapters/MongoDBAdapter.py +136 -0
  16. polydb/adapters/PostgreSQLAdapter.py +453 -0
  17. polydb/adapters/PubSubAdapter.py +83 -0
  18. polydb/adapters/S3Adapter.py +86 -0
  19. polydb/adapters/S3CompatibleAdapter.py +90 -0
  20. polydb/adapters/SQSAdapter.py +84 -0
  21. polydb/adapters/VercelKVAdapter.py +327 -0
  22. polydb/adapters/__init__.py +0 -0
  23. polydb/advanced_query.py +147 -0
  24. polydb/audit/AuditStorage.py +136 -0
  25. polydb/audit/__init__.py +7 -0
  26. polydb/audit/context.py +53 -0
  27. polydb/audit/manager.py +47 -0
  28. polydb/audit/models.py +86 -0
  29. polydb/base/NoSQLKVAdapter.py +301 -0
  30. polydb/base/ObjectStorageAdapter.py +42 -0
  31. polydb/base/QueueAdapter.py +27 -0
  32. polydb/base/SharedFilesAdapter.py +32 -0
  33. polydb/base/__init__.py +0 -0
  34. polydb/batch.py +163 -0
  35. polydb/cache.py +204 -0
  36. polydb/databaseFactory.py +748 -0
  37. polydb/decorators.py +21 -0
  38. polydb/errors.py +82 -0
  39. polydb/factory.py +107 -0
  40. polydb/models.py +39 -0
  41. polydb/monitoring.py +313 -0
  42. polydb/multitenancy.py +197 -0
  43. polydb/py.typed +0 -0
  44. polydb/query.py +150 -0
  45. polydb/registry.py +71 -0
  46. polydb/retry.py +76 -0
  47. polydb/schema.py +205 -0
  48. polydb/security.py +458 -0
  49. polydb/types.py +127 -0
  50. polydb/utils.py +61 -0
  51. polydb/validation.py +131 -0
@@ -0,0 +1,61 @@
+ # src/polydb/adapters/AzureQueueAdapter.py
+ import json
+ import os
+ import threading
+ from typing import Any, Dict, List
+ 
+ from polydb.base.QueueAdapter import QueueAdapter
+ from polydb.errors import ConnectionError, QueueError
+ from polydb.retry import retry
+ 
+ 
+ class AzureQueueAdapter(QueueAdapter):
+     """Azure Queue Storage with client reuse"""
+ 
+     def __init__(self):
+         super().__init__()
+         self.connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING") or ""
+         self._client = None
+         self._lock = threading.Lock()
+         self._initialize_client()
+ 
+     def _initialize_client(self):
+         """Initialize the Azure Queue Storage client once, guarded by a lock"""
+         try:
+             from azure.storage.queue import QueueServiceClient
+ 
+             with self._lock:
+                 if not self._client:
+                     self._client = QueueServiceClient.from_connection_string(self.connection_string)
+                     self.logger.info("Initialized Azure Queue Storage client")
+         except Exception as e:
+             raise ConnectionError(f"Failed to initialize Azure Queue Storage: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(QueueError,))
+     def send(self, message: Dict[str, Any], queue_name: str = "default") -> str:
+         """Send a JSON-serialized message to the queue and return its message id"""
+         try:
+             if self._client:
+                 queue_client = self._client.get_queue_client(queue_name)
+                 response = queue_client.send_message(json.dumps(message))
+                 return response.id
+             return ""
+         except Exception as e:
+             raise QueueError(f"Azure Queue send failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(QueueError,))
+     def receive(self, queue_name: str = "default", max_messages: int = 1) -> List[Dict[str, Any]]:
+         """Receive up to max_messages messages from the queue"""
+         try:
+             if self._client:
+                 queue_client = self._client.get_queue_client(queue_name)
+                 messages = queue_client.receive_messages(max_messages=max_messages)
+                 # The pop receipt on each message is discarded here, which is
+                 # why delete() below cannot actually remove messages.
+                 return [json.loads(msg.content) for msg in messages]
+             return []
+         except Exception as e:
+             raise QueueError(f"Azure Queue receive failed: {str(e)}")
+ 
+     def delete(self, message_id: str, queue_name: str = "default") -> bool:
+         """Delete a message from the queue.
+ 
+         Azure requires the pop receipt returned by receive_messages() in
+         addition to the message id; this interface does not carry one, so
+         the call is currently a no-op that always reports success.
+         """
+         return True
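
For context, here is a minimal usage sketch of this adapter (not part of the released diff). It assumes the azure-storage-queue package is installed and that AZURE_STORAGE_CONNECTION_STRING points at a real storage account; the queue name and payload are placeholders.

import os
os.environ.setdefault("AZURE_STORAGE_CONNECTION_STRING", "<connection-string>")  # placeholder

from polydb.adapters.AzureQueueAdapter import AzureQueueAdapter

queue = AzureQueueAdapter()

# send() serializes the dict to JSON and returns the Azure message id
msg_id = queue.send({"event": "user.created", "user_id": 42}, queue_name="events")

# receive() deserializes up to max_messages payloads back into dicts
for payload in queue.receive(queue_name="events", max_messages=5):
    print(payload["event"])

Note that because receive() drops the pop receipts, consumed messages reappear after the visibility timeout; delete() cannot remove them.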
@@ -0,0 +1,182 @@
+ # src/polydb/adapters/AzureTableStorageAdapter.py
+ import hashlib
+ import json
+ import os
+ import threading
+ from typing import Any, Dict, List, Optional
+ 
+ from polydb.base.NoSQLKVAdapter import NoSQLKVAdapter
+ from ..errors import NoSQLError, ConnectionError
+ from ..retry import retry
+ from ..types import JsonDict
+ from ..models import PartitionConfig
+ 
+ 
+ class AzureTableStorageAdapter(NoSQLKVAdapter):
+     """Azure Table Storage with Azure Blob overflow (limit: 1MB per entity)"""
+ 
+     AZURE_TABLE_MAX_SIZE = 1024 * 1024  # 1MB
+ 
+     def __init__(self, partition_config: Optional[PartitionConfig] = None):
+         super().__init__(partition_config)
+         self.max_size = self.AZURE_TABLE_MAX_SIZE
+         self.connection_string = os.getenv("AZURE_STORAGE_CONNECTION_STRING") or ""
+         self.table_name = os.getenv("AZURE_TABLE_NAME", "defaulttable") or ""
+         self.container_name = os.getenv("AZURE_CONTAINER_NAME", "overflow") or ""
+         self._client = None
+         self._table_client = None
+         self._blob_service = None
+         self._client_lock = threading.Lock()
+         self._initialize_client()
+ 
+     def _initialize_client(self):
+         try:
+             from azure.data.tables import TableServiceClient
+             from azure.storage.blob import BlobServiceClient
+ 
+             with self._client_lock:
+                 if not self._client:
+                     self._client = TableServiceClient.from_connection_string(self.connection_string)
+                     self._table_client = self._client.get_table_client(self.table_name)
+                     self._blob_service = BlobServiceClient.from_connection_string(self.connection_string)
+                     try:
+                         self._blob_service.create_container(self.container_name)
+                     except Exception:
+                         pass  # Container already exists
+ 
+             self.logger.info("Azure Table Storage initialized with Blob overflow")
+         except Exception as e:
+             raise ConnectionError(f"Azure Table init failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _put_raw(self, model: type, pk: str, rk: str, data: JsonDict) -> JsonDict:
+         try:
+             data_copy = dict(data)
+             data_copy['PartitionKey'] = pk
+             data_copy['RowKey'] = rk
+ 
+             # Check serialized size against the 1MB entity limit
+             data_bytes = json.dumps(data_copy).encode()
+             data_size = len(data_bytes)
+ 
+             if data_size > self.AZURE_TABLE_MAX_SIZE:
+                 # Too large for a table entity: store the payload in Blob Storage
+                 blob_id = hashlib.md5(data_bytes).hexdigest()
+                 blob_key = f"overflow/{pk}/{rk}/{blob_id}.json"
+ 
+                 if self._blob_service:
+                     blob_client = self._blob_service.get_blob_client(self.container_name, blob_key)
+                     blob_client.upload_blob(data_bytes, overwrite=True)
+                     self.logger.info(f"Stored overflow to Blob: {blob_key} ({data_size} bytes)")
+ 
+                 # Store a lightweight reference entity in the table
+                 reference_data = {
+                     'PartitionKey': pk,
+                     'RowKey': rk,
+                     '_overflow': True,
+                     '_blob_key': blob_key,
+                     '_size': data_size,
+                     '_checksum': blob_id,
+                 }
+ 
+                 if self._table_client:
+                     self._table_client.upsert_entity(reference_data)
+             else:
+                 # Small enough: store directly in the table
+                 if self._table_client:
+                     self._table_client.upsert_entity(data_copy)
+ 
+             return {'PartitionKey': pk, 'RowKey': rk}
+         except Exception as e:
+             raise NoSQLError(f"Azure Table put failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _get_raw(self, model: type, pk: str, rk: str) -> Optional[JsonDict]:
+         try:
+             if not self._table_client:
+                 return None
+ 
+             entity = self._table_client.get_entity(pk, rk)
+             entity_dict = dict(entity)
+ 
+             # Overflowed entities hold only a reference; fetch the payload from Blob Storage
+             if entity_dict.get('_overflow'):
+                 blob_key = entity_dict.get('_blob_key')
+                 checksum = entity_dict.get('_checksum')
+ 
+                 if blob_key and self._blob_service:
+                     blob_client = self._blob_service.get_blob_client(self.container_name, blob_key)
+                     blob_data = blob_client.download_blob().readall()
+ 
+                     # Verify integrity against the stored checksum
+                     actual_checksum = hashlib.md5(blob_data).hexdigest()
+                     if actual_checksum != checksum:
+                         raise NoSQLError(f"Checksum mismatch: expected {checksum}, got {actual_checksum}")
+ 
+                     retrieved = json.loads(blob_data.decode())
+                     self.logger.debug(f"Retrieved overflow from Blob: {blob_key}")
+                     return retrieved
+ 
+             return entity_dict
+         except Exception as e:
+             if "ResourceNotFound" in str(e):
+                 return None
+             raise NoSQLError(f"Azure Table get failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _query_raw(self, model: type, filters: Dict[str, Any], limit: Optional[int]) -> List[JsonDict]:
+         try:
+             if not self._table_client:
+                 return []
+ 
+             query_filter = None
+             if filters:
+                 filter_parts = []
+                 for k, v in filters.items():
+                     if isinstance(v, str):
+                         # Escape single quotes per OData rules so values cannot
+                         # break (or inject into) the filter expression
+                         escaped = v.replace("'", "''")
+                         filter_parts.append(f"{k} eq '{escaped}'")
+                     elif isinstance(v, bool):
+                         filter_parts.append(f"{k} eq {str(v).lower()}")
+                     else:
+                         filter_parts.append(f"{k} eq {v}")
+ 
+                 query_filter = " and ".join(filter_parts)
+ 
+             entities = self._table_client.query_entities(
+                 query_filter=query_filter,  # type: ignore
+                 results_per_page=limit
+             )
+ 
+             # results_per_page only sets the page size; enforce the limit here
+             results: List[JsonDict] = []
+             for entity in entities:
+                 results.append(dict(entity))
+                 if limit and len(results) >= limit:
+                     break
+             return results
+         except Exception as e:
+             raise NoSQLError(f"Azure Table query failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _delete_raw(self, model: type, pk: str, rk: str, etag: Optional[str]) -> JsonDict:
+         try:
+             if not self._table_client:
+                 return {'deleted': False}
+ 
+             # If the entity overflowed to Blob Storage, delete the blob first
+             try:
+                 entity = self._table_client.get_entity(pk, rk)
+                 entity_dict = dict(entity)
+ 
+                 if entity_dict.get('_overflow'):
+                     blob_key = entity_dict.get('_blob_key')
+                     if blob_key and self._blob_service:
+                         blob_client = self._blob_service.get_blob_client(self.container_name, blob_key)
+                         blob_client.delete_blob()
+                         self.logger.debug(f"Deleted overflow blob: {blob_key}")
+             except Exception:
+                 pass  # Entity might not exist or has no overflow
+ 
+             # Delete the table entity
+             self._table_client.delete_entity(pk, rk, etag=etag)
+             return {'deleted': True, 'PartitionKey': pk, 'RowKey': rk}
+         except Exception as e:
+             raise NoSQLError(f"Azure Table delete failed: {str(e)}")
+ 
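To illustrate the overflow path, here is a sketch that drives the internal _put_raw/_get_raw hooks directly (in the released package these are presumably called through NoSQLKVAdapter's public interface, which is not shown in this diff). The Record model, keys, and payload are hypothetical, and real Azure credentials are assumed.

from polydb.adapters.AzureTableStorageAdapter import AzureTableStorageAdapter

class Record:  # hypothetical model type; this adapter keys purely on pk/rk
    pass

adapter = AzureTableStorageAdapter()

# ~2MB payload: exceeds AZURE_TABLE_MAX_SIZE, so the JSON body is uploaded to
# Blob Storage and only a {_overflow, _blob_key, _checksum} reference entity
# lands in the table.
big = {"name": "report", "body": "x" * (2 * 1024 * 1024)}
adapter._put_raw(Record, "tenant-1", "report-2024", big)

# _get_raw follows the reference, downloads the blob, and verifies the MD5
# checksum before returning the original payload.
restored = adapter._get_raw(Record, "tenant-1", "report-2024")
assert restored is not None and restored["name"] == "report"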
@@ -0,0 +1,216 @@
+ # src/polydb/adapters/DynamoDBAdapter.py
+ import hashlib
+ import json
+ import os
+ import threading
+ from typing import Any, Dict, List, Optional
+ 
+ import boto3
+ from boto3.dynamodb.conditions import Key, Attr
+ 
+ from polydb.base.NoSQLKVAdapter import NoSQLKVAdapter
+ from ..errors import NoSQLError, ConnectionError
+ from ..retry import retry
+ from ..types import JsonDict
+ from ..models import PartitionConfig
+ 
+ 
+ class DynamoDBAdapter(NoSQLKVAdapter):
+     """DynamoDB with S3 overflow (limit: 400KB per item)"""
+ 
+     DYNAMODB_MAX_SIZE = 400 * 1024  # 400KB
+ 
+     def __init__(self, partition_config: Optional[PartitionConfig] = None):
+         super().__init__(partition_config)
+         self.max_size = self.DYNAMODB_MAX_SIZE
+         self.table_name = os.getenv("DYNAMODB_TABLE_NAME", "default")
+         self.bucket_name = os.getenv("S3_OVERFLOW_BUCKET", "dynamodb-overflow")
+         self._resource = None
+         self._s3_client = None
+         self._client_lock = threading.Lock()
+         self._initialize()
+ 
+     def _initialize(self):
+         try:
+             with self._client_lock:
+                 if not self._resource:
+                     self._resource = boto3.resource('dynamodb')
+                     self._s3_client = boto3.client('s3')
+ 
+                     # Ensure the overflow bucket exists. Failures are swallowed,
+                     # which also hides errors such as a missing LocationConstraint
+                     # outside us-east-1 or a name owned by another account.
+                     try:
+                         self._s3_client.create_bucket(Bucket=self.bucket_name)
+                     except Exception:
+                         pass  # Bucket already exists
+ 
+                     self.logger.info("DynamoDB initialized with S3 overflow")
+         except Exception as e:
+             raise ConnectionError(f"DynamoDB init failed: {str(e)}")
+ 
+     def _get_table(self, model: type):
+         if not self._resource:
+             self._initialize()
+ 
+         meta = getattr(model, '__polydb__', {})
+         table_name = meta.get('table') or self.table_name or model.__name__.lower()
+         return self._resource.Table(table_name)  # type: ignore
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _put_raw(self, model: type, pk: str, rk: str, data: JsonDict) -> JsonDict:
+         try:
+             data_copy = dict(data)
+             data_copy['PK'] = pk
+             data_copy['SK'] = rk
+ 
+             # Check serialized size against the 400KB item limit
+             data_bytes = json.dumps(data_copy).encode()
+             data_size = len(data_bytes)
+ 
+             if data_size > self.DYNAMODB_MAX_SIZE:
+                 # Too large for DynamoDB: store the payload in S3
+                 blob_id = hashlib.md5(data_bytes).hexdigest()
+                 blob_key = f"overflow/{pk}/{rk}/{blob_id}.json"
+ 
+                 if self._s3_client:
+                     self._s3_client.put_object(
+                         Bucket=self.bucket_name,
+                         Key=blob_key,
+                         Body=data_bytes
+                     )
+                     self.logger.info(f"Stored overflow to S3: {blob_key} ({data_size} bytes)")
+ 
+                 # Store a lightweight reference item in DynamoDB
+                 reference_data = {
+                     'PK': pk,
+                     'SK': rk,
+                     '_overflow': True,
+                     '_blob_key': blob_key,
+                     '_size': data_size,
+                     '_checksum': blob_id,
+                 }
+ 
+                 table = self._get_table(model)
+                 table.put_item(Item=reference_data)
+             else:
+                 # Store directly in DynamoDB. Note that boto3 rejects Python
+                 # floats; numeric values must be Decimal to round-trip.
+                 table = self._get_table(model)
+                 table.put_item(Item=data_copy)
+ 
+             return {'PK': pk, 'SK': rk}
+         except Exception as e:
+             raise NoSQLError(f"DynamoDB put failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _get_raw(self, model: type, pk: str, rk: str) -> Optional[JsonDict]:
+         try:
+             table = self._get_table(model)
+             response = table.get_item(Key={'PK': pk, 'SK': rk})
+ 
+             if 'Item' not in response:
+                 return None
+ 
+             item = response['Item']
+ 
+             # Overflowed items hold only a reference; fetch the payload from S3
+             if item.get('_overflow'):
+                 blob_key = item.get('_blob_key')
+                 checksum = item.get('_checksum')
+ 
+                 if blob_key and self._s3_client:
+                     s3_response = self._s3_client.get_object(
+                         Bucket=self.bucket_name,
+                         Key=blob_key
+                     )
+                     blob_data = s3_response['Body'].read()
+ 
+                     # Verify integrity against the stored checksum
+                     actual_checksum = hashlib.md5(blob_data).hexdigest()
+                     if actual_checksum != checksum:
+                         raise NoSQLError(f"Checksum mismatch: expected {checksum}, got {actual_checksum}")
+ 
+                     retrieved = json.loads(blob_data.decode())
+                     self.logger.debug(f"Retrieved overflow from S3: {blob_key}")
+                     return retrieved
+ 
+             return item
+         except Exception as e:
+             raise NoSQLError(f"DynamoDB get failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _query_raw(self, model: type, filters: Dict[str, Any], limit: Optional[int]) -> List[JsonDict]:
+         try:
+             table = self._get_table(model)
+ 
+             # Query when a partition key is supplied; otherwise fall back to a scan
+             if 'PK' in filters or 'partition_key' in filters:
+                 pk_value = filters.get('PK') or filters.get('partition_key')
+                 key_condition = Key('PK').eq(pk_value)
+ 
+                 if 'SK' in filters:
+                     key_condition = key_condition & Key('SK').eq(filters['SK'])
+ 
+                 kwargs = {'KeyConditionExpression': key_condition}
+ 
+                 # Remaining filters become a FilterExpression
+                 other_filters = {k: v for k, v in filters.items() if k not in ['PK', 'SK', 'partition_key']}
+                 if other_filters:
+                     filter_expr = None
+                     for k, v in other_filters.items():
+                         expr = Attr(k).eq(v)
+                         filter_expr = expr if filter_expr is None else filter_expr & expr
+                     kwargs['FilterExpression'] = filter_expr  # type: ignore
+ 
+                 # Limit caps items evaluated before the filter is applied, so a
+                 # single call may return fewer than `limit` matching items
+                 if limit:
+                     kwargs['Limit'] = limit  # type: ignore
+ 
+                 response = table.query(**kwargs)
+             else:
+                 # Scan with filters
+                 kwargs = {}
+                 if filters:
+                     filter_expr = None
+                     for k, v in filters.items():
+                         expr = Attr(k).eq(v)
+                         filter_expr = expr if filter_expr is None else filter_expr & expr
+                     kwargs['FilterExpression'] = filter_expr
+ 
+                 if limit:
+                     kwargs['Limit'] = limit
+ 
+                 response = table.scan(**kwargs)
+ 
+             return response.get('Items', [])
+         except Exception as e:
+             raise NoSQLError(f"DynamoDB query failed: {str(e)}")
+ 
+     @retry(max_attempts=3, delay=1.0, exceptions=(NoSQLError,))
+     def _delete_raw(self, model: type, pk: str, rk: str, etag: Optional[str]) -> JsonDict:
+         try:
+             table = self._get_table(model)
+ 
+             # If the item overflowed to S3, delete the S3 object first
+             try:
+                 response = table.get_item(Key={'PK': pk, 'SK': rk})
+                 if 'Item' in response:
+                     item = response['Item']
+ 
+                     if item.get('_overflow'):
+                         blob_key = item.get('_blob_key')
+                         if blob_key and self._s3_client:
+                             self._s3_client.delete_object(
+                                 Bucket=self.bucket_name,
+                                 Key=blob_key
+                             )
+                             self.logger.debug(f"Deleted overflow S3 object: {blob_key}")
+             except Exception:
+                 pass  # Item might not exist or has no overflow
+ 
+             # Delete the DynamoDB item
+             table.delete_item(Key={'PK': pk, 'SK': rk})
+             return {'deleted': True, 'PK': pk, 'SK': rk}
+         except Exception as e:
+             raise NoSQLError(f"DynamoDB delete failed: {str(e)}")
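
The query path is worth calling out: whether _query_raw issues an indexed Query or a full-table Scan depends entirely on the filter shape. A sketch under stated assumptions (hypothetical Order model and key values; AWS credentials and an existing "orders" table are assumed):

from polydb.adapters.DynamoDBAdapter import DynamoDBAdapter

class Order:
    # per _get_table, this overrides the DYNAMODB_TABLE_NAME default
    __polydb__ = {"table": "orders"}

adapter = DynamoDBAdapter()

# 'PK' present: a Query on the partition key, with the remaining filters
# applied server-side as a FilterExpression
open_orders = adapter._query_raw(Order, {"PK": "customer#7", "status": "open"}, limit=10)

# no partition key: a full-table Scan, far more expensive on large tables
all_open = adapter._query_raw(Order, {"status": "open"}, limit=100)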
@@ -0,0 +1,50 @@
+ # src/polydb/adapters/EFSAdapter.py
+ import os
+ from typing import List
+ 
+ from polydb.base.SharedFilesAdapter import SharedFilesAdapter
+ from polydb.errors import StorageError
+ 
+ 
+ class EFSAdapter(SharedFilesAdapter):
+     """AWS EFS (mounted filesystem)"""
+ 
+     def __init__(self):
+         super().__init__()
+         self.mount_point = os.getenv("EFS_MOUNT_POINT", "/mnt/efs")
+ 
+     def _full_path(self, path: str) -> str:
+         # Strip leading separators so os.path.join cannot discard the mount
+         # point when handed an absolute path
+         return os.path.join(self.mount_point, path.lstrip("/"))
+ 
+     def write(self, path: str, data: bytes) -> bool:
+         """Write file, creating parent directories as needed"""
+         try:
+             full_path = self._full_path(path)
+             os.makedirs(os.path.dirname(full_path), exist_ok=True)
+             with open(full_path, "wb") as f:
+                 f.write(data)
+             return True
+         except Exception as e:
+             raise StorageError(f"EFS write failed: {str(e)}")
+ 
+     def read(self, path: str) -> bytes:
+         """Read file"""
+         try:
+             with open(self._full_path(path), "rb") as f:
+                 return f.read()
+         except Exception as e:
+             raise StorageError(f"EFS read failed: {str(e)}")
+ 
+     def delete(self, path: str) -> bool:
+         """Delete file"""
+         try:
+             os.remove(self._full_path(path))
+             return True
+         except Exception as e:
+             raise StorageError(f"EFS delete failed: {str(e)}")
+ 
+     def list(self, directory: str = "/") -> List[str]:
+         """List files in directory"""
+         try:
+             return os.listdir(self._full_path(directory))
+         except Exception as e:
+             raise StorageError(f"EFS list failed: {str(e)}")
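
Finally, a usage sketch for the EFS adapter. Since it is plain filesystem I/O under the mount point, any writable local directory stands in for a real EFS mount; the path below is a placeholder.

import os
os.environ["EFS_MOUNT_POINT"] = "/tmp/efs-demo"  # placeholder mount

from polydb.adapters.EFSAdapter import EFSAdapter

efs = EFSAdapter()
efs.write("reports/2024/summary.txt", b"ok")  # creates parent directories
print(efs.read("reports/2024/summary.txt"))   # b'ok'
print(efs.list("reports/2024"))               # ['summary.txt']
efs.delete("reports/2024/summary.txt")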