erioon 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- erioon/auth.py +34 -0
- erioon/client.py +118 -0
- erioon/collection.py +371 -0
- erioon/create.py +130 -0
- erioon/database.py +82 -0
- erioon/delete.py +342 -0
- erioon/functions.py +422 -0
- erioon/ping.py +53 -0
- erioon/read.py +301 -0
- erioon/transaction.py +58 -0
- erioon/update.py +322 -0
- {erioon-0.1.5.dist-info → erioon-0.1.6.dist-info}/METADATA +1 -1
- erioon-0.1.6.dist-info/RECORD +16 -0
- erioon-0.1.6.dist-info/top_level.txt +1 -0
- erioon-0.1.5.dist-info/RECORD +0 -5
- erioon-0.1.5.dist-info/top_level.txt +0 -1
- {erioon-0.1.5.dist-info → erioon-0.1.6.dist-info}/LICENSE +0 -0
- {erioon-0.1.5.dist-info → erioon-0.1.6.dist-info}/WHEEL +0 -0
erioon/database.py
ADDED
@@ -0,0 +1,82 @@
|
|
1
|
+
# Copyright 2025-present Erioon, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# Visit www.erioon.com/dev-docs for more information about the python SDK
|
15
|
+
|
16
|
+
import json
|
17
|
+
from erioon.collection import Collection
|
18
|
+
|
19
|
+
class Database:
    """
    Handle for a single Erioon database.

    Collections are accessed with dictionary-style indexing, e.g. ``db["users"]``.
    """

    def __init__(self, user_id, metadata, database=None, cluster=None, sas_url=None):
        """
        Build a Database handle.

        Args:
            user_id (str): ID of the authenticated user.
            metadata (dict): Metadata describing the database and its collections.
            database (str, optional): Name or identifier of the database.
            cluster (str, optional): Cluster hosting the database.
            sas_url (str, optional): SAS URL for the Azure Blob Storage container.
        """
        self.user_id = user_id
        self.metadata = metadata
        # The database's own ID lives under metadata["database_info"]["_id"].
        self.db_id = metadata.get("database_info", {}).get("_id")
        self.database = database
        self.cluster = cluster
        self.sas_url = sas_url

    def __getitem__(self, collection_id):
        """
        Look up a collection by its identifier.

        Args:
            collection_id (str): Identifier of the collection to retrieve.

        Returns:
            Collection: A Collection handle built from the stored metadata,
            or the string "No collection found" when the ID is unknown.
        """
        db_info = self.metadata.get("database_info", {})
        coll_meta = db_info.get("collections", {}).get(collection_id)

        if not coll_meta:
            return "No collection found"

        return Collection(
            user_id=self.user_id,
            db_id=self.db_id,
            coll_id=collection_id,
            metadata=coll_meta,
            database=self.database,
            cluster=self.cluster,
            sas_url=self.sas_url,
        )

    def __str__(self):
        """
        Pretty-printed JSON dump of the database metadata.

        Returns:
            str: Metadata rendered with 4-space indentation, handy for debugging.
        """
        return json.dumps(self.metadata, indent=4)

    def __repr__(self):
        """
        Concise representation of this handle.

        Returns:
            str: Shows the database ID, cluster, and database name.
        """
        return f"<Database db_id={self.db_id}, cluster={self.cluster}, database={self.database}>"
|
erioon/delete.py
ADDED
@@ -0,0 +1,342 @@
|
|
1
|
+
# Copyright 2025-present Erioon, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
# Visit www.erioon.com/dev-docs for more information about the python SDK
|
15
|
+
|
16
|
+
import json
|
17
|
+
import io
|
18
|
+
import msgpack
|
19
|
+
from azure.storage.blob import ContainerClient
|
20
|
+
from erioon.functions import update_index_file_delete, check_nested_key, async_log
|
21
|
+
|
22
|
+
# DELETE ONE RECORD
def handle_delete_one(user_id, db_id, coll_id, data_to_delete, container_url):
    """
    Delete a single record from a collection.

    Dispatches on the shape of the filter: an exact '_id' lookup when the
    filter carries an '_id', otherwise a nested key-value match.

    Args:
        user_id: Identifier of the user performing the operation.
        db_id: Database ID containing the collection.
        coll_id: Collection ID.
        data_to_delete: Dict containing either '_id' or one key-value pair to match.
        container_url: SAS URL pointing to the storage container.

    Returns:
        Tuple (response dict, status code) indicating success or failure.
    """
    record_id = data_to_delete.get("_id")
    if record_id is not None or "_id" in data_to_delete:
        return handle_delete_with_id(user_id, db_id, coll_id, data_to_delete["_id"], container_url)
    return handle_delete_without_id(user_id, db_id, coll_id, data_to_delete, container_url)
|
44
|
+
|
45
|
+
# DELETE RECORD USING ID FILTER
def handle_delete_with_id(user_id, db_id, coll_id, record_id, container_url):
    """
    Delete a record exactly matching the given '_id'.

    Steps:
    - Create a ContainerClient from the SAS container URL.
    - Load index.json, which maps each shard to the record IDs it holds.
    - Locate the shard containing record_id.
    - Download and unpack that shard's msgpack blob.
    - Drop the record, repack, and re-upload the shard if the record was found.
    - Update index.json to reflect the deletion.
    - Log success or errors asynchronously.

    Args:
        user_id, db_id, coll_id: Identifiers for user, database, and collection.
        record_id: The unique '_id' of the record to delete.
        container_url: Azure Blob Storage container SAS URL.

    Returns:
        Tuple (response dict, status code) indicating operation result.
    """

    container_client = ContainerClient.from_container_url(container_url)

    index_blob_client = container_client.get_blob_client(f"{db_id}/{coll_id}/index.json")

    if not index_blob_client.exists():
        return {"error": "Index file does not exist"}, 404

    index_data = json.loads(index_blob_client.download_blob().readall())
    shard_number = None

    for shard in index_data:
        for shard_key, ids in shard.items():
            if record_id in ids:
                shard_number = int(shard_key.split("_")[-1])
                break
        # BUGFIX: the original tested `if shard_number:`, which is falsy for a
        # shard numbered 0 and would keep scanning later shards; test for None.
        if shard_number is not None:
            break

    if shard_number is None:
        async_log(user_id, db_id, coll_id, "DELETE", "ERROR", f"Record with _id {record_id} not found", 1, container_url)
        return {"error": f"Record with _id {record_id} not found"}, 404

    msgpack_blob_client = container_client.get_blob_client(f"{db_id}/{coll_id}/{coll_id}_{shard_number}.msgpack")

    try:
        msgpack_data = msgpack_blob_client.download_blob().readall()
        unpacked_data = msgpack.unpackb(msgpack_data, raw=False)

        kept_records = []
        found = False
        if isinstance(unpacked_data, list):
            # Single pass: keep everything except the target record.
            for record in unpacked_data:
                if record.get("_id") == record_id:
                    found = True
                else:
                    kept_records.append(record)

        if found:
            # Re-upload the shard without the deleted record.
            msgpack_blob_client.upload_blob(io.BytesIO(msgpack.packb(kept_records)), overwrite=True)

            update_index_file_delete(user_id, db_id, coll_id, record_id, shard_number, container_url)
            async_log(user_id, db_id, coll_id, "DELETE", "SUCCESS", f"Record with _id {record_id} deleted successfully", 1, container_url)
            return {"success": f"Record with _id {record_id} deleted successfully"}, 200
        else:
            # Index pointed here but the shard no longer holds the record.
            async_log(user_id, db_id, coll_id, "DELETE", "ERROR", f"Record with _id {record_id} not found in shard", 1, container_url)
            return {"error": f"Record with _id {record_id} not found in shard"}, 404

    except Exception as e:
        async_log(user_id, db_id, coll_id, "DELETE", "ERROR", f"Error deleting record {record_id}: {str(e)}", 1, container_url)
        return {"error": f"Error deleting record {record_id}: {str(e)}"}, 500
|
124
|
+
|
125
|
+
# DELETE RECORD USING KEY FILTER
def handle_delete_without_id(user_id, db_id, coll_id, data_to_delete, container_url):
    """
    Delete a single record matching a nested key-value pair when '_id' is not provided.
    Behaves like MongoDB's delete_one: deletes only the first matched record.

    Args:
        user_id, db_id, coll_id: Identifiers for user, database, and collection.
        data_to_delete: Dict whose first key-value pair is the match condition.
        container_url: SAS URL pointing to the storage container.

    Returns:
        Tuple (response dict, status code).
    """
    container_client = ContainerClient.from_container_url(container_url)

    # ROBUSTNESS: the original raised IndexError on an empty filter dict.
    if not data_to_delete:
        return {"error": "No filter provided for deletion"}, 400

    key = next(iter(data_to_delete))
    value = data_to_delete[key]

    directory_path = f"{db_id}/{coll_id}/"
    blob_list = container_client.list_blobs(name_starts_with=directory_path)

    for blob in blob_list:
        if not blob.name.endswith(".msgpack"):
            continue
        try:
            blob_client = container_client.get_blob_client(blob.name)
            msgpack_data = blob_client.download_blob().readall()

            unpacked_data = msgpack.unpackb(msgpack_data, raw=False)
            if isinstance(unpacked_data, list):
                for record in unpacked_data:
                    if check_nested_key(record, key, value):
                        # Delegate the actual deletion to the '_id' path so the
                        # shard rewrite and index update stay in one place.
                        delete_response, status = handle_delete_with_id(user_id, db_id, coll_id, record["_id"], container_url)
                        if status == 200:
                            return {"success": f"Record with _id {record['_id']} deleted successfully"}, 200
                        return delete_response, status
        except Exception:
            # Best effort: skip unreadable/corrupt shards and keep scanning.
            continue

    async_log(user_id, db_id, coll_id, "DELETE", "ERROR", f"No matching record found for key-value pair {key}:{value}", 1, container_url)
    return {"error": f"No matching record found for the specified key-value pair {key}:{value}"}, 404
|
160
|
+
|
161
|
+
# DELETE MULTIPLE RECORDS
def handle_delete_many(user_id, db_id, coll_id, data_to_delete_list, container_url, batch_size=10):
    """
    Delete multiple records from a collection.

    Accepts a mix of '_id' filters and key-value filters, processed in batches
    for performance and error isolation.

    Args:
        user_id: Identifier of the user making the request.
        db_id: The database identifier.
        coll_id: The collection identifier.
        data_to_delete_list: List of deletion filters (each dict holds either
            '_id' or a key-value pair).
        container_url: SAS URL of the blob storage container.
        batch_size: Number of deletions to process per batch.

    Returns:
        Tuple (response dict, status code); the response summarizes successes
        and failures per batch.
    """

    batch_results = []

    for start in range(0, len(data_to_delete_list), batch_size):
        batch = data_to_delete_list[start:start + batch_size]

        # Split the batch by filter shape.
        id_filters = [q["_id"] for q in batch if "_id" in q]
        key_filters = [q for q in batch if "_id" not in q]

        succeeded = []
        failed = []

        if id_filters:
            outcomes = handle_delete_many_with_id(user_id, db_id, coll_id, id_filters, container_url)
            for rid, (response, status_code) in zip(id_filters, outcomes):
                query = {"_id": rid}
                if 200 <= status_code < 300:
                    succeeded.append({
                        "delete_query": query,
                        "message": response.get("success", "Record deleted successfully"),
                    })
                else:
                    failed.append({
                        "delete_query": query,
                        "error": response.get("error", f"Failed to delete record - Status code {status_code}"),
                    })

        if key_filters:
            deleted_results, errors = handle_delete_many_without_id(user_id, db_id, coll_id, key_filters, container_url)

            succeeded.extend(
                {"delete_query": res["query"], "message": "Records deleted successfully"}
                for res in deleted_results
            )
            failed.extend(
                {"delete_query": err["query"], "error": err.get("error", "Unknown error")}
                for err in errors
            )

        batch_results.append({
            "queries": len(batch),
            "success": succeeded,
            "errors": failed,
        })

    total_success = sum(len(b["success"]) for b in batch_results)
    total_errors = sum(len(b["errors"]) for b in batch_results)

    # Full success, partial success (207 Multi-Status), or total failure.
    if total_errors == 0:
        return {
            "success": "Selected records deleted successfully",
            "details": batch_results,
            "total_deleted": total_success,
        }, 200
    if total_success > 0:
        return {
            "warning": "Partial success deleting selected records",
            "details": batch_results,
            "total_deleted": total_success,
            "total_errors": total_errors,
        }, 207
    return {
        "error": "Error deleting selected records",
        "details": batch_results,
    }, 500
|
248
|
+
|
249
|
+
# DELETE MULTIPLE RECORDS WITH ID FILTER
def handle_delete_many_with_id(user_id, db_id, coll_id, record_ids, container_url):
    """
    Delete multiple records by their '_id' values, one at a time.

    Args:
        user_id: User identifier.
        db_id: Database identifier.
        coll_id: Collection identifier.
        record_ids: List of '_id' values of records to delete.
        container_url: Azure Blob Storage container SAS URL.

    Returns:
        List of (response dict, status code) tuples, one per record id, in
        the same order as record_ids.
    """

    return [
        handle_delete_with_id(user_id, db_id, coll_id, rid, container_url)
        for rid in record_ids
    ]
|
270
|
+
|
271
|
+
# DELETE MULTIPLE RECORDS WITH KEY FILTER
def handle_delete_many_without_id(user_id, db_id, coll_id, queries, container_url):
    """
    Delete multiple records that match key-value queries across all shards.

    For each query, finds every record matching the key-value condition and
    deletes it, rewriting each affected shard once.

    Args:
        user_id: ID of the user performing the operation.
        db_id: Database ID containing the collection.
        coll_id: Collection ID.
        queries: List of dictionaries containing key-value match conditions.
        container_url: SAS URL pointing to the blob storage container.

    Returns:
        Tuple of:
        - deleted_results (list): Queries that resulted in at least one deletion.
        - errors (list): Error details for queries that failed or matched nothing.
    """

    container_client = ContainerClient.from_container_url(container_url)
    deleted_results = []
    errors = []

    directory_path = f"{db_id}/{coll_id}/"
    blob_list = list(container_client.list_blobs(name_starts_with=directory_path))

    for query in queries:
        # ROBUSTNESS: the original raised IndexError on an empty query dict.
        if not query:
            errors.append({"query": query, "error": "No matching records found"})
            continue

        key = next(iter(query))
        value = query[key]
        deleted_any = False

        for blob in blob_list:
            if not blob.name.endswith(".msgpack"):
                continue
            try:
                blob_client = container_client.get_blob_client(blob.name)
                msgpack_data = blob_client.download_blob().readall()

                records = msgpack.unpackb(msgpack_data, raw=False)
                if not isinstance(records, list):
                    continue

                # FIX: single pass splitting kept vs deleted records; the
                # original re-ran check_nested_key over the whole shard a
                # second time (after the upload) just to collect the ids.
                kept_records = []
                deleted_ids = []
                for record in records:
                    if check_nested_key(record, key, value):
                        deleted_ids.append(record["_id"])
                        deleted_any = True
                    else:
                        kept_records.append(record)

                if deleted_ids:
                    blob_client.upload_blob(io.BytesIO(msgpack.packb(kept_records)), overwrite=True)

                    # Shard number is encoded in the blob name: <coll>_<n>.msgpack
                    shard_number = int(blob.name.split("_")[-1].split(".")[0])
                    for rid in deleted_ids:
                        update_index_file_delete(user_id, db_id, coll_id, rid, shard_number, container_url)
                        async_log(user_id, db_id, coll_id, "DELETE", "SUCCESS", f"Record with _id {rid} deleted successfully", 1, container_url)

            except Exception as e:
                errors.append({"query": query, "error": str(e)})

        if deleted_any:
            deleted_results.append({"query": query, "status": "deleted"})
        else:
            errors.append({"query": query, "error": "No matching records found"})

    return deleted_results, errors
|