erioon 0.1.2__tar.gz → 0.1.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
erioon-0.1.4/PKG-INFO ADDED
@@ -0,0 +1,30 @@
+ Metadata-Version: 2.2
+ Name: erioon
+ Version: 0.1.4
+ Summary: Erioon Python SDK for seamless interaction with Erioon data services
+ Author: Zyber Pireci
+ Author-email: zyber.pireci@erioon.com
+ License: MIT
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Topic :: Software Development :: Libraries
+ Classifier: Intended Audience :: Developers
+ Requires-Python: >=3.6
+ Description-Content-Type: text/plain
+ License-File: LICENSE
+ Requires-Dist: requests>=2.25.1
+ Requires-Dist: pyodbc
+ Requires-Dist: azure-storage-blob
+ Requires-Dist: msgpack
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: license
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary
+
+ The Erioon SDK for Python provides a robust interface to interact with Erioon resources such as collections, databases, and clusters. It supports CRUD operations, querying, and connection management with ease, enabling developers to integrate Erioon data services into their applications efficiently.
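For orientation, a minimal, hypothetical usage sketch of the API surface that appears in this diff (Auth, ErioonClient lookup by database ID, and the Collection methods). The credential string, database ID, and collection ID are placeholders, and how a Database object hands out Collection instances is not shown in this diff, so that step is an assumption.

    from erioon.auth import Auth

    # Authenticate with an API key ("erioon-...") or an "email:password" pair (placeholder values)
    client = Auth("user@example.com:secret")

    # Databases are looked up by ID on the client
    db = client["my_db_id"]

    # Assumption: the Database object exposes collections by ID the same way (not shown in this diff)
    orders = db["orders"]
    orders.insert_one({"item": "book", "qty": 1})
    print(orders.find_one({"item": "book"}))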
@@ -0,0 +1,33 @@
+ # Copyright 2025-present Erioon, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from erioon.client import ErioonClient
+
+ def Auth(credential_string):
+     """
+     Authenticates a user using either an Erioon API key ("erioon-...") or a colon-separated "email:password" string.
+
+     Parameters:
+     - credential_string (str): An API key, or a string in the format "email:password"
+
+     Returns:
+     - ErioonClient instance: An instance representing the authenticated user.
+       If authentication fails, the instance will contain the error message.
+
+     Example usage:
+     >>> from erioon.auth import Auth
+     >>> client = Auth("<EMAIL>:<PASSWORD>")  # or Auth("<API_KEY>")
+     >>> print(client)  # prints user_id if successful or error message if not
+     """
+     return ErioonClient(credential_string)
@@ -1,46 +1,49 @@
- import os
- import json
+ # Copyright 2025-present Erioon, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
  import requests
- from datetime import datetime, timezone
  from erioon.database import Database

  class ErioonClient:
-     def __init__(self, api, email, password, base_url="https://sdk.erioon.com"):
-         self.api = api
-         self.email = email
-         self.password = password
+     def __init__(self, credential_string, base_url="https://sdk.erioon.com"):
+         self.credential_string = credential_string
          self.base_url = base_url
          self.user_id = None
-         self.token_path = os.path.expanduser(f"~/.erioon_token_{self._safe_filename(email)}")
          self.login_metadata = None

+         parts = credential_string.split(":")
+
+         if len(parts) == 1 and credential_string.startswith("erioon"):
+             self.api = credential_string
+             self.email = None
+             self.password = None
+         else:
+             if len(parts) == 2:
+                 self.email, self.password = parts
+                 self.api = None
+             else:
+                 raise ValueError("Invalid credential format. Use 'erioon-xxxxx' or 'email:password'")
+
          try:
-             self.login_metadata = self._load_or_login()
+             self.login_metadata = self._login()
              self._update_metadata_fields()
          except Exception as e:
              print(f"[ErioonClient] Initialization error: {e}")

-     def _safe_filename(self, text):
-         return "".join(c if c.isalnum() else "_" for c in text)
-
-     def _do_login_and_cache(self):
-         metadata = self._login()
-         with open(self.token_path, "w") as f:
-             json.dump(metadata, f)
-         return metadata
-
-     def _load_or_login(self):
-         if os.path.exists(self.token_path):
-             with open(self.token_path, "r") as f:
-                 metadata = json.load(f)
-             if self._is_sas_expired(metadata):
-                 return self._do_login_and_cache()
-             return metadata
-         else:
-             return self._do_login_and_cache()

      def _login(self):
-         url = f"{self.base_url}/login_with_credentials"
+         url = f"{self.base_url}/login"
          payload = {"api_key": self.api, "email": self.email, "password": self.password}
          headers = {"Content-Type": "application/json"}

@@ -48,7 +51,6 @@ class ErioonClient:
          if response.status_code == 200:
              data = response.json()
              self.login_metadata = data
-             self._update_metadata_fields()
              return data
          else:
              try:
@@ -65,46 +67,11 @@ class ErioonClient:
          self.database = self.login_metadata.get("database")
          self.sas_tokens = self.login_metadata.get("sas_tokens", {})

-     def _clear_cached_token(self):
-         if os.path.exists(self.token_path):
-             os.remove(self.token_path)
-         self.user_id = None
-         self.login_metadata = None
-
-     def _is_sas_expired(self, metadata):
-         expiry_str = metadata.get("sas_token_expiry") or metadata.get("expiry")
-         if not expiry_str:
-             return True
-         try:
-             expiry_dt = datetime.fromisoformat(expiry_str.replace("Z", "+00:00"))
-             now = datetime.now(timezone.utc)
-             return now >= expiry_dt
-         except Exception:
-             return True
-
      def __getitem__(self, db_id):
          if not self.user_id:
-             print(f"[ErioonClient] Not authenticated. Cannot access database {db_id}.")
-             raise ValueError("Client not authenticated.")
-
-         try:
-             return self._get_database_info(db_id)
-         except Exception as e:
-             print(f"[ErioonClient] Access failed for DB {db_id}, retrying login... ({e})")
-             self._clear_cached_token()
-
-             try:
-                 self.login_metadata = self._do_login_and_cache()
-                 self._update_metadata_fields()
-             except Exception as login_error:
-                 print(f"[ErioonClient] Re-login failed: {login_error}")
-                 raise RuntimeError(login_error)
+             raise ValueError("Client not authenticated. Cannot access database.")

-         try:
-             return self._get_database_info(db_id)
-         except Exception as second_error:
-             print(f"[ErioonClient] DB fetch after re-auth failed: {second_error}")
-             raise RuntimeError(second_error)
+         return self._get_database_info(db_id)

      def _get_database_info(self, db_id):
          payload = {"user_id": self.user_id, "db_id": db_id}
@@ -0,0 +1,370 @@
+ # Copyright 2025-present Erioon, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ from urllib.parse import urlparse
+ from erioon.read import handle_get_all, handle_find_one, handle_find_many, handle_count_records
+ from erioon.create import handle_insert_one, handle_insert_many
+ from erioon.delete import handle_delete_one, handle_delete_many
+ from erioon.update import handle_update_one, handle_update_many, handle_replace_one
+ from erioon.ping import handle_connection_ping
+
+ class Collection:
+     def __init__(
+         self,
+         user_id,
+         db_id,
+         coll_id,
+         metadata,
+         database,
+         cluster,
+         sas_url,
+     ):
+         """
+         Initialize a Collection object that wraps Erioon collection access.
+
+         Args:
+             user_id (str): The authenticated user's ID.
+             db_id (str): The database ID.
+             coll_id (str): The collection ID.
+             metadata (dict): Metadata about this collection (e.g., schema, indexing).
+             database (str): Name or ID of the database.
+             cluster (str): Cluster name or ID hosting the database.
+             sas_url (str): Full SAS URL used to access the storage container.
+         """
+         self.user_id = user_id
+         self.db_id = db_id
+         self.coll_id = coll_id
+         self.metadata = metadata
+         self.database = database
+         self.cluster = cluster
+
+         parsed_url = urlparse(sas_url.rstrip("/"))
+         container_name = parsed_url.path.lstrip("/").split("/")[0]
+         account_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
+         sas_token = parsed_url.query
+         self.container_url = f"{account_url}/{container_name}?{sas_token}"
+
+     # PRINT ERIOON
+     def _print_loading(self):
+         """Print a loading message for CLI / SDK usage."""
+         print("Erioon is loading...")
+
+     # CHECK READ / WRITE LICENCE
+     def _is_read_only(self):
+         """Check if the current database is marked as read-only."""
+         return self.database == "read"
+
+     # RESPONSE FOR BLOCKED WRITE OPERATIONS
+     def _read_only_response(self):
+         """Standardized error response for blocked write operations."""
+         return "This user is not allowed to perform write operations.", 403
+
+     # GET ALL RECORDS OF A COLLECTION
+     def get_all(self, limit=1000000):
+         """
+         Fetch all records from the collection (up to a limit).
+         """
+         self._print_loading()
+         result, status_code = handle_get_all(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             limit=limit,
+             container_url=self.container_url,
+         )
+         return result
+
+     # FIND A SPECIFIC RECORD OF A COLLECTION
+     def find_one(self, filters: dict | None = None):
+         """
+         Fetch a single record that matches specific key-value filters.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+
+         if filters is None:
+             filters = {}
+
+         search_criteria = [{k: v} for k, v in filters.items()]
+
+         result, status_code = handle_find_one(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             search_criteria=search_criteria,
+             container_url=self.container_url,
+         )
+         return result
+
+     # FIND MULTIPLE RECORDS OF A COLLECTION
+     def find_many(self, filters: dict | None = None, limit: int = 1000):
+         """
+         Fetch multiple records that match specific key-value filters.
+
+         Args:
+             filters (dict): Filters to match records.
+             limit (int): Maximum number of records to return (default: 1000).
+
+         Returns:
+             dict: Result from `handle_find_many()`
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+
+         self._print_loading()
+
+         if filters is None:
+             filters = {}
+
+         if limit > 500_000:
+             raise ValueError("Limit of 500,000 exceeded")
+
+         search_criteria = [{k: v} for k, v in filters.items()]
+
+         result, status_code = handle_find_many(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             search_criteria=search_criteria,
+             limit=limit,
+             container_url=self.container_url,
+         )
+         return result
+
+     # INSERT A SINGLE RECORD INTO A COLLECTION
+     def insert_one(self, record):
+         """
+         Insert a single record into the collection.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+         response, status = handle_insert_one(
+             user_id_cont=self.user_id,
+             database=self.db_id,
+             collection=self.coll_id,
+             record=record,
+             container_url=self.container_url,
+         )
+         if status == 200:
+             print("Insertion was successful.")
+         else:
+             print(f"Error inserting record: {response}")
+         return response, status
+
+     # INSERT MULTIPLE RECORDS INTO A COLLECTION
+     def insert_many(self, data):
+         """
+         Insert multiple records into the collection.
+
+         Args:
+             data (list): List of record dicts.
+
+         Returns:
+             tuple: (response message, HTTP status code)
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+         self._print_loading()
+         response, status = handle_insert_many(
+             user_id_cont=self.user_id,
+             database=self.db_id,
+             collection=self.coll_id,
+             data=data,
+             container_url=self.container_url,
+         )
+         if status == 200:
+             print("Insertion of multiple records was successful.")
+         else:
+             print(f"Error inserting records: {response}")
+         return response, status
+
+     # DELETE A SINGLE RECORD BASED ON _ID OR KEY
+     def delete_one(self, record_to_delete):
+         """
+         Delete a single record based on its _id or nested key.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+         response, status = handle_delete_one(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             data_to_delete=record_to_delete,
+             container_url=self.container_url,
+         )
+         if status == 200:
+             print("Deletion was successful.")
+         else:
+             print(f"Error deleting record: {response}")
+         return response, status
+
+     # DELETE MANY RECORDS IN BATCHES
+     def delete_many(self, records_to_delete_list, batch_size=10):
+         """
+         Delete multiple records in batches.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+         self._print_loading()
+         response, status = handle_delete_many(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             data_to_delete_list=records_to_delete_list,
+             batch_size=batch_size,
+             container_url=self.container_url,
+         )
+         if status == 200:
+             print("Batch deletion was successful.")
+         else:
+             print(f"Error deleting records: {response}")
+         return response, status
+
+     # UPDATE A RECORD
+     def update_one(self, filter_query: dict, update_query: dict):
+         """
+         Update a record in place by filtering and applying update logic.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+         response, status = handle_update_one(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             filter_query=filter_query,
+             update_query=update_query,
+             container_url=self.container_url,
+         )
+         if status == 200:
+             print("Update was successful.")
+         else:
+             print(f"Error updating record: {response}")
+         return response, status
+
+     # UPDATE MULTIPLE RECORDS
+     def update_many(self, update_tasks: list):
+         """
+         Update multiple records in place by applying a list of filter + update operations.
+
+         Each item in `update_tasks` should be a dict:
+             {
+                 "filter": { ... },
+                 "update": {
+                     "$set": {...}, "$push": {...}, "$remove": [...]
+                 }
+             }
+
+         Returns:
+             (dict, int): Summary response and HTTP status code.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+         self._print_loading()
+
+         response, status = handle_update_many(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             update_tasks=update_tasks,
+             container_url=self.container_url,
+         )
+
+         if status == 200:
+             print(f"Successfully updated {response.get('success')}")
+         else:
+             print(f"Error updating records: {response}")
+
+         return response, status
+
+     # REPLACE A SINGLE RECORD BASED ON THE FILTER QUERY
+     def replace_one(self, filter_query: dict, replacement: dict):
+         """
+         Replace a single record matching `filter_query` with the full `replacement` document.
+
+         - If `_id` is **not** in the replacement, the original `_id` is preserved.
+         - If `_id` **is** in the replacement, the new `_id` is used.
+
+         Args:
+             filter_query (dict): Must contain a single key-value pair.
+             replacement (dict): New record to replace the old one.
+
+         Returns:
+             (dict, int): Response message and HTTP status code.
+         """
+         if self._is_read_only():
+             return self._read_only_response()
+
+         response, status = handle_replace_one(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             filter_query=filter_query,
+             replacement=replacement,
+             container_url=self.container_url,
+         )
+
+         if status == 200:
+             print("Replacement was successful.")
+         else:
+             print(f"Error replacing record: {response}")
+
+         return response, status
+
+     # PING AND CHECK CONNECTION
+     def ping(self):
+         """
+         Health check / ping to verify collection accessibility.
+         """
+         self._print_loading()
+         response, status = handle_connection_ping(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             container_url=self.container_url,
+         )
+         if status == 200:
+             print("Connection ping successful.")
+         else:
+             print(f"Ping failed: {response}")
+         return response, status
+
+     # COUNT ALL THE RECORDS
+     def count_records(self) -> int:
+         """
+         Count the total number of documents in the collection (across all shards).
+
+         Returns:
+             int: Total document count.
+         """
+         if self._is_read_only():
+             return 0
+         self._print_loading()
+
+         count, status = handle_count_records(
+             user_id=self.user_id,
+             db_id=self.db_id,
+             coll_id=self.coll_id,
+             container_url=self.container_url,
+         )
+         return count
+
+     def __str__(self):
+         """Pretty-print the collection metadata."""
+         return json.dumps(self.metadata, indent=4)
+
+     def __repr__(self):
+         """Simplified representation for debugging or introspection."""
+         return f"<Collection coll_id={self.coll_id}>"
@@ -0,0 +1,130 @@
+ # Copyright 2025-present Erioon, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import uuid
+ from erioon.functions import (
+     create_msgpack_file,
+     update_index_file_insert,
+     calculate_shard_number,
+     async_log,
+     is_duplicate_id
+ )
+
+ # INSERT ONE RECORD
+ def handle_insert_one(user_id_cont, database, collection, record, container_url):
+     """
+     Insert a single record into the collection.
+
+     - If no '_id' is provided, generate a new UUID.
+     - If the provided '_id' is a duplicate, generate a new one and update the record.
+     - Create or append the record in a shard file.
+     - Update index.json to map the record to the appropriate shard.
+     - Log success or errors asynchronously.
+
+     Args:
+         user_id_cont: User identifier.
+         database: Database name.
+         collection: Collection name.
+         record: Dict representing the record to insert.
+         container_url: Blob Storage container SAS URL.
+
+     Returns:
+         Tuple (response dict, status code) indicating success or failure.
+     """
+     try:
+         if "_id" not in record or not record["_id"]:
+             record["_id"] = str(uuid.uuid4())
+
+         rec_id = record["_id"]
+
+         if is_duplicate_id(user_id_cont, database, collection, rec_id, container_url):
+             new_id = str(uuid.uuid4())
+             record["_id"] = new_id
+             rec_id = new_id
+             msg = f"Record inserted successfully in {collection} with a new _id {rec_id} because the provided _id was already present."
+         else:
+             msg = f"Record inserted successfully in {collection} with _id {rec_id}"
+
+         async_log(user_id_cont, database, collection, "POST", "SUCCESS", msg, 1, container_url)
+
+         create_msgpack_file(user_id_cont, database, collection, record, container_url)
+
+         shard_number = calculate_shard_number(user_id_cont, database, collection, container_url)
+         update_index_file_insert(user_id_cont, database, collection, rec_id, shard_number, container_url)
+
+         return {"status": "OK", "message": msg, "record": record}, 200
+
+     except Exception as e:
+         error_msg = f"An error occurred during insert in {collection}: {str(e)}"
+         async_log(user_id_cont, database, collection, "POST", "ERROR", error_msg, 1, container_url)
+         return {"status": "KO", "message": "Failed to insert record.", "error": str(e)}, 500
+
+ # INSERT MANY RECORDS
+ def handle_insert_many(user_id_cont, database, collection, data, container_url):
+     """
+     Insert multiple records in bulk.
+
+     - `data` is a list of dicts, each representing a record.
+     - For each record:
+         - Ensure it has a unique _id (generate a new UUID if missing or duplicate).
+         - Write the record to the appropriate shard.
+         - Update index.json with the _id-to-shard mapping.
+     - Log the batch insert operation with details.
+     - Return an aggregate success or failure response.
+
+     Args:
+         user_id_cont: User identifier.
+         database: Database name.
+         collection: Collection name.
+         data: List of record dicts.
+         container_url: Blob Storage container SAS URL.
+
+     Returns:
+         Tuple (response dict, status code) with a summary of insert results.
+     """
+     insert_results = []
+     count = len(data)
+
+     try:
+         for record in data:
+             if "_id" not in record or not record["_id"]:
+                 record["_id"] = str(uuid.uuid4())
+
+             rec_id = record["_id"]
+
+             if is_duplicate_id(user_id_cont, database, collection, rec_id, container_url):
+                 new_id = str(uuid.uuid4())
+                 record["_id"] = new_id
+                 rec_id = new_id
+                 msg = f"Inserted with new _id {rec_id} (original _id was already present)."
+             else:
+                 msg = f"Inserted with _id {rec_id}."
+
+             create_msgpack_file(user_id_cont, database, collection, record, container_url)
+
+             shard_number = calculate_shard_number(user_id_cont, database, collection, container_url)
+             update_index_file_insert(
+                 user_id_cont, database, collection, rec_id, shard_number, container_url
+             )
+
+             insert_results.append({"_id": rec_id, "message": msg})
+
+         async_log(user_id_cont, database, collection, "POST", "SUCCESS", insert_results, count, container_url)
+         return {"success": "Records inserted successfully", "details": insert_results}, 200
+
+     except Exception as e:
+         general_error_msg = f"Unexpected error during bulk insert: {str(e)}"
+         async_log(user_id_cont, database, collection, "POST", "ERROR", general_error_msg, 1, container_url)
+         return {"status": "KO", "message": general_error_msg}, 500