esuls 0.1.13__tar.gz → 0.1.15__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {esuls-0.1.13/src/esuls.egg-info → esuls-0.1.15}/PKG-INFO +2 -3
- {esuls-0.1.13 → esuls-0.1.15}/README.md +1 -2
- {esuls-0.1.13 → esuls-0.1.15}/pyproject.toml +1 -1
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls/db_cli.py +72 -41
- esuls-0.1.15/src/esuls/tests/test_db_concurrent.py +161 -0
- {esuls-0.1.13 → esuls-0.1.15/src/esuls.egg-info}/PKG-INFO +2 -3
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls.egg-info/SOURCES.txt +2 -1
- {esuls-0.1.13 → esuls-0.1.15}/LICENSE +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/setup.cfg +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls/__init__.py +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls/download_icon.py +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls/request_cli.py +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls/utils.py +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls.egg-info/dependency_links.txt +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls.egg-info/requires.txt +0 -0
- {esuls-0.1.13 → esuls-0.1.15}/src/esuls.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: esuls
-Version: 0.1.13
+Version: 0.1.15
 Summary: Utility library for async database operations, HTTP requests, and parallel execution
 Author-email: IperGiove <ipergiove@gmail.com>
 License: MIT
@@ -228,8 +228,7 @@ pip install -e .
 
 ```bash
 # With uv
-uv build
-twine upload dist/*
+uv build && twine upload dist/*
 
 # Or with traditional tools
 pip install build twine
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "esuls"
-version = "0.1.13"
+version = "0.1.15"
 description = "Utility library for async database operations, HTTP requests, and parallel execution"
 readme = "README.md"
 requires-python = ">=3.14"
@@ -31,6 +31,8 @@ class AsyncDB(Generic[SchemaType]):
 
     # Shared write locks per database file (class-level)
     _db_locks: dict[str, asyncio.Lock] = {}
+    # Lock for schema initialization (class-level)
+    _schema_init_lock: asyncio.Lock = None
 
     def __init__(self, db_path: Union[str, Path], table_name: str, schema_class: Type[SchemaType]):
         """Initialize AsyncDB with a path and schema dataclass."""
@@ -59,6 +61,10 @@ class AsyncDB(Generic[SchemaType]):
 
     async def _get_connection(self, max_retries: int = 5) -> aiosqlite.Connection:
         """Create a new optimized connection with retry logic for concurrent access."""
+        # Ensure schema init lock exists (lazy init for asyncio compatibility)
+        if AsyncDB._schema_init_lock is None:
+            AsyncDB._schema_init_lock = asyncio.Lock()
+
         last_error = None
         for attempt in range(max_retries):
             try:
@@ -69,10 +75,13 @@ class AsyncDB(Generic[SchemaType]):
                 await db.execute("PRAGMA cache_size=10000")
                 await db.execute("PRAGMA busy_timeout=30000")  # 30s busy timeout
 
-                # Initialize schema if needed (
+                # Initialize schema if needed (with lock to prevent race condition)
                 if self._db_key not in AsyncDB._initialized_schemas:
-[2 removed lines (old 74-75) not captured in this diff view]
+                    async with AsyncDB._schema_init_lock:
+                        # Double-check after acquiring lock
+                        if self._db_key not in AsyncDB._initialized_schemas:
+                            await self._init_schema(db)
+                            AsyncDB._initialized_schemas.add(self._db_key)
 
                 return db
             except Exception as e:
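The hunk above is double-checked locking applied to one-time schema initialization: check without the lock, acquire the lock, check again, then initialize. A minimal standalone sketch of that pattern with illustrative names (`init_once`, `_initialized`, `_init_lock` are not part of the esuls API):

```python
import asyncio
from typing import Awaitable, Callable

# Illustrative module-level state, mirroring AsyncDB's class-level attributes.
_initialized: set[str] = set()
_init_lock: asyncio.Lock | None = None

async def init_once(key: str, initializer: Callable[[], Awaitable[None]]) -> None:
    """Run `initializer` at most once per key, even when many tasks race."""
    global _init_lock
    if _init_lock is None:           # lazy creation, as in the hunk above
        _init_lock = asyncio.Lock()
    if key in _initialized:          # fast path: already initialized, no lock needed
        return
    async with _init_lock:           # serialize first-time initialization
        if key in _initialized:      # double-check: another task may have finished first
            return
        await initializer()
        _initialized.add(key)
```

Without the second check inside the lock, two tasks that both pass the outer check would each run the initializer once the first releases the lock.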
@@ -287,40 +296,51 @@ class AsyncDB(Generic[SchemaType]):
 
         saved_count = 0
 
-[34 removed lines (old 290-323) not captured in this diff view]
+        max_retries = 3
+        for attempt in range(max_retries):
+            try:
+                async with self._write_lock:
+                    async with self.transaction() as db:
+                        for item in items:
+                            try:
+                                if not isinstance(item, self.schema_class):
+                                    if not skip_errors:
+                                        raise TypeError(f"Expected {self.schema_class.__name__}, got {type(item).__name__}")
+                                    continue
+
+                                # Extract and process data
+                                data = asdict(item)
+                                item_id = data.pop('id', None) or str(uuid.uuid4())
+
+                                # Ensure created_at and updated_at are set
+                                now = datetime.now()
+                                if not data.get('created_at'):
+                                    data['created_at'] = now
+                                data['updated_at'] = now
+
+                                # Prepare SQL and values
+                                field_names = tuple(sorted(data.keys()))
+                                sql = self._generate_save_sql(field_names)
+                                values = [self._serialize_value(data[name]) for name in field_names]
+                                values.append(item_id)
+
+                                # Execute save
+                                await db.execute(sql, values)
+                                saved_count += 1
+
+                            except Exception as e:
+                                if skip_errors:
+                                    logger.warning(f"Save error (skipped): {e}")
+                                    continue
+                                raise
+                break
+            except Exception as e:
+                if "database is locked" in str(e) and attempt < max_retries - 1:
+                    wait_time = 0.2 * (2 ** attempt)
+                    logger.debug(f"DB locked, retry {attempt + 1}/{max_retries} in {wait_time}s")
+                    await asyncio.sleep(wait_time)
+                    continue
+                raise
 
         return saved_count
 
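The bulk-save change above wraps the write lock and transaction in a retry loop with exponential backoff, waiting 0.2 s, then 0.4 s, then 0.8 s when SQLite reports `database is locked`. For reference, the same shape factored into a standalone helper (`retry_on_locked` is a hypothetical name, not an esuls API; in the diff the loop is inlined at each call site):

```python
import asyncio
import logging

logger = logging.getLogger(__name__)

async def retry_on_locked(operation, max_retries: int = 3, base_delay: float = 0.2):
    """Run the zero-argument coroutine function `operation`, retrying with
    exponential backoff while SQLite reports 'database is locked'."""
    for attempt in range(max_retries):
        try:
            return await operation()
        except Exception as e:
            # Retry only on lock contention, and never after the final attempt.
            if "database is locked" in str(e) and attempt < max_retries - 1:
                wait_time = base_delay * (2 ** attempt)  # 0.2s, 0.4s, 0.8s, ...
                logger.debug(f"DB locked, retry {attempt + 1}/{max_retries} in {wait_time}s")
                await asyncio.sleep(wait_time)
                continue
            raise
```

In the worst case the loop sleeps roughly 0.6 s in total before the third and final attempt, on top of the 30 s `busy_timeout` already set on each connection.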
@@ -356,10 +376,21 @@ class AsyncDB(Generic[SchemaType]):
         values = [self._serialize_value(data[name]) for name in field_names]
         values.append(item_id)
 
-        # Perform save with reliable transaction
-[3 removed lines (old 360-362) not captured in this diff view]
+        # Perform save with reliable transaction (retry on "database is locked")
+        max_retries = 3
+        for attempt in range(max_retries):
+            try:
+                async with self._write_lock:
+                    async with self.transaction() as db:
+                        await db.execute(sql, values)
+                break
+            except Exception as e:
+                if "database is locked" in str(e) and attempt < max_retries - 1:
+                    wait_time = 0.2 * (2 ** attempt)
+                    logger.debug(f"DB locked, retry {attempt + 1}/{max_retries} in {wait_time}s")
+                    await asyncio.sleep(wait_time)
+                    continue
+                raise
 
         return True
 
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Test concurrent database access to verify retry logic works.
|
|
3
|
+
"""
|
|
4
|
+
import asyncio
|
|
5
|
+
import tempfile
|
|
6
|
+
import os
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from esuls.db_cli import AsyncDB, BaseModel
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
|
|
14
|
+
class TestItem(BaseModel):
|
|
15
|
+
name: str = ""
|
|
16
|
+
value: int = 0
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
async def test_concurrent_reads(temp_db):
|
|
20
|
+
"""Test many concurrent read operations."""
|
|
21
|
+
db = AsyncDB(temp_db, "items", TestItem)
|
|
22
|
+
|
|
23
|
+
# Save some test data first
|
|
24
|
+
for i in range(10):
|
|
25
|
+
await db.save(TestItem(name=f"item_{i}", value=i))
|
|
26
|
+
|
|
27
|
+
# Run 100 concurrent reads
|
|
28
|
+
async def read_all():
|
|
29
|
+
return await db.find()
|
|
30
|
+
|
|
31
|
+
tasks = [read_all() for _ in range(100)]
|
|
32
|
+
results = await asyncio.gather(*tasks)
|
|
33
|
+
|
|
34
|
+
# All reads should succeed and return same data
|
|
35
|
+
assert all(len(r) == 10 for r in results)
|
|
36
|
+
print(f"✓ 100 concurrent reads completed successfully")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
async def test_concurrent_writes(temp_db):
|
|
40
|
+
"""Test many concurrent write operations."""
|
|
41
|
+
db = AsyncDB(temp_db, "items", TestItem)
|
|
42
|
+
|
|
43
|
+
# Run 50 concurrent writes
|
|
44
|
+
async def write_item(i: int):
|
|
45
|
+
return await db.save(TestItem(name=f"concurrent_{i}", value=i))
|
|
46
|
+
|
|
47
|
+
tasks = [write_item(i) for i in range(50)]
|
|
48
|
+
results = await asyncio.gather(*tasks)
|
|
49
|
+
|
|
50
|
+
# All writes should succeed
|
|
51
|
+
assert all(r is True for r in results)
|
|
52
|
+
|
|
53
|
+
# Verify all items were saved
|
|
54
|
+
items = await db.find()
|
|
55
|
+
assert len(items) == 50
|
|
56
|
+
print(f"✓ 50 concurrent writes completed successfully")
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
async def test_concurrent_mixed_operations(temp_db):
|
|
60
|
+
"""Test concurrent reads and writes together."""
|
|
61
|
+
db = AsyncDB(temp_db, "items", TestItem)
|
|
62
|
+
|
|
63
|
+
# Seed some data
|
|
64
|
+
for i in range(5):
|
|
65
|
+
await db.save(TestItem(name=f"seed_{i}", value=i))
|
|
66
|
+
|
|
67
|
+
async def read_op():
|
|
68
|
+
return await db.find()
|
|
69
|
+
|
|
70
|
+
async def write_op(i: int):
|
|
71
|
+
return await db.save(TestItem(name=f"mixed_{i}", value=i))
|
|
72
|
+
|
|
73
|
+
async def count_op():
|
|
74
|
+
return await db.count()
|
|
75
|
+
|
|
76
|
+
# Mix of 100 reads, 50 writes, 50 counts - all concurrent
|
|
77
|
+
tasks = []
|
|
78
|
+
tasks.extend([read_op() for _ in range(100)])
|
|
79
|
+
tasks.extend([write_op(i) for i in range(50)])
|
|
80
|
+
tasks.extend([count_op() for _ in range(50)])
|
|
81
|
+
|
|
82
|
+
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
83
|
+
|
|
84
|
+
# Check no exceptions
|
|
85
|
+
exceptions = [r for r in results if isinstance(r, Exception)]
|
|
86
|
+
if exceptions:
|
|
87
|
+
print(f"✗ {len(exceptions)} exceptions occurred:")
|
|
88
|
+
for e in exceptions[:5]:
|
|
89
|
+
print(f" - {type(e).__name__}: {e}")
|
|
90
|
+
raise AssertionError(f"{len(exceptions)} operations failed")
|
|
91
|
+
|
|
92
|
+
# Verify final state
|
|
93
|
+
items = await db.find()
|
|
94
|
+
assert len(items) == 55 # 5 seed + 50 writes
|
|
95
|
+
print(f"✓ 200 concurrent mixed operations completed successfully")
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
async def test_stress_concurrent_access(temp_db):
|
|
99
|
+
"""Stress test with very high concurrency."""
|
|
100
|
+
db = AsyncDB(temp_db, "items", TestItem)
|
|
101
|
+
|
|
102
|
+
# Run 500 concurrent operations
|
|
103
|
+
async def random_op(i: int):
|
|
104
|
+
if i % 3 == 0:
|
|
105
|
+
return await db.save(TestItem(name=f"stress_{i}", value=i))
|
|
106
|
+
elif i % 3 == 1:
|
|
107
|
+
return await db.find()
|
|
108
|
+
else:
|
|
109
|
+
return await db.count()
|
|
110
|
+
|
|
111
|
+
tasks = [random_op(i) for i in range(500)]
|
|
112
|
+
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
113
|
+
|
|
114
|
+
# Count successes and failures
|
|
115
|
+
exceptions = [r for r in results if isinstance(r, Exception)]
|
|
116
|
+
successes = len(results) - len(exceptions)
|
|
117
|
+
|
|
118
|
+
print(f"Results: {successes} successes, {len(exceptions)} failures")
|
|
119
|
+
|
|
120
|
+
if exceptions:
|
|
121
|
+
print(f"Sample exceptions:")
|
|
122
|
+
for e in exceptions[:3]:
|
|
123
|
+
print(f" - {type(e).__name__}: {e}")
|
|
124
|
+
|
|
125
|
+
# Should have very few or no failures with retry logic
|
|
126
|
+
assert len(exceptions) == 0, f"{len(exceptions)} operations failed"
|
|
127
|
+
print(f"✓ 500 concurrent stress operations completed successfully")
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
if __name__ == "__main__":
|
|
131
|
+
import sys
|
|
132
|
+
|
|
133
|
+
async def run_all_tests():
|
|
134
|
+
with tempfile.TemporaryDirectory() as tmpdir:
|
|
135
|
+
db_path = Path(tmpdir) / "test_concurrent.db"
|
|
136
|
+
|
|
137
|
+
print("\n" + "=" * 60)
|
|
138
|
+
print("CONCURRENT DATABASE ACCESS TESTS")
|
|
139
|
+
print("=" * 60)
|
|
140
|
+
|
|
141
|
+
print("\n[Test 1] Concurrent reads...")
|
|
142
|
+
await test_concurrent_reads(db_path)
|
|
143
|
+
|
|
144
|
+
# New db for each test
|
|
145
|
+
db_path2 = Path(tmpdir) / "test_concurrent2.db"
|
|
146
|
+
print("\n[Test 2] Concurrent writes...")
|
|
147
|
+
await test_concurrent_writes(db_path2)
|
|
148
|
+
|
|
149
|
+
db_path3 = Path(tmpdir) / "test_concurrent3.db"
|
|
150
|
+
print("\n[Test 3] Mixed operations...")
|
|
151
|
+
await test_concurrent_mixed_operations(db_path3)
|
|
152
|
+
|
|
153
|
+
db_path4 = Path(tmpdir) / "test_concurrent4.db"
|
|
154
|
+
print("\n[Test 4] Stress test (500 ops)...")
|
|
155
|
+
await test_stress_concurrent_access(db_path4)
|
|
156
|
+
|
|
157
|
+
print("\n" + "=" * 60)
|
|
158
|
+
print("ALL TESTS PASSED!")
|
|
159
|
+
print("=" * 60)
|
|
160
|
+
|
|
161
|
+
asyncio.run(run_all_tests())
|
|
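The new test module passes a `temp_db` path into every test but does not define a fixture for it, and no conftest appears in this diff. Under pytest with pytest-asyncio, one plausible way to supply it would be a fixture along these lines (an assumption about the test setup, not something shown in this release):

```python
# Hypothetical conftest.py sketch; assumes pytest and pytest-asyncio are available.
import tempfile
from pathlib import Path

import pytest

@pytest.fixture
def temp_db():
    """Yield a fresh SQLite file path inside a throwaway directory."""
    with tempfile.TemporaryDirectory() as tmpdir:
        yield Path(tmpdir) / "test.db"
```

With pytest-asyncio's `asyncio_mode = "auto"` (or explicit `@pytest.mark.asyncio` markers) the async test functions would then run directly; the module's `__main__` block already provides a pytest-free way to run all four tests against temporary databases.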
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: esuls
-Version: 0.1.13
+Version: 0.1.15
 Summary: Utility library for async database operations, HTTP requests, and parallel execution
 Author-email: IperGiove <ipergiove@gmail.com>
 License: MIT
@@ -228,8 +228,7 @@ pip install -e .
 
 ```bash
 # With uv
-uv build
-twine upload dist/*
+uv build && twine upload dist/*
 
 # Or with traditional tools
 pip install build twine