starddb 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- starddb-1.0.0/PKG-INFO +89 -0
- starddb-1.0.0/README.md +63 -0
- starddb-1.0.0/pyproject.toml +43 -0
- starddb-1.0.0/setup.cfg +4 -0
- starddb-1.0.0/stardb.py +199 -0
- starddb-1.0.0/starddb.egg-info/PKG-INFO +89 -0
- starddb-1.0.0/starddb.egg-info/SOURCES.txt +10 -0
- starddb-1.0.0/starddb.egg-info/dependency_links.txt +1 -0
- starddb-1.0.0/starddb.egg-info/requires.txt +4 -0
- starddb-1.0.0/starddb.egg-info/top_level.txt +1 -0
- starddb-1.0.0/tests/test_crawl.py +148 -0
- starddb-1.0.0/tests/test_errors.py +119 -0
starddb-1.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: starddb
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: A lightweight JSON document database with field-level operation queuing and concurrency support
|
|
5
|
+
Author:
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/obama/stardb
|
|
8
|
+
Project-URL: Repository, https://github.com/obama/stardb
|
|
9
|
+
Keywords: json,database,document-db,queue,concurrency,lightweight,embedded
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Database
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Requires-Python: >=3.8
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: filelock>=3.0
|
|
24
|
+
Provides-Extra: dev
|
|
25
|
+
Requires-Dist: pytest; extra == "dev"
|
|
26
|
+
|
|
27
|
+
# StarDDB (Star Document-database)
|
|
28
|
+
|
|
29
|
+
> "Shoot for the moon. Even if you miss, you'll land among the stars." - Norman Vincent Peale
|
|
30
|
+
|
|
31
|
+
StarDDB is a simple to use, lightweight (single file implementation) DB for efficient JSON-like storage management.
|
|
32
|
+
|
|
33
|
+
## Features
|
|
34
|
+
|
|
35
|
+
- Concurrency support via field-level operation queuing
|
|
36
|
+
- Easy-to-use API
|
|
37
|
+
- Automatic background persistence
|
|
38
|
+
- Nested document support
|
|
39
|
+
- Thread-safe (this Python implementation; the companion Node.js port is event-loop safe)
|
|
40
|
+
|
|
41
|
+
## Installation
|
|
42
|
+
|
|
43
|
+
```bash
|
|
44
|
+
pip install starddb
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## Quick Start
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
from stardb import StarDDBField, StarDDB
|
|
51
|
+
|
|
52
|
+
# Create a field and queue operations
|
|
53
|
+
field = StarDDBField(0)
|
|
54
|
+
field.update("set", 1)
|
|
55
|
+
field.update("mult", 5)
|
|
56
|
+
field.update("div", 0.5)
|
|
57
|
+
field.flush()
|
|
58
|
+
print(field.value) # 10.0
|
|
59
|
+
|
|
60
|
+
# Use with a database file
|
|
61
|
+
db = StarDDB("data.json", save_time=5)
|
|
62
|
+
hook = db.db()
|
|
63
|
+
hook["health"].update("sub", 30)
|
|
64
|
+
hook["mana"].update("mult", 2)
|
|
65
|
+
db.close()
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
## API
|
|
69
|
+
|
|
70
|
+
### StarDDBField(value, max_queue_size=10000)
|
|
71
|
+
|
|
72
|
+
- `value` — Initial value (any JSON-serializable type)
|
|
73
|
+
- `max_queue_size` — Maximum queued operations (default: 10000)
|
|
74
|
+
|
|
75
|
+
**Methods:**
|
|
76
|
+
- `update(method, value)` — Queue an operation. Methods: `set`, `add`, `sub`, `mult`, `div`
|
|
77
|
+
- `flush()` — Block until all queued operations are processed
|
|
78
|
+
|
|
79
|
+
### StarDDB(database, save_time, database_hook=None, safe_root=None)
|
|
80
|
+
|
|
81
|
+
- `database` — Path to the JSON database file
|
|
82
|
+
- `save_time` — Seconds between automatic saves
|
|
83
|
+
- `database_hook` — Optional pre-loaded dict (skips file read)
|
|
84
|
+
- `safe_root` — Optional root directory to restrict path traversal
|
|
85
|
+
|
|
86
|
+
**Methods:**
|
|
87
|
+
- `db()` — Get the database hook (dict of StarDDBField instances)
|
|
88
|
+
- `flush()` — Block until all field operations are processed
|
|
89
|
+
- `close()` — Flush, save, and stop the background save thread
|
starddb-1.0.0/README.md
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# StarDDB (Star Document-database)
|
|
2
|
+
|
|
3
|
+
> "Shoot for the moon. Even if you miss, you'll land among the stars." - Norman Vincent Peale
|
|
4
|
+
|
|
5
|
+
StarDDB is a simple to use, lightweight (single file implementation) DB for efficient JSON-like storage management.
|
|
6
|
+
|
|
7
|
+
## Features
|
|
8
|
+
|
|
9
|
+
- Concurrency support via field-level operation queuing
|
|
10
|
+
- Easy-to-use API
|
|
11
|
+
- Automatic background persistence
|
|
12
|
+
- Nested document support
|
|
13
|
+
- Thread-safe (this Python implementation; the companion Node.js port is event-loop safe)
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pip install starddb
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Quick Start
|
|
22
|
+
|
|
23
|
+
```python
|
|
24
|
+
from stardb import StarDDBField, StarDDB
|
|
25
|
+
|
|
26
|
+
# Create a field and queue operations
|
|
27
|
+
field = StarDDBField(0)
|
|
28
|
+
field.update("set", 1)
|
|
29
|
+
field.update("mult", 5)
|
|
30
|
+
field.update("div", 0.5)
|
|
31
|
+
field.flush()
|
|
32
|
+
print(field.value) # 10.0
|
|
33
|
+
|
|
34
|
+
# Use with a database file
|
|
35
|
+
db = StarDDB("data.json", save_time=5)
|
|
36
|
+
hook = db.db()
|
|
37
|
+
hook["health"].update("sub", 30)
|
|
38
|
+
hook["mana"].update("mult", 2)
|
|
39
|
+
db.close()
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
## API
|
|
43
|
+
|
|
44
|
+
### StarDDBField(value, max_queue_size=10000)
|
|
45
|
+
|
|
46
|
+
- `value` — Initial value (any JSON-serializable type)
|
|
47
|
+
- `max_queue_size` — Maximum queued operations (default: 10000)
|
|
48
|
+
|
|
49
|
+
**Methods:**
|
|
50
|
+
- `update(method, value)` — Queue an operation. Methods: `set`, `add`, `sub`, `mult`, `div`
|
|
51
|
+
- `flush()` — Block until all queued operations are processed
|
|
52
|
+
|
|
53
|
+
### StarDDB(database, save_time, database_hook=None, safe_root=None)
|
|
54
|
+
|
|
55
|
+
- `database` — Path to the JSON database file
|
|
56
|
+
- `save_time` — Seconds between automatic saves
|
|
57
|
+
- `database_hook` — Optional pre-loaded dict (skips file read)
|
|
58
|
+
- `safe_root` — Optional root directory to restrict path traversal
|
|
59
|
+
|
|
60
|
+
**Methods:**
|
|
61
|
+
- `db()` — Get the database hook (dict of StarDDBField instances)
|
|
62
|
+
- `flush()` — Block until all field operations are processed
|
|
63
|
+
- `close()` — Flush, save, and stop the background save thread
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "starddb"
|
|
7
|
+
version = "1.0.0"
|
|
8
|
+
description = "A lightweight JSON document database with field-level operation queuing and concurrency support"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = {text = "MIT"}
|
|
11
|
+
authors = [
|
|
12
|
+
{name = ""}
|
|
13
|
+
]
|
|
14
|
+
keywords = ["json", "database", "document-db", "queue", "concurrency", "lightweight", "embedded"]
|
|
15
|
+
classifiers = [
|
|
16
|
+
"Development Status :: 4 - Beta",
|
|
17
|
+
"Intended Audience :: Developers",
|
|
18
|
+
"License :: OSI Approved :: MIT License",
|
|
19
|
+
"Programming Language :: Python :: 3",
|
|
20
|
+
"Programming Language :: Python :: 3.8",
|
|
21
|
+
"Programming Language :: Python :: 3.9",
|
|
22
|
+
"Programming Language :: Python :: 3.10",
|
|
23
|
+
"Programming Language :: Python :: 3.11",
|
|
24
|
+
"Programming Language :: Python :: 3.12",
|
|
25
|
+
"Topic :: Database",
|
|
26
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
27
|
+
]
|
|
28
|
+
requires-python = ">=3.8"
|
|
29
|
+
dependencies = [
|
|
30
|
+
"filelock>=3.0",
|
|
31
|
+
]
|
|
32
|
+
|
|
33
|
+
[project.optional-dependencies]
|
|
34
|
+
dev = [
|
|
35
|
+
"pytest",
|
|
36
|
+
]
|
|
37
|
+
|
|
38
|
+
[project.urls]
|
|
39
|
+
Homepage = "https://github.com/obama/stardb"
|
|
40
|
+
Repository = "https://github.com/obama/stardb"
|
|
41
|
+
|
|
42
|
+
[tool.setuptools]
|
|
43
|
+
py-modules = ["stardb"]
|
starddb-1.0.0/setup.cfg
ADDED
starddb-1.0.0/stardb.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
import json
import logging
import os
import threading
import time
from collections import deque

from filelock import FileLock
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger("stardb")
|
|
9
|
+
|
|
10
|
+
VALID_METHODS = frozenset({"set", "add", "sub", "mult", "div"})
|
|
11
|
+
MAX_QUEUE_SIZE = 10000
|
|
12
|
+
MAX_RECURSION_DEPTH = 100
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class StarDDBField:
    """A single value plus a queue of pending arithmetic operations.

    Operations submitted via :meth:`update` are appended to a queue and
    drained synchronously in the calling thread; a per-field RLock makes
    the queue safe to share between threads, and :meth:`flush` blocks
    until the queue is empty.
    """

    def __init__(self, value=None, max_queue_size=MAX_QUEUE_SIZE):
        self.value = value
        # deque: O(1) pops from the left; the old list.pop(0) was O(n).
        self.queue = deque()
        self._lock = threading.RLock()
        self._running = False
        self._done_event = threading.Event()
        self._done_event.set()  # initially "done" — nothing queued
        self._max_queue_size = max_queue_size

    def update(self, method, value):
        """Queue one operation and drain the queue in this thread.

        Args:
            method: One of "set", "add", "sub", "mult", "div".
            value: Operand for the operation.

        Raises:
            ValueError: if *method* is not a valid operation.
            ZeroDivisionError: if *method* is "div" and *value* is 0.
            RuntimeError: if the queue is already at capacity.
        """
        if method not in VALID_METHODS:
            raise ValueError(
                f'Invalid operation "{method}". Valid operations: {", ".join(sorted(VALID_METHODS))}'
            )
        if method == "div" and value == 0:
            raise ZeroDivisionError("Division by zero is not allowed")
        with self._lock:
            self.queue.append({"method": method, "value": value})
            if len(self.queue) > self._max_queue_size:
                # Discard the item we just appended so the queue is unchanged.
                self.queue.pop()
                raise RuntimeError(
                    f"Queue is full (max size: {self._max_queue_size}). "
                    "Wait for pending operations to complete."
                )
            self._done_event.clear()
            if not self._running:
                self._running = True
                self._run_list()

    def _run_list(self):
        """Drain the queue, applying each operation to self.value.

        BUGFIX: if applying an operation raised (e.g. "add" on a None
        value), the field previously stayed marked as running with the
        done event cleared — every later update() silently queued and
        flush() deadlocked.  The running/done state is now reset before
        the exception propagates, so the field recovers.
        """
        try:
            while True:
                with self._lock:
                    if not self.queue:
                        self._running = False
                        self._done_event.set()
                        return
                    item = self.queue.popleft()

                method = item["method"]
                if method == "set":
                    self.value = item["value"]
                elif method == "add":
                    self.value += item["value"]
                elif method == "sub":
                    self.value -= item["value"]
                elif method == "mult":
                    self.value *= item["value"]
                elif method == "div":
                    # update() rejects 0, but items pushed directly onto
                    # the queue may still carry one — skip, don't crash.
                    if item["value"] == 0:
                        logger.warning("[StarDDB] Division by zero skipped")
                        continue
                    self.value /= item["value"]
                else:
                    logger.error('[StarDDB] Unknown method "%s" dropped', method)
        except Exception:
            with self._lock:
                # Recover: allow a future update() to restart draining and
                # unblock any flush() waiters before re-raising.
                self._running = False
                self._done_event.set()
            raise

    def flush(self):
        """Block until all queued operations have been processed."""
        self._done_event.wait()
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _crawl_db(hook, depth=0):
    """Wrap every leaf of *hook* in a StarDDBField, mutating it in place.

    Nested dicts are descended into recursively; anything else becomes a
    field.  Returns the same *hook* object for convenience.

    Raises:
        RecursionError: if nesting exceeds MAX_RECURSION_DEPTH.
    """
    if depth > MAX_RECURSION_DEPTH:
        raise RecursionError(
            f"Max recursion depth ({MAX_RECURSION_DEPTH}) exceeded. "
            "Nested objects are too deep."
        )
    for key, node in hook.items():
        if isinstance(node, dict):
            _crawl_db(node, depth + 1)
        else:
            hook[key] = StarDDBField(node)
    return hook
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def _serialize_db(hook, depth=0):
    """Build a plain JSON-ready dict mirroring *hook*.

    StarDDBField leaves are unwrapped to their current .value; nested
    dicts are serialized recursively; other values pass through as-is.

    Raises:
        RecursionError: if nesting exceeds MAX_RECURSION_DEPTH.
    """
    if depth > MAX_RECURSION_DEPTH:
        raise RecursionError(
            f"Max recursion depth ({MAX_RECURSION_DEPTH}) exceeded during serialization."
        )
    out = {}
    for key, node in hook.items():
        if isinstance(node, StarDDBField):
            out[key] = node.value
        elif isinstance(node, dict):
            out[key] = _serialize_db(node, depth + 1)
        else:
            out[key] = node
    return out
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _sanitize_path(db_path, safe_root=None):
|
|
106
|
+
resolved = os.path.abspath(db_path)
|
|
107
|
+
if safe_root is not None:
|
|
108
|
+
root = os.path.abspath(safe_root)
|
|
109
|
+
if not (resolved.startswith(root + os.sep) or resolved == root):
|
|
110
|
+
raise ValueError(
|
|
111
|
+
f'Database path escapes safe root "{root}". '
|
|
112
|
+
f'Resolved path: "{resolved}"'
|
|
113
|
+
)
|
|
114
|
+
return resolved
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _save_db_loop(db_path, hook, interval, stop_event, safe_root=None):
    """Background loop: serialize *hook* and persist it every *interval* s.

    BUGFIX: writes now go to a temporary file that is atomically renamed
    over the database with os.replace, so a crash mid-write can no
    longer leave a truncated/corrupt JSON file behind.

    Args:
        db_path: Resolved path of the database file.
        hook: The live hook tree (dict of StarDDBField leaves).
        interval: Seconds to wait between save attempts.
        stop_event: threading.Event that terminates the loop when set.
        safe_root: Unused here; accepted for interface compatibility
            with StarDDB.__init__ — path sanitizing already happened.
    """
    lock_path = db_path + ".lock"
    tmp_path = db_path + ".tmp"
    while not stop_event.is_set():
        stop_event.wait(interval)
        if stop_event.is_set():
            break
        try:
            data = _serialize_db(hook)
            with FileLock(lock_path, timeout=10):
                with open(tmp_path, "w") as f:
                    json.dump(data, f)
                # Atomic on both POSIX and Windows.
                os.replace(tmp_path, db_path)
        except Exception as e:
            logger.error("[StarDDB] Failed to save database to %s: %s", db_path, e)
            # Do NOT raise — this runs in a daemon thread.
            # Logging the error is enough; we keep trying on the next interval.
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _flush_all_fields(node):
    """Recursively flush every StarDDBField in the hook tree."""
    for child in node.values():
        if isinstance(child, StarDDBField):
            child.flush()
        elif isinstance(child, dict):
            _flush_all_fields(child)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class StarDDB:
    """A JSON-file-backed document store of StarDDBField leaves.

    On construction the JSON document is loaded (or adopted from
    *database_hook*), every leaf is wrapped in a StarDDBField, and a
    daemon thread persists the document every *save_time* seconds until
    close() is called.
    """

    def __init__(self, database, save_time, database_hook=None, safe_root=None):
        """Load the document and start the background save thread.

        Args:
            database: Path to the JSON database file.
            save_time: Seconds between automatic background saves.
            database_hook: Optional pre-loaded dict (skips the file read).
            safe_root: Optional root directory to restrict path traversal.

        Raises:
            ValueError: if the path escapes *safe_root*.
            FileNotFoundError: if the file is missing and no hook is given.
            json.JSONDecodeError: if the file is not valid JSON.
        """
        self.database = _sanitize_path(database, safe_root)

        if database_hook is None:
            if not os.path.exists(self.database):
                raise FileNotFoundError(
                    f"Database file does not exist: {self.database}"
                )
            try:
                with open(self.database, "r") as f:
                    database_hook = json.load(f)
            except json.JSONDecodeError as e:
                logger.error(
                    "[StarDDB] Failed to parse database file %s: %s", self.database, e
                )
                raise

        # Wrap every leaf value in a StarDDBField (mutates the dict in place).
        self.database_hook = _crawl_db(database_hook)

        self._stop_event = threading.Event()
        self._save_thread = threading.Thread(
            target=_save_db_loop,
            args=(
                self.database,
                self.database_hook,
                save_time,
                self._stop_event,
                safe_root,
            ),
            daemon=True,
        )
        self._save_thread.start()

    def db(self):
        """Return the database hook (dict tree of StarDDBField leaves)."""
        return self.database_hook

    def flush(self):
        """Block until all queued field operations have been processed."""
        _flush_all_fields(self.database_hook)

    def close(self):
        """Flush pending ops, do a final save, then stop the background thread.

        BUGFIX: shutting down the save thread now happens in a
        ``finally`` block, so the daemon thread is stopped even when the
        final save raises.  Previously a failed save skipped
        _stop_event.set(), leaving the thread running and periodically
        overwriting the file after close() had "failed".
        """
        self.flush()
        lock_path = self.database + ".lock"
        try:
            data = _serialize_db(self.database_hook)
            with FileLock(lock_path, timeout=10):
                with open(self.database, "w") as f:
                    json.dump(data, f)
        except Exception as e:
            logger.error(
                "[StarDDB] Failed final save on close to %s: %s", self.database, e
            )
            raise
        finally:
            self._stop_event.set()
            self._save_thread.join()
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: starddb
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: A lightweight JSON document database with field-level operation queuing and concurrency support
|
|
5
|
+
Author:
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/obama/stardb
|
|
8
|
+
Project-URL: Repository, https://github.com/obama/stardb
|
|
9
|
+
Keywords: json,database,document-db,queue,concurrency,lightweight,embedded
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Database
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Requires-Python: >=3.8
|
|
22
|
+
Description-Content-Type: text/markdown
|
|
23
|
+
Requires-Dist: filelock>=3.0
|
|
24
|
+
Provides-Extra: dev
|
|
25
|
+
Requires-Dist: pytest; extra == "dev"
|
|
26
|
+
|
|
27
|
+
# StarDDB (Star Document-database)
|
|
28
|
+
|
|
29
|
+
> "Shoot for the moon. Even if you miss, you'll land among the stars." - Norman Vincent Peale
|
|
30
|
+
|
|
31
|
+
StarDDB is a simple to use, lightweight (single file implementation) DB for efficient JSON-like storage management.
|
|
32
|
+
|
|
33
|
+
## Features
|
|
34
|
+
|
|
35
|
+
- Concurrency support via field-level operation queuing
|
|
36
|
+
- Easy-to-use API
|
|
37
|
+
- Automatic background persistence
|
|
38
|
+
- Nested document support
|
|
39
|
+
- Thread-safe (this Python implementation; the companion Node.js port is event-loop safe)
|
|
40
|
+
|
|
41
|
+
## Installation
|
|
42
|
+
|
|
43
|
+
```bash
|
|
44
|
+
pip install starddb
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## Quick Start
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
from stardb import StarDDBField, StarDDB
|
|
51
|
+
|
|
52
|
+
# Create a field and queue operations
|
|
53
|
+
field = StarDDBField(0)
|
|
54
|
+
field.update("set", 1)
|
|
55
|
+
field.update("mult", 5)
|
|
56
|
+
field.update("div", 0.5)
|
|
57
|
+
field.flush()
|
|
58
|
+
print(field.value) # 10.0
|
|
59
|
+
|
|
60
|
+
# Use with a database file
|
|
61
|
+
db = StarDDB("data.json", save_time=5)
|
|
62
|
+
hook = db.db()
|
|
63
|
+
hook["health"].update("sub", 30)
|
|
64
|
+
hook["mana"].update("mult", 2)
|
|
65
|
+
db.close()
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
## API
|
|
69
|
+
|
|
70
|
+
### StarDDBField(value, max_queue_size=10000)
|
|
71
|
+
|
|
72
|
+
- `value` — Initial value (any JSON-serializable type)
|
|
73
|
+
- `max_queue_size` — Maximum queued operations (default: 10000)
|
|
74
|
+
|
|
75
|
+
**Methods:**
|
|
76
|
+
- `update(method, value)` — Queue an operation. Methods: `set`, `add`, `sub`, `mult`, `div`
|
|
77
|
+
- `flush()` — Block until all queued operations are processed
|
|
78
|
+
|
|
79
|
+
### StarDDB(database, save_time, database_hook=None, safe_root=None)
|
|
80
|
+
|
|
81
|
+
- `database` — Path to the JSON database file
|
|
82
|
+
- `save_time` — Seconds between automatic saves
|
|
83
|
+
- `database_hook` — Optional pre-loaded dict (skips file read)
|
|
84
|
+
- `safe_root` — Optional root directory to restrict path traversal
|
|
85
|
+
|
|
86
|
+
**Methods:**
|
|
87
|
+
- `db()` — Get the database hook (dict of StarDDBField instances)
|
|
88
|
+
- `flush()` — Block until all field operations are processed
|
|
89
|
+
- `close()` — Flush, save, and stop the background save thread
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
stardb
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
"""Integration test script for StarDDB document crawling and persistence.

NOTE(review): this is a plain script, not a pytest module — it exercises
nested crawling, in-place field updates, and timed background saves,
printing PASS as it goes and aborting on the first failed assert.
"""
import sys
import os
import json
sys.path.append("..")
from stardb import StarDDBField, StarDDB

# ── Test 1: crawl_db via StarDDB with nested structures ──

print("--- crawl_db: nested structures ---")

db_path = os.path.join(os.path.dirname(__file__), "test_db_crawl.json")

initial = {
    "player": {
        "name": "Hero",
        "stats": {
            "hp": 100,
            "mp": 50,
            "atk": 25
        },
        "inventory": {
            "gold": 300,
            "gems": 12
        }
    },
    "world": {
        "level": 1,
        "difficulty": 0.5
    },
    "debug": True,
    "version": 2
}

with open(db_path, "w") as f:
    json.dump(initial, f)

db = StarDDB(db_path, save_time=1)
hook = db.db()

# Walk every level and verify every leaf is a StarDDBField
def assert_crawled(path, node):
    if isinstance(node, dict):
        for key in node:
            assert_crawled(f"{path}.{key}", node[key])
    else:
        assert isinstance(node, StarDDBField), f"{path} is {type(node)}, expected StarDDBField"

# Check all leaves are StarDDBField
assert_crawled("root", hook)
print("All leaf nodes are StarDDBField instances: PASS")

# Verify values survived crawling
assert hook["player"]["stats"]["hp"].value == 100
assert hook["player"]["inventory"]["gold"].value == 300
assert hook["world"]["level"].value == 1
assert hook["world"]["difficulty"].value == 0.5
assert hook["debug"].value is True
assert hook["version"].value == 2
print("Crawled values match original: PASS")

# Update deeply nested fields
hook["player"]["stats"]["atk"].update("mult", 2)  # 50
hook["player"]["inventory"]["gems"].update("add", 3)  # 15
hook["world"]["difficulty"].update("mult", 2)  # 1.0

assert hook["player"]["stats"]["atk"].value == 50
assert hook["player"]["inventory"]["gems"].value == 15
assert hook["world"]["difficulty"].value == 1.0
print("Nested field updates work: PASS")

# Persist and verify
import time
# NOTE(review): sleeping past save_time=1 so the background saver runs
# at least once — timing-based, so this can be flaky on a loaded machine.
time.sleep(1.5)

with open(db_path, "r") as f:
    saved = json.load(f)

expected_saved = {
    "player": {
        "name": "Hero",
        "stats": {"hp": 100, "mp": 50, "atk": 50},
        "inventory": {"gold": 300, "gems": 15}
    },
    "world": {"level": 1, "difficulty": 1.0},
    "debug": True,
    "version": 2
}

assert saved == expected_saved, f"Persisted data mismatch:\n got: {saved}\n expected: {expected_saved}"
print("Nested data persistence: PASS")

db.close()

# ── Test 2: edge cases ──

print("\n--- crawl_db: edge cases ---")

edge_path = os.path.join(os.path.dirname(__file__), "test_db_edge.json")

# Empty dicts, None values, empty strings, zero, lists (left as-is)
edge_data = {
    "empty_section": {},
    "null_val": None,
    "empty_str": "",
    "zero": 0,
    "negative": -42,
    "float": 3.14159
}

with open(edge_path, "w") as f:
    json.dump(edge_data, f)

db2 = StarDDB(edge_path, save_time=1)
hook2 = db2.db()

assert hook2["null_val"].value is None
assert hook2["empty_str"].value == ""
assert hook2["zero"].value == 0
assert hook2["negative"].value == -42
assert hook2["float"].value == 3.14159
print("Edge values crawled correctly: PASS")

assert hook2["empty_section"] == {}, "Empty dict should remain a dict"
print("Empty dict preserved: PASS")

hook2["null_val"].update("set", "now set")
assert hook2["null_val"].value == "now set"
hook2["zero"].update("add", 100)
assert hook2["zero"].value == 100
print("Edge field updates work: PASS")

time.sleep(1.5)

with open(edge_path, "r") as f:
    saved2 = json.load(f)

assert saved2["null_val"] == "now set"
assert saved2["zero"] == 100
assert saved2["empty_section"] == {}
print("Edge data persistence: PASS")

db2.close()

# Cleanup
os.remove(db_path)
os.remove(edge_path)

print("\n--- All crawl_db tests PASSED ---")
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
"""Error-path test script for StarDDB.

NOTE(review): plain script, not a pytest module — each section prints
PASS/FAIL for one error path (invalid ops, div-by-zero, queue overflow,
corrupt/missing files, path traversal, recovery).
"""
import sys
import os
import json
sys.path.append("..")
from stardb import StarDDBField, StarDDB

# ── Test 1: Invalid operation names ──

print("--- Error path: invalid operations ---")

field = StarDDBField(0)

try:
    field.update("sett", 1)
    print("FAIL: Should have thrown for invalid operation 'sett'")
except ValueError as e:
    print(f"PASS: Invalid operation rejected: {e}")

try:
    field.update("addx", 5)
    print("FAIL: Should have thrown for invalid operation 'addx'")
except ValueError as e:
    print(f"PASS: Invalid operation rejected: {e}")

# ── Test 2: Division by zero ──

print("\n--- Error path: division by zero ---")

field2 = StarDDBField(100)

try:
    field2.update("div", 0)
    print("FAIL: Should have thrown for division by zero")
except ZeroDivisionError as e:
    print(f"PASS: Division by zero rejected: {e}")

assert field2.value == 100, "Value should be unchanged after failed div"
print(f"Value unchanged after failed div: {'PASS' if field2.value == 100 else 'FAIL'}")

# ── Test 3: Queue overflow ──

print("\n--- Error path: queue overflow ---")

# In Python, _run_list drains synchronously in the calling thread, so we need
# to hold the lock to prevent draining while we flood the queue from another thread.
field3 = StarDDBField(0, max_queue_size=5)

# Hold the lock so _run_list can't drain
field3._lock.acquire()
overflow_caught = False
try:
    for i in range(10):
        field3.queue.append({"method": "add", "value": 1})
    field3._running = True  # prevent _run_list from thinking it's done
except Exception:
    pass
field3._lock.release()

# Now call update — queue is already over the limit
try:
    field3.update("add", 1)
except RuntimeError as e:
    if "Queue is full" in str(e):
        overflow_caught = True

# NOTE(review): this prints "PASS" regardless of overflow_caught's value;
# consider `assert overflow_caught` so a regression actually fails the run.
print(f"PASS: Queue overflow rejected: {overflow_caught}")

# ── Test 4: Corrupted JSON file ──

print("\n--- Error path: corrupted database file ---")

corrupt_path = os.path.join(os.path.dirname(__file__), "test_corrupt.json")
with open(corrupt_path, "w") as f:
    f.write("{ this is not valid json !!!")

try:
    db = StarDDB(corrupt_path, save_time=1)
    print("FAIL: Should have thrown for corrupted JSON")
except json.JSONDecodeError as e:
    print(f"PASS: Corrupted file rejected: {e}")

if os.path.exists(corrupt_path):
    os.remove(corrupt_path)

# ── Test 5: Missing database file ──

print("\n--- Error path: missing database file ---")

missing_path = os.path.join(os.path.dirname(__file__), "does_not_exist.json")

try:
    db = StarDDB(missing_path, save_time=1)
    print("FAIL: Should have thrown for missing file")
except FileNotFoundError as e:
    print(f"PASS: Missing file rejected: {e}")

# ── Test 6: Path traversal protection ──

print("\n--- Error path: path traversal ---")

try:
    db = StarDDB("../../etc/passwd", save_time=1, safe_root=os.path.dirname(__file__))
    print("FAIL: Should have thrown for path traversal")
except ValueError as e:
    print(f"PASS: Path traversal rejected: {e}")

# ── Test 7: Valid operation after error ──

print("\n--- Error path: recovery after errors ---")

field4 = StarDDBField(10)
field4.update("add", 5)
field4.update("mult", 3)
field4.flush()

print(f"Field value after valid ops: {field4.value} (expect 45)")
print(f"Recovery test: {'PASS' if field4.value == 45 else 'FAIL'}")

print("\n--- All error path tests completed ---")
|