astreum 0.2.61__py3-none-any.whl → 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/__init__.py +16 -7
- astreum/{_communication → communication}/__init__.py +3 -3
- astreum/communication/handlers/handshake.py +89 -0
- astreum/communication/handlers/object_request.py +176 -0
- astreum/communication/handlers/object_response.py +115 -0
- astreum/communication/handlers/ping.py +34 -0
- astreum/communication/handlers/route_request.py +76 -0
- astreum/communication/handlers/route_response.py +53 -0
- astreum/communication/models/__init__.py +0 -0
- astreum/communication/models/message.py +124 -0
- astreum/communication/models/peer.py +51 -0
- astreum/{_communication → communication/models}/route.py +7 -12
- astreum/communication/processors/__init__.py +0 -0
- astreum/communication/processors/incoming.py +98 -0
- astreum/communication/processors/outgoing.py +20 -0
- astreum/communication/setup.py +166 -0
- astreum/communication/start.py +37 -0
- astreum/{_communication → communication}/util.py +7 -0
- astreum/consensus/__init__.py +20 -0
- astreum/consensus/genesis.py +66 -0
- astreum/consensus/models/__init__.py +0 -0
- astreum/consensus/models/account.py +84 -0
- astreum/consensus/models/accounts.py +72 -0
- astreum/consensus/models/block.py +364 -0
- astreum/{_consensus → consensus/models}/chain.py +7 -7
- astreum/{_consensus → consensus/models}/fork.py +8 -8
- astreum/consensus/models/receipt.py +98 -0
- astreum/{_consensus → consensus/models}/transaction.py +76 -78
- astreum/{_consensus → consensus}/setup.py +18 -50
- astreum/consensus/start.py +67 -0
- astreum/consensus/validator.py +95 -0
- astreum/{_consensus → consensus}/workers/discovery.py +19 -1
- astreum/consensus/workers/validation.py +307 -0
- astreum/{_consensus → consensus}/workers/verify.py +29 -2
- astreum/crypto/chacha20poly1305.py +74 -0
- astreum/machine/__init__.py +20 -0
- astreum/machine/evaluations/__init__.py +0 -0
- astreum/{_lispeum → machine/evaluations}/high_evaluation.py +237 -236
- astreum/machine/evaluations/low_evaluation.py +281 -0
- astreum/machine/evaluations/script_evaluation.py +27 -0
- astreum/machine/models/__init__.py +0 -0
- astreum/machine/models/environment.py +31 -0
- astreum/{_lispeum → machine/models}/expression.py +36 -8
- astreum/machine/tokenizer.py +90 -0
- astreum/node.py +78 -767
- astreum/storage/__init__.py +7 -0
- astreum/storage/actions/get.py +183 -0
- astreum/storage/actions/set.py +178 -0
- astreum/{_storage → storage/models}/atom.py +55 -57
- astreum/{_storage/patricia.py → storage/models/trie.py} +227 -203
- astreum/storage/requests.py +28 -0
- astreum/storage/setup.py +22 -15
- astreum/utils/config.py +48 -0
- {astreum-0.2.61.dist-info → astreum-0.3.9.dist-info}/METADATA +27 -26
- astreum-0.3.9.dist-info/RECORD +71 -0
- astreum/_communication/message.py +0 -101
- astreum/_communication/peer.py +0 -23
- astreum/_communication/setup.py +0 -322
- astreum/_consensus/__init__.py +0 -20
- astreum/_consensus/account.py +0 -95
- astreum/_consensus/accounts.py +0 -38
- astreum/_consensus/block.py +0 -311
- astreum/_consensus/genesis.py +0 -72
- astreum/_consensus/receipt.py +0 -136
- astreum/_consensus/workers/validation.py +0 -125
- astreum/_lispeum/__init__.py +0 -16
- astreum/_lispeum/environment.py +0 -13
- astreum/_lispeum/low_evaluation.py +0 -123
- astreum/_lispeum/tokenizer.py +0 -22
- astreum/_node.py +0 -198
- astreum/_storage/__init__.py +0 -7
- astreum/_storage/setup.py +0 -35
- astreum/format.py +0 -75
- astreum/models/block.py +0 -441
- astreum/models/merkle.py +0 -205
- astreum/models/patricia.py +0 -393
- astreum/storage/object.py +0 -68
- astreum-0.2.61.dist-info/RECORD +0 -57
- /astreum/{models → communication/handlers}/__init__.py +0 -0
- /astreum/{_communication → communication/models}/ping.py +0 -0
- /astreum/{_consensus → consensus}/workers/__init__.py +0 -0
- /astreum/{_lispeum → machine/models}/meter.py +0 -0
- /astreum/{_lispeum → machine}/parser.py +0 -0
- {astreum-0.2.61.dist-info → astreum-0.3.9.dist-info}/WHEEL +0 -0
- {astreum-0.2.61.dist-info → astreum-0.3.9.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.61.dist-info → astreum-0.3.9.dist-info}/top_level.txt +0 -0
astreum/_lispeum/low_evaluation.py
DELETED

@@ -1,123 +0,0 @@
-from typing import Dict, List, Union
-from .expression import Expr, error_expr
-from .meter import Meter
-
-def tc_to_int(b: bytes) -> int:
-    """bytes -> int using two's complement (width = len(b)*8)."""
-    if not b:
-        return 0
-    return int.from_bytes(b, "big", signed=True)
-
-def int_to_tc(n: int, width_bytes: int) -> bytes:
-    """int -> bytes (two's complement, fixed width)."""
-    if width_bytes <= 0:
-        return b"\x00"
-    return n.to_bytes(width_bytes, "big", signed=True)
-
-def min_tc_width(n: int) -> int:
-    """minimum bytes to store n in two's complement."""
-    if n == 0:
-        return 1
-    w = 1
-    while True:
-        try:
-            n.to_bytes(w, "big", signed=True)
-            return w
-        except OverflowError:
-            w += 1
-
-def nand_bytes(a: bytes, b: bytes) -> bytes:
-    """Bitwise NAND on two byte strings, zero-extending to max width."""
-    w = max(len(a), len(b), 1)
-    au = int.from_bytes(a.rjust(w, b"\x00"), "big", signed=False)
-    bu = int.from_bytes(b.rjust(w, b"\x00"), "big", signed=False)
-    mask = (1 << (w * 8)) - 1
-    resu = (~(au & bu)) & mask
-    return resu.to_bytes(w, "big", signed=False)
-
-def low_eval(self, code: List[bytes], meter: Meter) -> Expr:
-
-    heap: Dict[bytes, bytes] = {}
-
-    stack: List[bytes] = []
-    pc = 0
-
-    while True:
-        if pc >= len(code):
-            if len(stack) != 1:
-                return error_expr("low_eval", "bad stack")
-            # wrap successful result as an Expr.Bytes
-            return Expr.Bytes(stack.pop())
-
-        tok = code[pc]
-        pc += 1
-
-        # ---------- ADD ----------
-        if tok == b"add":
-            if len(stack) < 2:
-                return error_expr("low_eval", "underflow")
-            b_b = stack.pop()
-            a_b = stack.pop()
-            a_i = tc_to_int(a_b)
-            b_i = tc_to_int(b_b)
-            res_i = a_i + b_i
-            width = max(len(a_b), len(b_b), min_tc_width(res_i))
-            res_b = int_to_tc(res_i, width)
-            # charge for both operands' byte widths
-            if not meter.charge_bytes(len(a_b) + len(b_b)):
-                return error_expr("low_eval", "meter limit")
-            stack.append(res_b)
-            continue
-
-        # ---------- NAND ----------
-        if tok == b"nand":
-            if len(stack) < 2:
-                return error_expr("low_eval", "underflow")
-            b_b = stack.pop()
-            a_b = stack.pop()
-            res_b = nand_bytes(a_b, b_b)
-            # bitwise cost: 2 * max(len(a), len(b))
-            if not meter.charge_bytes(2 * max(len(a_b), len(b_b), 1)):
-                return error_expr("low_eval", "meter limit")
-            stack.append(res_b)
-            continue
-
-        # ---------- JUMP ----------
-        if tok == b"jump":
-            if len(stack) < 1:
-                return error_expr("low_eval", "underflow")
-            tgt_b = stack.pop()
-            if not meter.charge_bytes(1):
-                return error_expr("low_eval", "meter limit")
-            tgt_i = tc_to_int(tgt_b)
-            if tgt_i < 0 or tgt_i >= len(code):
-                return error_expr("low_eval", "bad jump")
-            pc = tgt_i
-            continue
-
-        # ---------- HEAP GET ----------
-        if tok == b"heap_get":
-            if len(stack) < 1:
-                return error_expr("low_eval", "underflow")
-            key = stack.pop()
-            val = heap.get(key) or b""
-            # get cost: 1
-            if not meter.charge_bytes(1):
-                return error_expr("low_eval", "meter limit")
-            stack.append(val)
-            continue
-
-        # ---------- HEAP SET ----------
-        if tok == b"heap_set":
-            if len(stack) < 2:
-                return error_expr("low_eval", "underflow")
-            val = stack.pop()
-            key = stack.pop()
-            if not meter.charge_bytes(len(val)):
-                return error_expr("low_eval", "meter limit")
-            heap[key] = val
-            continue
-
-        # if no opcode matched above, treat token as literal
-        # not an opcode → literal blob
-        stack.append(tok)
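
The removed `low_eval` is a metered stack machine over byte tokens: anything that is not a recognised opcode (`add`, `nand`, `jump`, `heap_get`, `heap_set`) is pushed onto the stack as a literal blob, and each opcode charges the `Meter` before its result is kept. The sketch below shows how a program for it was laid out; `StubMeter` is a hypothetical stand-in for the deleted `astreum._lispeum.meter.Meter`, included only to illustrate the `charge_bytes` interface the loop relies on.

```python
# Hypothetical driver for the removed low_eval VM (module gone in 0.3.9).
# StubMeter is an assumed stand-in exposing the charge_bytes(n) -> bool
# interface the deleted Meter class provided.
class StubMeter:
    def __init__(self, budget: int) -> None:
        self.budget = budget

    def charge_bytes(self, n: int) -> bool:
        # Spend n bytes of budget; refuse the operation once exhausted.
        if n > self.budget:
            return False
        self.budget -= n
        return True

# Non-opcode tokens are pushed as literal blobs, so this program pushes
# 0x02 and 0x03 and then adds them (two's-complement, big-endian result).
program = [b"\x02", b"\x03", b"add"]

# On 0.2.61 this would have been invoked roughly as:
#   from astreum._lispeum.low_evaluation import low_eval
#   result = low_eval(None, program, StubMeter(budget=64))  # -> Expr.Bytes(b"\x05")
```
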
astreum/_lispeum/tokenizer.py
DELETED

@@ -1,22 +0,0 @@
-from typing import List
-
-
-def tokenize(source: str) -> List[str]:
-    tokens: List[str] = []
-    cur: List[str] = []
-    for ch in source:
-        if ch.isspace():
-            if cur:
-                tokens.append("".join(cur))
-                cur = []
-            continue
-        if ch in ("(", ")"):
-            if cur:
-                tokens.append("".join(cur))
-                cur = []
-            tokens.append(ch)
-            continue
-        cur.append(ch)
-    if cur:
-        tokens.append("".join(cur))
-    return tokens
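
The deleted tokenizer split on whitespace and emitted each parenthesis as its own token; per the file listing above, 0.3.9 ships a new implementation at astreum/machine/tokenizer.py instead. A small usage check against the old import path, guarded since the module no longer exists:

```python
# Illustrative only: the module was removed in 0.3.9, so guard the import.
try:
    from astreum._lispeum.tokenizer import tokenize  # present up to 0.2.61
except ImportError:
    tokenize = None

if tokenize is not None:
    # Whitespace separates tokens; "(" and ")" become standalone tokens.
    assert tokenize("(add 1 2)") == ["(", "add", "1", "2", ")"]
```
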
astreum/_node.py
DELETED

@@ -1,198 +0,0 @@
-from __future__ import annotations
-from pathlib import Path
-from typing import Dict, List, Optional
-import uuid
-import threading
-
-from astreum._storage.atom import AtomKind
-
-from ._storage import Atom, storage_setup
-from ._lispeum import Env, Expr, Meter, low_eval, parse, tokenize, ParseError
-from .utils.logging import logging_setup
-
-__all__ = [
-    "Node",
-    "Env",
-    "Expr",
-    "Meter",
-    "parse",
-    "tokenize",
-]
-
-def bytes_touched(*vals: bytes) -> int:
-    """For metering: how many bytes were manipulated (max of operands)."""
-    return max((len(v) for v in vals), default=1)
-
-class Node:
-    def __init__(self, config: dict):
-        self.logger = logging_setup(config)
-        self.logger.info("Starting Astreum Node")
-        # Storage Setup
-        storage_setup(self, config=config)
-        # Lispeum Setup
-        self.environments: Dict[uuid.UUID, Env] = {}
-        self.machine_environments_lock = threading.RLock()
-        self.low_eval = low_eval
-        # Communication and Validation Setup (import lazily to avoid heavy deps during parsing tests)
-        try:
-            from astreum._communication import communication_setup  # type: ignore
-            communication_setup(node=self, config=config)
-        except Exception:
-            pass
-        try:
-            from astreum._consensus import consensus_setup  # type: ignore
-            consensus_setup(node=self, config=config)
-        except Exception:
-            pass
-
-
-
-    # ---- Env helpers ----
-    def env_get(self, env_id: uuid.UUID, key: bytes) -> Optional[Expr]:
-        cur = self.environments.get(env_id)
-        while cur is not None:
-            if key in cur.data:
-                return cur.data[key]
-            cur = self.environments.get(cur.parent_id) if cur.parent_id else None
-        return None
-
-    def env_set(self, env_id: uuid.UUID, key: bytes, value: Expr) -> bool:
-        with self.machine_environments_lock:
-            env = self.environments.get(env_id)
-            if env is None:
-                return False
-            env.data[key] = value
-            return True
-
-    # Storage
-    def _hot_storage_get(self, key: bytes) -> Optional[Atom]:
-        atom = self.hot_storage.get(key)
-        if atom is not None:
-            self.hot_storage_hits[key] = self.hot_storage_hits.get(key, 0) + 1
-            return atom
-
-    def _hot_storage_set(self, key: bytes, value: Atom) -> bool:
-        """Store atom in hot storage without exceeding the configured limit."""
-        projected = self.hot_storage_size + value.size
-        if projected > self.hot_storage_limit:
-            return False
-
-        self.hot_storage[key] = value
-        self.hot_storage_size = projected
-        return True
-
-    def _network_get(self, key: bytes) -> Optional[Atom]:
-        # locate storage provider
-        # query storage provider
-        return None
-
-    def storage_get(self, key: bytes) -> Optional[Atom]:
-        """Retrieve an Atom by checking local storage first, then the network."""
-        atom = self._hot_storage_get(key)
-        if atom is not None:
-            return atom
-        atom = self._cold_storage_get(key)
-        if atom is not None:
-            return atom
-        return self._network_get(key)
-
-    def _cold_storage_get(self, key: bytes) -> Optional[Atom]:
-        """Read an atom from the cold storage directory if configured."""
-        if not self.cold_storage_path:
-            return None
-        filename = f"{key.hex().upper()}.bin"
-        file_path = Path(self.cold_storage_path) / filename
-        try:
-            data = file_path.read_bytes()
-        except FileNotFoundError:
-            return None
-        except OSError:
-            return None
-        try:
-            return Atom.from_bytes(data)
-        except ValueError:
-            return None
-
-    def _cold_storage_set(self, atom: Atom) -> None:
-        """Persist an atom into the cold storage directory if it already exists."""
-        if not self.cold_storage_path:
-            return
-        atom_bytes = atom.to_bytes()
-        projected = self.cold_storage_size + len(atom_bytes)
-        if self.cold_storage_limit and projected > self.cold_storage_limit:
-            return
-        directory = Path(self.cold_storage_path)
-        if not directory.exists():
-            return
-        atom_id = atom.object_id()
-        filename = f"{atom_id.hex().upper()}.bin"
-        file_path = directory / filename
-        try:
-            file_path.write_bytes(atom_bytes)
-            self.cold_storage_size = projected
-        except OSError:
-            return
-
-    def _network_set(self, atom: Atom) -> None:
-        """Advertise an atom to the closest known peer so they can fetch it from us."""
-        try:
-            from ._communication.message import Message, MessageTopic
-        except Exception:
-            return
-
-        atom_id = atom.object_id()
-        try:
-            closest_peer = self.peer_route.closest_peer_for_hash(atom_id)
-        except Exception:
-            return
-        if closest_peer is None or closest_peer.address is None:
-            return
-        target_addr = closest_peer.address
-
-        try:
-            provider_ip, provider_port = self.incoming_socket.getsockname()[:2]
-        except Exception:
-            return
-
-        provider_str = f"{provider_ip}:{int(provider_port)}"
-        try:
-            provider_bytes = provider_str.encode("utf-8")
-        except Exception:
-            return
-
-        payload = atom_id + provider_bytes
-        message = Message(topic=MessageTopic.STORAGE_REQUEST, content=payload)
-        self.outgoing_queue.put((message.to_bytes(), target_addr))
-
-    def get_expr_list_from_storage(self, key: bytes) -> Optional["ListExpr"]:
-        atoms = self.get_atom_list_from_storage(root_hash=key)
-        if atoms is None:
-            return None
-
-        expr_list = []
-        for atom in atoms:
-            match atom.kind:
-                case AtomKind.SYMBOL:
-                    expr_list.append(Expr.Symbol(atom.data))
-                case AtomKind.BYTES:
-                    expr_list.append(Expr.Bytes(atom.data))
-                case AtomKind.LIST:
-                    expr_list.append(Expr.ListExpr([
-                        Expr.Bytes(atom.data),
-                        Expr.Symbol("ref")
-                    ]))
-
-        expr_list.reverse()
-        return Expr.ListExpr(expr_list)
-
-    def get_atom_list_from_storage(self, root_hash: bytes) -> Optional[List["Atom"]]:
-        next_id: Optional[bytes] = root_hash
-        atom_list: List["Atom"] = []
-        while next_id:
-            elem = self.storage_get(key=next_id)
-            if elem:
-                atom_list.append(elem)
-                next_id = elem.next
-            else:
-                return None
-        return atom_list
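
The removed Node resolved atoms in a hot → cold → network order and persisted cold atoms under an uppercase-hex filename derived from the atom ID. A tiny check of that naming convention (the key value below is made up for illustration):

```python
# Cold-storage naming used by the removed Node._cold_storage_get/_set:
# each atom lives at <cold_storage_path>/<ID hex, uppercase>.bin.
key = bytes.fromhex("a1b2c3")  # arbitrary example key
filename = f"{key.hex().upper()}.bin"
assert filename == "A1B2C3.bin"
```
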
astreum/_storage/__init__.py
DELETED
astreum/_storage/setup.py
DELETED

@@ -1,35 +0,0 @@
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Any
-
-
-def storage_setup(node: Any, config: dict) -> None:
-    """Initialize hot/cold storage helpers on the node."""
-
-    node.hot_storage = {}
-    node.hot_storage_hits = {}
-    node.storage_index = {}
-    node.hot_storage_size = 0
-    hot_storage_default_limit = 1 << 30  # 1 GiB
-    hot_storage_limit_value = config.get("hot_storage_limit", hot_storage_default_limit)
-    try:
-        node.hot_storage_limit = int(hot_storage_limit_value)
-    except (TypeError, ValueError):
-        node.hot_storage_limit = hot_storage_default_limit
-
-    node.cold_storage_size = 0
-    cold_storage_default_limit = 10 << 30  # 10 GiB
-    cold_storage_limit_value = config.get("cold_storage_limit", cold_storage_default_limit)
-    try:
-        node.cold_storage_limit = int(cold_storage_limit_value)
-    except (TypeError, ValueError):
-        node.cold_storage_limit = cold_storage_default_limit
-
-    cold_storage_path = config.get("cold_storage_path")
-    if cold_storage_path:
-        try:
-            Path(cold_storage_path).mkdir(parents=True, exist_ok=True)
-        except OSError:
-            cold_storage_path = None
-    node.cold_storage_path = cold_storage_path
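
The removed storage_setup read three config keys and fell back to 1 GiB hot and 10 GiB cold budgets when a value was missing or not an integer. A sketch of a config dict it would have accepted; the sizes and path below are illustrative values, not defaults:

```python
# Example values only; the keys are the ones the removed storage_setup read.
config = {
    "hot_storage_limit": 256 << 20,            # 256 MiB in-memory atom cache
    "cold_storage_limit": 5 << 30,             # 5 GiB on-disk budget
    "cold_storage_path": "/tmp/astreum-cold",  # created with mkdir(parents=True) if missing
}
# On 0.2.61: from astreum._storage import storage_setup; storage_setup(node, config)
```
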
astreum/format.py
DELETED

@@ -1,75 +0,0 @@
-def encode(iterable, encoder=lambda x: x):
-    """
-    Encode an iterable of items into a single bytes object, applying the
-    provided encoder function to each item to convert it into bytes.
-
-    For each item (after applying encoder), we:
-    - Determine its length (n)
-    - Write a single byte to indicate how many bytes were used for the length:
-        0: n == 0
-        1: n <= 255
-        2: 256 <= n <= 65535
-        4: 65536 <= n <= 4294967295
-        8: n > 4294967295
-    - Write the little-endian encoding of n in the specified number of bytes (if n > 0)
-    - Append the item's data
-    """
-    result = bytearray()
-    for item in iterable:
-        item_bytes = encoder(item)
-        n = len(item_bytes)
-        if n > 4294967295:
-            result.append(8)
-            result.extend(n.to_bytes(8, byteorder='little'))
-        elif n > 65535:
-            result.append(4)
-            result.extend(n.to_bytes(4, byteorder='little'))
-        elif n > 255:
-            result.append(2)
-            result.extend(n.to_bytes(2, byteorder='little'))
-        elif n > 0:
-            result.append(1)
-            result.extend(n.to_bytes(1, byteorder='little'))
-        else:
-            result.append(0)
-        result.extend(item_bytes)
-    return bytes(result)
-
-
-def decode(buffer, decoder=lambda x: x):
-    """
-    Decode a bytes buffer into a list of items, applying the provided decoder
-    function to convert the bytes into the desired type.
-
-    The buffer is read sequentially:
-    - Read one byte that indicates how many bytes were used for the length.
-    - If this value is 0, then the item is empty.
-    - Otherwise, read that many bytes to get the item's length (as a little-endian integer).
-    - Then, slice the next 'length' bytes from the buffer to get the item data.
-    - Apply the decoder function to the item data before appending it.
-
-    By default, the decoder is the identity function, so items are returned as bytes.
-    """
-    decoded_data = []
-    offset = 0
-    buf_len = len(buffer)
-
-    while offset < buf_len:
-        length_type = buffer[offset]
-        offset += 1
-
-        if length_type == 0:
-            n = 0
-        else:
-            if offset + length_type > buf_len:
-                raise ValueError("Buffer too short for length field")
-            n = int.from_bytes(buffer[offset: offset+length_type], byteorder='little')
-            offset += length_type
-
-        if offset + n > buf_len:
-            raise ValueError("Buffer is too short for item data")
-        item_data = buffer[offset:offset+n]
-        offset += n
-        decoded_data.append(decoder(item_data))
-
-    return decoded_data
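
The docstrings above describe a simple length-prefixed framing: one byte giving the width of the length field, the length itself in little-endian, then the raw item bytes. A worked round trip under that scheme, with the import guarded since the module is gone in 0.3.9:

```python
try:
    from astreum.format import encode, decode  # removed in 0.3.9
except ImportError:
    encode = decode = None

if encode is not None:
    framed = encode([b"hi", b""])
    # 0x01: one byte holds the length; 0x02: length 2; b"hi": the data;
    # 0x00: the second item is empty, so no length bytes and no data follow.
    assert framed == b"\x01\x02hi\x00"
    assert decode(framed) == [b"hi", b""]
```
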