astreum 0.2.41__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- astreum/__init__.py +16 -7
- astreum/{_communication → communication}/__init__.py +3 -3
- astreum/communication/handlers/handshake.py +83 -0
- astreum/communication/handlers/ping.py +48 -0
- astreum/communication/handlers/storage_request.py +81 -0
- astreum/communication/models/__init__.py +0 -0
- astreum/{_communication → communication/models}/message.py +1 -0
- astreum/communication/models/peer.py +23 -0
- astreum/{_communication → communication/models}/route.py +45 -8
- astreum/{_communication → communication}/setup.py +46 -95
- astreum/communication/start.py +38 -0
- astreum/consensus/__init__.py +20 -0
- astreum/consensus/genesis.py +66 -0
- astreum/consensus/models/__init__.py +0 -0
- astreum/consensus/models/account.py +84 -0
- astreum/consensus/models/accounts.py +72 -0
- astreum/consensus/models/block.py +364 -0
- astreum/{_consensus → consensus/models}/chain.py +7 -7
- astreum/{_consensus → consensus/models}/fork.py +8 -8
- astreum/consensus/models/receipt.py +98 -0
- astreum/consensus/models/transaction.py +213 -0
- astreum/{_consensus → consensus}/setup.py +26 -11
- astreum/consensus/start.py +68 -0
- astreum/consensus/validator.py +95 -0
- astreum/{_consensus → consensus}/workers/discovery.py +20 -1
- astreum/consensus/workers/validation.py +291 -0
- astreum/{_consensus → consensus}/workers/verify.py +32 -3
- astreum/machine/__init__.py +20 -0
- astreum/machine/evaluations/__init__.py +0 -0
- astreum/machine/evaluations/high_evaluation.py +237 -0
- astreum/machine/evaluations/low_evaluation.py +281 -0
- astreum/machine/evaluations/script_evaluation.py +27 -0
- astreum/machine/models/__init__.py +0 -0
- astreum/machine/models/environment.py +31 -0
- astreum/machine/models/expression.py +218 -0
- astreum/{_lispeum → machine}/parser.py +26 -31
- astreum/machine/tokenizer.py +90 -0
- astreum/node.py +73 -781
- astreum/storage/__init__.py +7 -0
- astreum/storage/actions/get.py +69 -0
- astreum/storage/actions/set.py +132 -0
- astreum/storage/models/atom.py +107 -0
- astreum/{_storage/patricia.py → storage/models/trie.py} +236 -177
- astreum/storage/setup.py +44 -15
- astreum/utils/bytes.py +24 -0
- astreum/utils/integer.py +25 -0
- astreum/utils/logging.py +219 -0
- astreum-0.3.1.dist-info/METADATA +160 -0
- astreum-0.3.1.dist-info/RECORD +62 -0
- astreum/_communication/peer.py +0 -11
- astreum/_consensus/__init__.py +0 -20
- astreum/_consensus/account.py +0 -170
- astreum/_consensus/accounts.py +0 -67
- astreum/_consensus/block.py +0 -328
- astreum/_consensus/genesis.py +0 -141
- astreum/_consensus/receipt.py +0 -177
- astreum/_consensus/transaction.py +0 -192
- astreum/_consensus/workers/validation.py +0 -122
- astreum/_lispeum/__init__.py +0 -16
- astreum/_lispeum/environment.py +0 -13
- astreum/_lispeum/expression.py +0 -37
- astreum/_lispeum/high_evaluation.py +0 -177
- astreum/_lispeum/low_evaluation.py +0 -123
- astreum/_lispeum/tokenizer.py +0 -22
- astreum/_node.py +0 -58
- astreum/_storage/__init__.py +0 -5
- astreum/_storage/atom.py +0 -117
- astreum/format.py +0 -75
- astreum/models/block.py +0 -441
- astreum/models/merkle.py +0 -205
- astreum/models/patricia.py +0 -393
- astreum/storage/object.py +0 -68
- astreum-0.2.41.dist-info/METADATA +0 -146
- astreum-0.2.41.dist-info/RECORD +0 -53
- /astreum/{models → communication/handlers}/__init__.py +0 -0
- /astreum/{_communication → communication/models}/ping.py +0 -0
- /astreum/{_communication → communication}/util.py +0 -0
- /astreum/{_consensus → consensus}/workers/__init__.py +0 -0
- /astreum/{_lispeum → machine/models}/meter.py +0 -0
- {astreum-0.2.41.dist-info → astreum-0.3.1.dist-info}/WHEEL +0 -0
- {astreum-0.2.41.dist-info → astreum-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {astreum-0.2.41.dist-info → astreum-0.3.1.dist-info}/top_level.txt +0 -0
astreum/_lispeum/high_evaluation.py
DELETED

```diff
@@ -1,177 +0,0 @@
-from typing import List, Union
-import uuid
-
-from .environment import Env
-from .expression import Expr
-from .meter import Meter
-
-
-def high_eval(self, env_id: uuid.UUID, expr: Expr, meter = None) -> Expr:
-
-    if meter is None:
-        meter = Meter()
-
-    # ---------- atoms ----------
-    if isinstance(expr, Expr.Error):
-        return expr
-
-    if isinstance(expr, Expr.Symbol):
-        bound = self.env_get(env_id, expr.value.encode())
-        if bound is None:
-            return Expr.Error(f"unbound symbol '{expr.value}'", origin=expr)
-        return bound
-
-    if not isinstance(expr, Expr.ListExpr):
-        return expr  # Expr.Byte or other literals passthrough
-
-    # ---------- empty / single ----------
-    if len(expr.elements) == 0:
-        return expr
-    if len(expr.elements) == 1:
-        return self.high_eval(env_id=env_id, expr=expr.elements[0], meter=meter)
-
-    tail = expr.elements[-1]
-
-    # ---------- (value name def) ----------
-    if isinstance(tail, Expr.Symbol) and tail.value == "def":
-        if len(expr.elements) < 3:
-            return Expr.Error("def expects (value name def)", origin=expr)
-        name_e = expr.elements[-2]
-        if not isinstance(name_e, Expr.Symbol):
-            return Expr.Error("def name must be symbol", origin=name_e)
-        value_e = expr.elements[-3]
-        value_res = self.high_eval(env_id=env_id, expr=value_e, meter=meter)
-        if isinstance(value_res, Expr.Error):
-            return value_res
-        self.env_set(env_id, name_e.value.encode(), value_res)
-        return value_res
-
-    # ---- LOW-LEVEL call: ( arg1 arg2 ... ( (body) sk ) ) ----
-    if isinstance(tail, Expr.ListExpr):
-        inner = tail.elements
-        if len(inner) >= 2 and isinstance(inner[-1], Expr.Symbol) and inner[-1].value == "sk":
-            body_expr = inner[-2]
-            if not isinstance(body_expr, Expr.ListExpr):
-                return Expr.Error("sk body must be list", origin=body_expr)
-
-            # helper: turn an Expr into a contiguous bytes buffer
-            def to_bytes(v: Expr) -> Union[bytes, Expr.Error]:
-                if isinstance(v, Expr.Byte):
-                    return bytes([v.value & 0xFF])
-                if isinstance(v, Expr.ListExpr):
-                    # expect a list of Expr.Byte
-                    out: bytearray = bytearray()
-                    for el in v.elements:
-                        if isinstance(el, Expr.Byte):
-                            out.append(el.value & 0xFF)
-                        else:
-                            return Expr.Error("byte list must contain only Byte", origin=el)
-                    return bytes(out)
-                if isinstance(v, Expr.Error):
-                    return v
-                return Expr.Error("argument must resolve to Byte or (Byte ...)", origin=v)
-
-            # resolve ALL preceding args into bytes (can be Byte or List[Byte])
-            args_exprs = expr.elements[:-1]
-            arg_bytes: List[bytes] = []
-            for a in args_exprs:
-                v = self.high_eval(env_id=env_id, expr=a, meter=meter)
-                if isinstance(v, Expr.Error):
-                    return v
-                vb = to_bytes(v)
-                if isinstance(vb, Expr.Error):
-                    return vb
-                arg_bytes.append(vb)
-
-            # build low-level code with $0-based placeholders ($0 = first arg)
-            code: List[bytes] = []
-
-            def emit(tok: Expr) -> Union[None, Expr.Error]:
-                if isinstance(tok, Expr.Symbol):
-                    name = tok.value
-                    if name.startswith("$"):
-                        idx_s = name[1:]
-                        if not idx_s.isdigit():
-                            return Expr.Error("invalid sk placeholder", origin=tok)
-                        idx = int(idx_s)  # $0 is first
-                        if idx < 0 or idx >= len(arg_bytes):
-                            return Expr.Error("arity mismatch in sk placeholder", origin=tok)
-                        code.append(arg_bytes[idx])
-                        return None
-                    code.append(name.encode())
-                    return None
-
-                if isinstance(tok, Expr.Byte):
-                    code.append(bytes([tok.value & 0xFF]))
-                    return None
-
-                if isinstance(tok, Expr.ListExpr):
-                    rv = self.high_eval(env_id, tok, meter=meter)
-                    if isinstance(rv, Expr.Error):
-                        return rv
-                    rb = to_bytes(rv)
-                    if isinstance(rb, Expr.Error):
-                        return rb
-                    code.append(rb)
-                    return None
-
-                if isinstance(tok, Expr.Error):
-                    return tok
-
-                return Expr.Error("invalid token in sk body", origin=tok)
-
-            for t in body_expr.elements:
-                err = emit(t)
-                if isinstance(err, Expr.Error):
-                    return err
-
-            # Execute low-level code built from sk-body using the caller's meter
-            res = self.low_eval(code, meter=meter)
-            return res
-
-    # ---------- (... (body params fn)) HIGH-LEVEL CALL ----------
-    if isinstance(tail, Expr.ListExpr):
-        fn_form = tail
-        if (len(fn_form.elements) >= 3
-                and isinstance(fn_form.elements[-1], Expr.Symbol)
-                and fn_form.elements[-1].value == "fn"):
-
-            body_expr = fn_form.elements[-3]
-            params_expr = fn_form.elements[-2]
-
-            if not isinstance(body_expr, Expr.ListExpr):
-                return Expr.Error("fn body must be list", origin=body_expr)
-            if not isinstance(params_expr, Expr.ListExpr):
-                return Expr.Error("fn params must be list", origin=params_expr)
-
-            params: List[bytes] = []
-            for p in params_expr.elements:
-                if not isinstance(p, Expr.Symbol):
-                    return Expr.Error("fn param must be symbol", origin=p)
-                params.append(p.value.encode())
-
-            args_exprs = expr.elements[:-1]
-            if len(args_exprs) != len(params):
-                return Expr.Error("arity mismatch", origin=expr)
-
-            arg_bytes: List[bytes] = []
-            for a in args_exprs:
-                v = self.high_eval(env_id, a, meter=meter)
-                if isinstance(v, Expr.Error):
-                    return v
-                if not isinstance(v, Expr.Byte):
-                    return Expr.Error("argument must resolve to Byte", origin=a)
-                arg_bytes.append(bytes([v.value & 0xFF]))
-
-            # child env, bind params -> Expr.Byte
-            child_env = uuid.uuid4()
-            self.environments[child_env] = Env(parent_id=env_id)
-            for name_b, val_b in zip(params, arg_bytes):
-                self.env_set(child_env, name_b, Expr.Byte(val_b[0]))
-
-            # evaluate HL body, metered from the top
-            return self.high_eval(child_env, body_expr, meter=meter)
-
-    # ---------- default: resolve each element and return list ----------
-    resolved: List[Expr] = [self.high_eval(env_id, e, meter=meter) for e in expr.elements]
-    return Expr.ListExpr(resolved)
```
astreum/_lispeum/low_evaluation.py
DELETED

```diff
@@ -1,123 +0,0 @@
-from typing import Dict, List, Union
-from .expression import Expr
-from .meter import Meter
-
-def tc_to_int(b: bytes) -> int:
-    """bytes -> int using two's complement (width = len(b)*8)."""
-    if not b:
-        return 0
-    return int.from_bytes(b, "big", signed=True)
-
-def int_to_tc(n: int, width_bytes: int) -> bytes:
-    """int -> bytes (two's complement, fixed width)."""
-    if width_bytes <= 0:
-        return b"\x00"
-    return n.to_bytes(width_bytes, "big", signed=True)
-
-def min_tc_width(n: int) -> int:
-    """minimum bytes to store n in two's complement."""
-    if n == 0:
-        return 1
-    w = 1
-    while True:
-        try:
-            n.to_bytes(w, "big", signed=True)
-            return w
-        except OverflowError:
-            w += 1
-
-def nand_bytes(a: bytes, b: bytes) -> bytes:
-    """Bitwise NAND on two byte strings, zero-extending to max width."""
-    w = max(len(a), len(b), 1)
-    au = int.from_bytes(a.rjust(w, b"\x00"), "big", signed=False)
-    bu = int.from_bytes(b.rjust(w, b"\x00"), "big", signed=False)
-    mask = (1 << (w * 8)) - 1
-    resu = (~(au & bu)) & mask
-    return resu.to_bytes(w, "big", signed=False)
-
-def low_eval(self, code: List[bytes], meter: Meter) -> Expr:
-
-    heap: Dict[bytes, bytes] = {}
-
-    stack: List[bytes] = []
-    pc = 0
-
-    while True:
-        if pc >= len(code):
-            if len(stack) != 1:
-                return Expr.Error("bad stack")
-            # wrap successful result as an Expr.Bytes
-            return Expr.Bytes(stack.pop())
-
-        tok = code[pc]
-        pc += 1
-
-        # ---------- ADD ----------
-        if tok == b"add":
-            if len(stack) < 2:
-                return Expr.Error("underflow")
-            b_b = stack.pop()
-            a_b = stack.pop()
-            a_i = tc_to_int(a_b)
-            b_i = tc_to_int(b_b)
-            res_i = a_i + b_i
-            width = max(len(a_b), len(b_b), min_tc_width(res_i))
-            res_b = int_to_tc(res_i, width)
-            # charge for both operands' byte widths
-            if not meter.charge_bytes(len(a_b) + len(b_b)):
-                return Expr.Error("meter limit")
-            stack.append(res_b)
-            continue
-
-        # ---------- NAND ----------
-        if tok == b"nand":
-            if len(stack) < 2:
-                return Expr.Error("underflow")
-            b_b = stack.pop()
-            a_b = stack.pop()
-            res_b = nand_bytes(a_b, b_b)
-            # bitwise cost: 2 * max(len(a), len(b))
-            if not meter.charge_bytes(2 * max(len(a_b), len(b_b), 1)):
-                return Expr.Error("meter limit")
-            stack.append(res_b)
-            continue
-
-        # ---------- JUMP ----------
-        if tok == b"jump":
-            if len(stack) < 1:
-                return Expr.Error("underflow")
-            tgt_b = stack.pop()
-            if not meter.charge_bytes(1):
-                return Expr.Error("meter limit")
-            tgt_i = tc_to_int(tgt_b)
-            if tgt_i < 0 or tgt_i >= len(code):
-                return Expr.Error("bad jump")
-            pc = tgt_i
-            continue
-
-        # ---------- HEAP GET ----------
-        if tok == b"heap_get":
-            if len(stack) < 1:
-                return Expr.Error("underflow")
-            key = stack.pop()
-            val = heap.get(key) or b""
-            # get cost: 1
-            if not meter.charge_bytes(1):
-                return Expr.Error("meter limit")
-            stack.append(val)
-            continue
-
-        # ---------- HEAP SET ----------
-        if tok == b"heap_set":
-            if len(stack) < 2:
-                return Expr.Error("underflow")
-            val = stack.pop()
-            key = stack.pop()
-            if not meter.charge_bytes(len(val)):
-                return Expr.Error("meter limit")
-            heap[key] = val
-            continue
-
-        # if no opcode matched above, treat token as literal
-        # not an opcode → literal blob
-        stack.append(tok)
```
astreum/_lispeum/tokenizer.py
DELETED
```diff
@@ -1,22 +0,0 @@
-from typing import List
-
-
-def tokenize(source: str) -> List[str]:
-    tokens: List[str] = []
-    cur: List[str] = []
-    for ch in source:
-        if ch.isspace():
-            if cur:
-                tokens.append("".join(cur))
-                cur = []
-            continue
-        if ch in ("(", ")"):
-            if cur:
-                tokens.append("".join(cur))
-                cur = []
-            tokens.append(ch)
-            continue
-        cur.append(ch)
-    if cur:
-        tokens.append("".join(cur))
-    return tokens
```
astreum/_node.py
DELETED
```diff
@@ -1,58 +0,0 @@
-from __future__ import annotations
-from typing import Dict, Optional
-import uuid
-import threading
-
-from src.astreum._storage.atom import Atom
-from src.astreum._lispeum import Env, Expr, low_eval, parse, tokenize, ParseError
-
-def bytes_touched(*vals: bytes) -> int:
-    """For metering: how many bytes were manipulated (max of operands)."""
-    return max((len(v) for v in vals), default=1)
-
-class Node:
-    def __init__(self, config: dict):
-        # Storage Setup
-        self.in_memory_storage: Dict[bytes, Atom] = {}
-        self.in_memory_storage_lock = threading.RLock()
-        # Lispeum Setup
-        self.environments: Dict[uuid.UUID, Env] = {}
-        self.machine_environments_lock = threading.RLock()
-        self.low_eval = low_eval
-        # Communication and Validation Setup (import lazily to avoid heavy deps during parsing tests)
-        try:
-            from astreum._communication import communication_setup  # type: ignore
-            communication_setup(node=self, config=config)
-        except Exception:
-            pass
-        try:
-            from astreum._consensus import consensus_setup  # type: ignore
-            consensus_setup(node=self)
-        except Exception:
-            pass
-
-    # ---- Env helpers ----
-    def env_get(self, env_id: uuid.UUID, key: bytes) -> Optional[Expr]:
-        cur = self.environments.get(env_id)
-        while cur is not None:
-            if key in cur.data:
-                return cur.data[key]
-            cur = self.environments.get(cur.parent_id) if cur.parent_id else None
-        return None
-
-    def env_set(self, env_id: uuid.UUID, key: bytes, value: Expr) -> bool:
-        with self.machine_environments_lock:
-            env = self.environments.get(env_id)
-            if env is None:
-                return False
-            env.data[key] = value
-            return True
-
-    # Storage
-    def _local_get(self, key: bytes) -> Optional[Atom]:
-        with self.in_memory_storage_lock:
-            return self.in_memory_storage.get(key)
-
-    def _local_set(self, key: bytes, value: Atom) -> None:
-        with self.in_memory_storage_lock:
-            self.in_memory_storage[key] = value
```
astreum/_storage/__init__.py
DELETED
astreum/_storage/atom.py
DELETED
```diff
@@ -1,117 +0,0 @@
-
-
-from typing import List, Optional, Tuple
-
-from .._lispeum.expression import Expr
-from blake3 import blake3
-
-ZERO32 = b"\x00"*32
-
-def u64_le(n: int) -> bytes:
-    return int(n).to_bytes(8, "little", signed=False)
-
-def hash_bytes(b: bytes) -> bytes:
-    return blake3(b).digest()
-
-class Atom:
-    data: bytes
-    next: bytes
-    size: int
-
-    def __init__(self, data: bytes, next: bytes = ZERO32, size: Optional[int] = None):
-        self.data = data
-        self.next = next
-        self.size = len(data) if size is None else size
-
-    @staticmethod
-    def from_data(data: bytes, next_hash: bytes = ZERO32) -> "Atom":
-        return Atom(data=data, next=next_hash, size=len(data))
-
-    @staticmethod
-    def object_id_from_parts(data_hash: bytes, next_hash: bytes, size: int) -> bytes:
-        return blake3(data_hash + next_hash + u64_le(size)).digest()
-
-    def data_hash(self) -> bytes:
-        return hash_bytes(self.data)
-
-    def object_id(self) -> bytes:
-        return self.object_id_from_parts(self.data_hash(), self.next, self.size)
-
-    @staticmethod
-    def verify_metadata(object_id: bytes, size: int, next_hash: bytes, data_hash: bytes) -> bool:
-        return object_id == blake3(data_hash + next_hash + u64_le(size)).digest()
-
-    def to_bytes(self) -> bytes:
-        return self.next + self.data
-
-    @staticmethod
-    def from_bytes(buf: bytes) -> "Atom":
-        if len(buf) < len(ZERO32):
-            raise ValueError("buffer too short for Atom header")
-        next_hash = buf[:len(ZERO32)]
-        data = buf[len(ZERO32):]
-        return Atom(data=data, next=next_hash, size=len(data))
-
-def expr_to_atoms(e: Expr) -> Tuple[bytes, List[Atom]]:
-    def symbol(value: str) -> Tuple[bytes, List[Atom]]:
-        val = value.encode("utf-8")
-        val_atom = Atom.from_data(data=val)
-        typ_atom = Atom.from_data(b"symbol", val_atom.object_id())
-        return typ_atom.object_id(), [val_atom, typ_atom]
-
-    def bytes(data: bytes) -> Tuple[bytes, List[Atom]]:
-        val_atom = Atom.from_data(data=data)
-        typ_atom = Atom.from_data(b"byte", val_atom.object_id())
-        return typ_atom.object_id(), [val_atom, typ_atom]
-
-    def err(topic: str, origin: Optional[Expr]) -> Tuple[bytes, List[Atom]]:
-        topic_bytes = topic.encode("utf-8")
-        topic_atom = Atom.from_data(data=topic_bytes)
-        typ_atom = Atom.from_data(data=b"error", next_hash=topic_atom.object_id())
-        return typ_atom.object_id(), [topic_atom, typ_atom]
-
-    def lst(items: List[Expr]) -> Tuple[bytes, List[Atom]]:
-        acc: List[Atom] = []
-        child_hashes: List[bytes] = []
-        for it in items:
-            h, atoms = expr_to_atoms(it)
-            acc.extend(atoms)
-            child_hashes.append(h)
-        next_hash = ZERO32
-        elem_atoms: List[Atom] = []
-        for h in reversed(child_hashes):
-            a = Atom.from_data(h, next_hash)
-            next_hash = a.object_id()
-            elem_atoms.append(a)
-        elem_atoms.reverse()
-        head = next_hash
-        val_atom = Atom.from_data(data=u64_le(len(items)), next_hash=head)
-        typ_atom = Atom.from_data(data=b"list", next_hash=val_atom.object_id())
-        return typ_atom.object_id(), acc + elem_atoms + [val_atom, typ_atom]
-
-    if isinstance(e, Expr.Symbol):
-        return symbol(e.value)
-    if isinstance(e, Expr.Bytes):
-        return bytes(e.value)
-    if isinstance(e, Expr.Error):
-        return err(e.topic, e.origin)
-    if isinstance(e, Expr.ListExpr):
-        return lst(e.elements)
-    raise TypeError("unknown Expr variant")
-
-
-def bytes_list_to_atoms(values: List[bytes]) -> Tuple[bytes, List[Atom]]:
-    """Build a forward-ordered linked list of atoms from byte payloads.
-
-    Returns the head object's hash (ZERO32 if no values) and the atoms created.
-    """
-    next_hash = ZERO32
-    atoms: List[Atom] = []
-
-    for value in reversed(values):
-        atom = Atom.from_data(data=bytes(value), next_hash=next_hash)
-        atoms.append(atom)
-        next_hash = atom.object_id()
-
-    atoms.reverse()
-    return (next_hash if values else ZERO32), atoms
```
astreum/format.py
DELETED
```diff
@@ -1,75 +0,0 @@
-def encode(iterable, encoder=lambda x: x):
-    """
-    Encode an iterable of items into a single bytes object, applying the
-    provided encoder function to each item to convert it into bytes.
-
-    For each item (after applying encoder), we:
-    - Determine its length (n)
-    - Write a single byte to indicate how many bytes were used for the length:
-        0: n == 0
-        1: n <= 255
-        2: 256 <= n <= 65535
-        4: 65536 <= n <= 4294967295
-        8: n > 4294967295
-    - Write the little-endian encoding of n in the specified number of bytes (if n > 0)
-    - Append the item's data
-    """
-    result = bytearray()
-    for item in iterable:
-        item_bytes = encoder(item)
-        n = len(item_bytes)
-        if n > 4294967295:
-            result.append(8)
-            result.extend(n.to_bytes(8, byteorder='little'))
-        elif n > 65535:
-            result.append(4)
-            result.extend(n.to_bytes(4, byteorder='little'))
-        elif n > 255:
-            result.append(2)
-            result.extend(n.to_bytes(2, byteorder='little'))
-        elif n > 0:
-            result.append(1)
-            result.extend(n.to_bytes(1, byteorder='little'))
-        else:
-            result.append(0)
-        result.extend(item_bytes)
-    return bytes(result)
-
-
-def decode(buffer, decoder=lambda x: x):
-    """
-    Decode a bytes buffer into a list of items, applying the provided decoder
-    function to convert the bytes into the desired type.
-
-    The buffer is read sequentially:
-    - Read one byte that indicates how many bytes were used for the length.
-    - If this value is 0, then the item is empty.
-    - Otherwise, read that many bytes to get the item's length (as a little-endian integer).
-    - Then, slice the next 'length' bytes from the buffer to get the item data.
-    - Apply the decoder function to the item data before appending it.
-
-    By default, the decoder is the identity function, so items are returned as bytes.
-    """
-    decoded_data = []
-    offset = 0
-    buf_len = len(buffer)
-
-    while offset < buf_len:
-        length_type = buffer[offset]
-        offset += 1
-
-        if length_type == 0:
-            n = 0
-        else:
-            if offset + length_type > buf_len:
-                raise ValueError("Buffer too short for length field")
-            n = int.from_bytes(buffer[offset: offset+length_type], byteorder='little')
-            offset += length_type
-
-        if offset + n > buf_len:
-            raise ValueError("Buffer is too short for item data")
-        item_data = buffer[offset:offset+n]
-        offset += n
-        decoded_data.append(decoder(item_data))
-
-    return decoded_data
```