conceptkernel 1.1.0__tar.gz → 1.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/PKG-INFO +1 -1
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/__init__.py +1 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/actions.py +45 -12
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/dispatch.py +30 -3
- conceptkernel-1.3.0/cklib/nats.py +183 -0
- conceptkernel-1.3.0/cklib/occurrent.py +218 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/processor.py +1 -2
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/conceptkernel.egg-info/PKG-INFO +1 -1
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/conceptkernel.egg-info/SOURCES.txt +2 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/pyproject.toml +1 -1
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/LICENSE +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/README.md +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/auth.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/capacity.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/context.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/entities.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/events.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/execution.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/instance.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/ledger.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/prov.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/schema.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/serve.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/cklib/urn.py +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/conceptkernel.egg-info/dependency_links.txt +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/conceptkernel.egg-info/requires.txt +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/conceptkernel.egg-info/top_level.txt +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/setup.cfg +0 -0
- {conceptkernel-1.1.0 → conceptkernel-1.3.0}/tests/test_execution.py +0 -0
|
@@ -108,27 +108,51 @@ def _get_actions_from_spec(data):
|
|
|
108
108
|
|
|
109
109
|
|
|
110
110
|
def _find_ck_dir_by_name(target_name, concepts_dir):
|
|
111
|
-
"""Find a CK directory by its metadata.name.
|
|
111
|
+
"""Find a CK directory by its metadata.name.
|
|
112
|
+
|
|
113
|
+
Searches two levels: direct child (flat layout) and one level deeper
|
|
114
|
+
(GUID subdirectory layout).
|
|
115
|
+
"""
|
|
112
116
|
if not os.path.isdir(concepts_dir):
|
|
113
117
|
return None
|
|
114
118
|
for entry in os.listdir(concepts_dir):
|
|
115
119
|
candidate = os.path.join(concepts_dir, entry)
|
|
116
120
|
if not os.path.isdir(candidate):
|
|
117
121
|
continue
|
|
122
|
+
# Level 1: conceptkernel.yaml at kernel root (flat layout)
|
|
118
123
|
yaml_path = os.path.join(candidate, "conceptkernel.yaml")
|
|
119
|
-
if
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
d
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
124
|
+
if os.path.isfile(yaml_path):
|
|
125
|
+
try:
|
|
126
|
+
with open(yaml_path) as f:
|
|
127
|
+
d = yaml.safe_load(f)
|
|
128
|
+
if isinstance(d, dict) and d.get("metadata", {}).get("name") == target_name:
|
|
129
|
+
return candidate
|
|
130
|
+
# Also check kernel_class field
|
|
131
|
+
if isinstance(d, dict) and d.get("kernel_class") == target_name:
|
|
132
|
+
return candidate
|
|
133
|
+
except Exception:
|
|
134
|
+
pass
|
|
127
135
|
continue
|
|
136
|
+
# Level 2: conceptkernel.yaml inside a subdirectory (GUID layout)
|
|
137
|
+
for sub in os.listdir(candidate):
|
|
138
|
+
sub_path = os.path.join(candidate, sub)
|
|
139
|
+
if not os.path.isdir(sub_path):
|
|
140
|
+
continue
|
|
141
|
+
yaml_path = os.path.join(sub_path, "conceptkernel.yaml")
|
|
142
|
+
if os.path.isfile(yaml_path):
|
|
143
|
+
try:
|
|
144
|
+
with open(yaml_path) as f:
|
|
145
|
+
d = yaml.safe_load(f)
|
|
146
|
+
if isinstance(d, dict) and d.get("metadata", {}).get("name") == target_name:
|
|
147
|
+
return sub_path
|
|
148
|
+
if isinstance(d, dict) and d.get("kernel_class") == target_name:
|
|
149
|
+
return sub_path
|
|
150
|
+
except Exception:
|
|
151
|
+
pass
|
|
128
152
|
return None
|
|
129
153
|
|
|
130
154
|
|
|
131
|
-
def resolve_composed_actions(ck_dir):
|
|
155
|
+
def resolve_composed_actions(ck_dir, concepts_dir=None):
|
|
132
156
|
"""Walk COMPOSES/EXTENDS edges, collect target kernel actions.
|
|
133
157
|
|
|
134
158
|
Returns dict: {action_name: target_kernel_name}
|
|
@@ -139,8 +163,17 @@ def resolve_composed_actions(ck_dir):
|
|
|
139
163
|
if err:
|
|
140
164
|
return {}
|
|
141
165
|
|
|
142
|
-
# Derive concepts_dir
|
|
143
|
-
concepts_dir
|
|
166
|
+
# Derive concepts_dir — walk up until we find a dir containing OTHER kernels
|
|
167
|
+
if concepts_dir is None:
|
|
168
|
+
concepts_dir = os.path.dirname(ck_dir)
|
|
169
|
+
# Check if parent has OTHER kernel directories (not just our own GUID subdir)
|
|
170
|
+
ck_basename = os.path.basename(ck_dir)
|
|
171
|
+
siblings = [d for d in os.listdir(concepts_dir)
|
|
172
|
+
if d != ck_basename
|
|
173
|
+
and os.path.isdir(os.path.join(concepts_dir, d))
|
|
174
|
+
and os.path.isfile(os.path.join(concepts_dir, d, "conceptkernel.yaml"))]
|
|
175
|
+
if not siblings:
|
|
176
|
+
concepts_dir = os.path.dirname(concepts_dir)
|
|
144
177
|
|
|
145
178
|
edges_section = data.get("spec", {}).get("edges", data.get("edges", {}))
|
|
146
179
|
outbound = edges_section.get("outbound", [])
|
|
@@ -27,9 +27,36 @@ CONCEPTS_DIR = None
|
|
|
27
27
|
def _dir_has_kernels(concepts_candidate):
    """Return True if *concepts_candidate* holds at least one kernel directory.

    A kernel directory is a child containing conceptkernel.yaml directly
    (flat layout) or one level deeper (GUID subdirectory layout).
    Unreadable or vanished directories count as empty instead of raising,
    so path initialisation never crashes on a permission error.
    """
    try:
        for d in os.listdir(concepts_candidate):
            d_path = os.path.join(concepts_candidate, d)
            if not os.path.isdir(d_path):
                continue
            if os.path.isfile(os.path.join(d_path, "conceptkernel.yaml")):
                return True
            for sub in os.listdir(d_path):
                sub_path = os.path.join(d_path, sub)
                if os.path.isdir(sub_path) and os.path.isfile(
                        os.path.join(sub_path, "conceptkernel.yaml")):
                    return True
    except OSError:
        pass
    return False


def _init_paths():
    """Resolve the PROJECT_ROOT / CONCEPTS_DIR module globals exactly once.

    Resolution order:
      1. CK_CONCEPTS_DIR env var (set by CK.Operator boot or delvinator.sh)
      2. PROJECT_ROOT env var (concepts/ assumed directly beneath it)
      3. Walk up to 5 levels from cklib/ for a concepts/ dir containing
         actual kernel directories (not just a stray concepts/ folder)
      4. Fixed ../../../../ fallback relative to this file
    """
    global PROJECT_ROOT, CONCEPTS_DIR
    if PROJECT_ROOT is not None:
        return
    # Prefer env vars (set by CK.Operator boot or delvinator.sh)
    if os.environ.get("CK_CONCEPTS_DIR"):
        CONCEPTS_DIR = os.environ["CK_CONCEPTS_DIR"]
        PROJECT_ROOT = os.path.dirname(CONCEPTS_DIR)
        return
    if os.environ.get("PROJECT_ROOT"):
        PROJECT_ROOT = os.environ["PROJECT_ROOT"]
        CONCEPTS_DIR = os.path.join(PROJECT_ROOT, "concepts")
        return
    # Fallback: walk up from cklib to find a concepts/ dir containing kernels.
    candidate = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    for _ in range(5):
        concepts_candidate = os.path.join(candidate, "concepts")
        if os.path.isdir(concepts_candidate) and _dir_has_kernels(concepts_candidate):
            PROJECT_ROOT = candidate
            CONCEPTS_DIR = concepts_candidate
            return
        candidate = os.path.dirname(candidate)
    # Nothing found within 5 levels: fixed relative fallback.
    PROJECT_ROOT = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "..", "..", ".."))
    CONCEPTS_DIR = os.path.join(PROJECT_ROOT, "concepts")
|
|
33
60
|
|
|
34
61
|
|
|
35
62
|
def resolve_kernel(name):
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
"""
|
|
2
|
+
nats_kernel.py — shared NATS kernel loop for Concept Kernels.
|
|
3
|
+
|
|
4
|
+
Not a standalone CK — a library that any CK's processor.py imports to become
|
|
5
|
+
a NATS listener following the CK processing cycle:
|
|
6
|
+
|
|
7
|
+
Receive → Validate → Process (primary tool) → Create Instance → Publish Result → Notify
|
|
8
|
+
|
|
9
|
+
Usage:
|
|
10
|
+
from nats_kernel import NatsKernelLoop
|
|
11
|
+
|
|
12
|
+
def handle_message(msg):
|
|
13
|
+
return {"status": "ok", "echo": msg}
|
|
14
|
+
|
|
15
|
+
loop = NatsKernelLoop(CK_DIR, handle_message)
|
|
16
|
+
asyncio.run(loop.run())
|
|
17
|
+
"""
|
|
18
|
+
import asyncio
|
|
19
|
+
import json
|
|
20
|
+
import os
|
|
21
|
+
import time
|
|
22
|
+
import uuid
|
|
23
|
+
|
|
24
|
+
import yaml
|
|
25
|
+
import nats
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class NatsKernelLoop:
    """NATS listener that implements the CK processing cycle.

    Receive -> Validate -> Process (primary tool) -> Create Instance ->
    Publish Result -> Notify.

    The handler may be sync or async, and may optionally accept ``nc`` and
    ``trace_id`` keyword arguments (for streaming back over the connection).
    """

    # nats:// — native TCP (server processors in cluster)
    # wss:// — WebSocket (Python CLI from local machine, needs aiohttp)
    # Set NATS_URL env var to override
    DEFAULT_ENDPOINT = "nats://localhost:4222"

    def __init__(self, ck_dir, handler_fn):
        """Load kernel identity and topic configuration from conceptkernel.yaml.

        Args:
            ck_dir: kernel root directory containing conceptkernel.yaml.
            handler_fn: callable(body) or callable(body, nc=, trace_id=),
                sync or async, returning the result payload.

        Raises:
            OSError / KeyError: if conceptkernel.yaml is missing or lacks
                metadata.name / metadata.urn.
        """
        self.ck_dir = os.path.abspath(ck_dir)
        self.handler_fn = handler_fn
        self.instance_dir = os.path.join(self.ck_dir, "storage", "instances")

        # NATS endpoint: env var > default
        self.endpoint = os.environ.get("NATS_URL", self.DEFAULT_ENDPOINT)

        # Load identity from conceptkernel.yaml
        ck_yaml_path = os.path.join(self.ck_dir, "conceptkernel.yaml")
        with open(ck_yaml_path) as f:
            self.ck = yaml.safe_load(f)

        meta = self.ck["metadata"]
        spec = self.ck["spec"]
        nats_cfg = spec.get("nats", {})

        self.kernel_name = meta["name"]
        self.kernel_urn = meta["urn"]
        self.input_topic = nats_cfg.get("input_topic", f"input.{self.kernel_name}")
        self.result_topic = nats_cfg.get("result_topic", f"result.{self.kernel_name}")
        self.event_topic = nats_cfg.get("event_topic", f"event.{self.kernel_name}")

        self.nc = None  # set by run()

    async def run(self):
        """Connect to NATS and serve the input topic until cancelled.

        Drains the connection on exit so in-flight messages are flushed.
        """
        print(f"[nats] connecting to {self.endpoint}...")
        self.nc = await nats.connect(self.endpoint)
        print(f"[nats] connected as {self.kernel_urn}")
        print(f"[sub] {self.input_topic}")
        print(f"[ready] Listening on {self.input_topic}...")
        print()

        sub = await self.nc.subscribe(self.input_topic)

        try:
            async for msg in sub.messages:
                await self._handle(msg)
        except asyncio.CancelledError:
            pass
        finally:
            await self.nc.drain()

    def _invoke_handler(self, body, trace_id):
        """Call handler_fn with the richest signature it supports.

        Inspects the handler's signature instead of the old
        ``except TypeError: retry`` pattern, which re-invoked the handler
        (re-running its side effects) whenever the handler raised TypeError
        internally. Handlers declaring ``nc``/``trace_id`` params or
        ``**kwargs`` receive the extras; others get the body only.
        """
        import inspect
        try:
            params = inspect.signature(self.handler_fn).parameters
        except (TypeError, ValueError):
            # C callables / builtins without introspectable signatures:
            # fall back to the plain form.
            return self.handler_fn(body)
        accepts_extras = (
            any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values())
            or {"nc", "trace_id"} <= params.keys()
        )
        if accepts_extras:
            return self.handler_fn(body, nc=self.nc, trace_id=trace_id)
        return self.handler_fn(body)

    async def _handle(self, msg):
        """Run a single incoming message through the full CK cycle.

        Handler failures never escape: they become a status="error" result
        so the listener loop keeps serving subsequent messages.
        """
        import inspect

        ts = int(time.time())

        # Control attributes travel in NATS headers, not the body.
        headers = {}
        if msg.headers:
            for key in ("Trace-Id", "Nats-Msg-Id", "X-Kernel-ID", "X-User-ID", "X-Anonymous"):
                val = msg.headers.get(key)
                if val:
                    headers[key] = val

        trace_id = headers.get("Trace-Id", f"tx-{uuid.uuid4().hex[:6]}")
        sender_kernel = headers.get("X-Kernel-ID", "unknown")
        user_id = headers.get("X-User-ID", "anonymous")

        # Parse body (pure data — no control attributes)
        try:
            body = json.loads(msg.data.decode()) if msg.data else {}
        except (json.JSONDecodeError, UnicodeDecodeError):
            # Non-JSON payloads are preserved verbatim under "raw".
            body = {"raw": msg.data.decode("utf-8", errors="replace")}

        print(f"[rx] {trace_id} {msg.subject} {json.dumps(body, separators=(',', ':'))}")

        # Process via primary tool (handler_fn). Sync and async both work:
        # if the handler returned a coroutine, await it.
        try:
            raw = self._invoke_handler(body, trace_id)
            result = await raw if inspect.isawaitable(raw) else raw
            status = "ok"
        except Exception as e:
            result = {"error": str(e)}
            status = "error"

        # Create instance record on disk (one directory per processed message).
        instance_id = f"i-{trace_id}-{ts}"
        instance_path = os.path.join(self.instance_dir, instance_id)
        os.makedirs(instance_path, exist_ok=True)

        record = {
            "instance_id": instance_id,
            "kernel_urn": self.kernel_urn,
            "trace_id": trace_id,
            "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(ts)),
            "input": {
                "subject": msg.subject,
                "headers": headers,
                "body": body,
            },
            "output": result,
            "status": status,
            "sender": sender_kernel,
            "user_id": user_id,
        }

        record_path = os.path.join(instance_path, "message.json")
        with open(record_path, "w") as f:
            json.dump(record, f, indent=2)

        # Publish result envelope (trace id propagated in headers).
        result_envelope = {
            "trace_id": trace_id,
            "kernel_urn": self.kernel_urn,
            "timestamp": record["timestamp"],
            "status": status,
            "data": result,
        }

        result_headers = {
            "Trace-Id": trace_id,
            "X-Kernel-ID": self.kernel_urn,
            "Nats-Msg-Id": str(uuid.uuid4()),
        }

        await self.nc.publish(
            self.result_topic,
            json.dumps(result_envelope).encode(),
            headers=result_headers,
        )
        print(f"[tx] {trace_id} {self.result_topic}")

        # Publish event notification so observers learn an instance was created.
        event = {
            "type": "instance_created",
            "instance_id": instance_id,
            "trace_id": trace_id,
            "kernel_urn": self.kernel_urn,
            "timestamp": record["timestamp"],
        }

        await self.nc.publish(
            self.event_topic,
            json.dumps(event).encode(),
            headers={"Trace-Id": trace_id, "X-Kernel-ID": self.kernel_urn},
        )
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
"""Occurrent tracking — ontology-verified action substeps with PROV-O proof.
|
|
2
|
+
|
|
3
|
+
Every CKP action is an Occurrent (bfo:Process). Each substep within an action
|
|
4
|
+
is also an Occurrent, linked to its parent via prov:wasStartedBy.
|
|
5
|
+
|
|
6
|
+
At completion, the parent action's proof is built from the chain of substep
|
|
7
|
+
occurrents — each one verified before the next can start.
|
|
8
|
+
|
|
9
|
+
Usage:
|
|
10
|
+
from cklib.occurrent import ActionOccurrent
|
|
11
|
+
|
|
12
|
+
with ActionOccurrent("project.deploy", kernel_urn="ckp://Kernel#CK.Operator:v1.0") as action:
|
|
13
|
+
action.step("deploy.accepted", {"hostname": "delvinator.tech.games"})
|
|
14
|
+
action.step("deploy.scanning", {"kernels": 6})
|
|
15
|
+
|
|
16
|
+
# Substep with verification
|
|
17
|
+
with action.substep("deploy.materialising") as sub:
|
|
18
|
+
sub.detail("resources", 9)
|
|
19
|
+
sub.verify("namespace_created", True)
|
|
20
|
+
sub.verify("pv_bound", True)
|
|
21
|
+
sub.verify("deployment_ready", True)
|
|
22
|
+
|
|
23
|
+
action.step("deploy.ready", {"url": "https://delvinator.tech.games/"})
|
|
24
|
+
|
|
25
|
+
# action.proof contains the hash chain of all steps
|
|
26
|
+
# action.occurrents contains the full list with URNs and timestamps
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
import hashlib
|
|
30
|
+
import json
|
|
31
|
+
import os
|
|
32
|
+
import time
|
|
33
|
+
import uuid
|
|
34
|
+
from datetime import datetime, timezone
|
|
35
|
+
from dataclasses import dataclass, field, asdict
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class StepRecord:
    """One occurrent step inside an action's hash chain."""
    step_urn: str                                       # ckp://Occurrent#... identity
    step_type: str                                      # e.g. "deploy.accepted"
    timestamp: str                                      # ISO-8601 UTC
    detail: dict = field(default_factory=dict)          # arbitrary step payload
    verifications: list = field(default_factory=list)   # VerificationRecord entries
    hash: str = ""                                      # SHA-256 link over step + prev
    parent_hash: str = ""                               # previous link in the chain
    status: str = "ok"                                  # "ok" | "verification_failed"


@dataclass
class VerificationRecord:
    """Outcome of a single verification check performed during a step."""
    check: str           # check name, e.g. "namespace_created"
    passed: bool         # whether the check succeeded
    detail: str = ""     # optional human-readable evidence
    timestamp: str = ""  # ISO-8601 UTC when the check ran


class ActionOccurrent:
    """Track an action as a chain of verified occurrent steps.

    Each step gets:
    - A URN: ckp://Occurrent#{kernel}/{action}/{step}-{ts}
    - A SHA-256 hash chained to the previous step
    - Optional verification checks (pass/fail)
    - PROV-O links: wasStartedBy parent action

    The complete action proof = hash chain of all steps.
    """

    def __init__(self, action_name, kernel_urn="", ck_dir=None):
        self.action_name = action_name
        self.kernel_urn = kernel_urn
        self.ck_dir = ck_dir

        millis = int(time.time() * 1000)
        self.action_urn = f"ckp://Action#{self._kernel_short()}/{action_name}-{millis}"
        self.started_at = datetime.now(timezone.utc).isoformat()
        self.ended_at = None
        self.steps = []
        # Chain genesis: the hash of the action URN itself.
        self._prev_hash = self._hash(self.action_urn)
        self._current_substep = None

    def _kernel_short(self):
        """Short kernel name extracted from the URN fragment, or "unknown"."""
        if "#" not in self.kernel_urn:
            return "unknown"
        return self.kernel_urn.split("#")[-1].split(":")[0]

    def step(self, step_type, detail=None):
        """Append a step whose hash chains to the previous one; return it."""
        payload = detail or {}
        now_iso = datetime.now(timezone.utc).isoformat()
        millis = int(time.time() * 1000)
        urn = f"ckp://Occurrent#{self._kernel_short()}/{self.action_name}/{step_type}-{millis}"

        # Canonical JSON of (step, detail, previous hash) is the link content.
        link = self._hash(json.dumps(
            {"step": step_type, "detail": payload, "prev": self._prev_hash},
            sort_keys=True))

        rec = StepRecord(
            step_urn=urn,
            step_type=step_type,
            timestamp=now_iso,
            detail=payload,
            hash=link,
            parent_hash=self._prev_hash,
        )
        self.steps.append(rec)
        self._prev_hash = link
        return rec

    def substep(self, step_type):
        """Open a verification-carrying substep; recorded when its `with` exits."""
        return _Substep(self, step_type)

    def verify_chain(self):
        """Recompute every link and report the first break, if any."""
        expected_prev = self._hash(self.action_urn)
        for rec in self.steps:
            recomputed = self._hash(json.dumps(
                {"step": rec.step_type, "detail": rec.detail, "prev": expected_prev},
                sort_keys=True))
            if rec.hash != recomputed:
                return False, f"Chain broken at {rec.step_type}: expected {recomputed[:12]}, got {rec.hash[:12]}"
            expected_prev = rec.hash
        return True, "Chain intact"

    @property
    def proof(self):
        """Action proof: chain validity, verification summary, PROV-O metadata."""
        valid, message = self.verify_chain()
        checks_ok = all(v.passed for s in self.steps for v in s.verifications)
        return {
            "action_urn": self.action_urn,
            "kernel_urn": self.kernel_urn,
            "action": self.action_name,
            "started_at": self.started_at,
            "ended_at": self.ended_at,
            "steps": len(self.steps),
            "chain_valid": valid,
            "chain_message": message,
            "all_verified": checks_ok,
            "final_hash": self._prev_hash,
            "prov:wasAssociatedWith": self.kernel_urn,
            "prov:startedAtTime": self.started_at,
            "prov:endedAtTime": self.ended_at,
        }

    @property
    def occurrents(self):
        """All step records as plain dicts."""
        return [asdict(rec) for rec in self.steps]

    def save(self, path=None):
        """Persist proof + occurrents to *path*, or under ck_dir/storage/proof/."""
        if path is None and self.ck_dir:
            proof_dir = os.path.join(self.ck_dir, "storage", "proof")
            os.makedirs(proof_dir, exist_ok=True)
            path = os.path.join(
                proof_dir, f"proof-{self.action_name}-{int(time.time())}.json")
        if not path:
            return  # nowhere to write — silently skip
        with open(path, "w") as f:
            json.dump({"proof": self.proof, "occurrents": self.occurrents}, f, indent=2)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.ended_at = datetime.now(timezone.utc).isoformat()
        if exc_type:
            # Record the failure as a terminal step; the exception still propagates.
            self.step("action.failed", {"error": str(exc_val)})
        if self.ck_dir:
            self.save()

    @staticmethod
    def _hash(content):
        """SHA-256 hex digest of a string (dicts are canonicalised first)."""
        text = json.dumps(content, sort_keys=True) if isinstance(content, dict) else content
        return hashlib.sha256(text.encode()).hexdigest()


class _Substep:
    """Context manager that collects detail + verifications, recorded on exit."""

    def __init__(self, parent, step_type):
        self.parent = parent
        self.step_type = step_type
        self._detail = {}
        self._verifications = []

    def detail(self, key, value):
        """Attach one key/value of step detail."""
        self._detail[key] = value

    def verify(self, check_name, passed, detail=""):
        """Record one pass/fail verification check."""
        self._verifications.append(VerificationRecord(
            check=check_name,
            passed=passed,
            detail=detail,
            timestamp=datetime.now(timezone.utc).isoformat(),
        ))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        rec = self.parent.step(self.step_type, self._detail)
        rec.verifications = self._verifications
        if not all(v.passed for v in self._verifications):
            rec.status = "verification_failed"
|
|
@@ -168,8 +168,7 @@ class KernelProcessor:
|
|
|
168
168
|
|
|
169
169
|
def listen(self):
    """Block forever serving this kernel's NATS input topic.

    Delegates the whole receive/process/publish cycle to
    cklib.nats.NatsKernelLoop, wired to this processor's handle_message.
    """
    from cklib.nats import NatsKernelLoop

    asyncio.run(NatsKernelLoop(self.ck_dir, self.handle_message).run())
|
|
175
174
|
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|