tigrbl-kernel 0.1.0.dev5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,52 @@
1
+ Metadata-Version: 2.4
2
+ Name: tigrbl-kernel
3
+ Version: 0.1.0.dev5
4
+ Summary: Kernel orchestration for Tigrbl runtime composition.
5
+ License-Expression: Apache-2.0
6
+ Keywords: tigrbl,sdk,standards,framework
7
+ Author: Jacob Stewart
8
+ Author-email: jacob@swarmauri.com
9
+ Requires-Python: >=3.10,<3.13
10
+ Classifier: License :: OSI Approved :: Apache Software License
11
+ Classifier: Development Status :: 1 - Planning
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3 :: Only
18
+ Requires-Dist: tigrbl-atoms
19
+ Requires-Dist: tigrbl-core
20
+ Requires-Dist: tigrbl-typing
21
+ Description-Content-Type: text/markdown
22
+
23
+ ![Tigrbl branding](https://github.com/swarmauri/swarmauri-sdk/blob/a170683ecda8ca1c4f912c966d4499649ffb8224/assets/tigrbl.brand.theme.svg)
24
+
25
+ # tigrbl-kernel
26
+
27
+ ![PyPI - Downloads](https://img.shields.io/pypi/dm/tigrbl-kernel.svg) ![Hits](https://hits.sh/github.com/swarmauri/swarmauri-sdk.svg) ![Python Versions](https://img.shields.io/pypi/pyversions/tigrbl-kernel.svg) ![License](https://img.shields.io/pypi/l/tigrbl-kernel.svg) ![Version](https://img.shields.io/pypi/v/tigrbl-kernel.svg)
28
+
29
+ ## Features
30
+
31
+ - Modular package in the Tigrbl namespace.
32
+ - Supports Python 3.10 through 3.12.
33
+ - Distributed as part of the swarmauri-sdk workspace.
34
+
35
+ ## Installation
36
+
37
+ ### uv
38
+
39
+ ```bash
40
+ uv add tigrbl-kernel
41
+ ```
42
+
43
+ ### pip
44
+
45
+ ```bash
46
+ pip install tigrbl-kernel
47
+ ```
48
+
49
+ ## Usage
50
+
51
+ Import from the shared package-specific module namespaces after installation in your environment.
52
+
@@ -0,0 +1,29 @@
1
+ ![Tigrbl branding](https://github.com/swarmauri/swarmauri-sdk/blob/a170683ecda8ca1c4f912c966d4499649ffb8224/assets/tigrbl.brand.theme.svg)
2
+
3
+ # tigrbl-kernel
4
+
5
+ ![PyPI - Downloads](https://img.shields.io/pypi/dm/tigrbl-kernel.svg) ![Hits](https://hits.sh/github.com/swarmauri/swarmauri-sdk.svg) ![Python Versions](https://img.shields.io/pypi/pyversions/tigrbl-kernel.svg) ![License](https://img.shields.io/pypi/l/tigrbl-kernel.svg) ![Version](https://img.shields.io/pypi/v/tigrbl-kernel.svg)
6
+
7
+ ## Features
8
+
9
+ - Modular package in the Tigrbl namespace.
10
+ - Supports Python 3.10 through 3.12.
11
+ - Distributed as part of the swarmauri-sdk workspace.
12
+
13
+ ## Installation
14
+
15
+ ### uv
16
+
17
+ ```bash
18
+ uv add tigrbl-kernel
19
+ ```
20
+
21
+ ### pip
22
+
23
+ ```bash
24
+ pip install tigrbl-kernel
25
+ ```
26
+
27
+ ## Usage
28
+
29
+ Import from the shared package-specific module namespaces after installation in your environment.
@@ -0,0 +1,48 @@
1
+ [project]
2
+ name = "tigrbl-kernel"
3
+ version = "0.1.0.dev5"
4
+ description = "Kernel orchestration for Tigrbl runtime composition."
5
+ license = "Apache-2.0"
6
+ readme = "README.md"
7
+ repository = "https://github.com/swarmauri/swarmauri-sdk"
8
+ requires-python = ">=3.10,<3.13"
9
+ classifiers = [
10
+ "License :: OSI Approved :: Apache Software License",
11
+ "Development Status :: 1 - Planning",
12
+ "Programming Language :: Python :: 3.10",
13
+ "Programming Language :: Python :: 3.11",
14
+ "Programming Language :: Python :: 3.12",
15
+ "Programming Language :: Python",
16
+ "Programming Language :: Python :: 3",
17
+ "Programming Language :: Python :: 3 :: Only",
18
+ ]
19
+ authors = [{ name = "Jacob Stewart", email = "jacob@swarmauri.com" }]
20
+ dependencies = [
21
+ "tigrbl-typing",
22
+ "tigrbl-atoms",
23
+ "tigrbl-core",
24
+ ]
25
+ keywords = ["tigrbl", "sdk", "standards", "framework"]
26
+
27
+ [tool.uv.sources]
28
+ "tigrbl-typing" = { workspace = true }
29
+ "tigrbl-atoms" = { workspace = true }
30
+ "tigrbl-core" = { workspace = true }
31
+
32
+ [build-system]
33
+ requires = ["poetry-core>=1.0.0"]
34
+ build-backend = "poetry.core.masonry.api"
35
+
36
+
37
+ [tool.poetry]
38
+ packages = [
39
+ { include = "tigrbl_kernel" },
40
+ ]
41
+
42
+ [dependency-groups]
43
+ dev = [
44
+ "pytest>=8.0",
45
+ "pytest-asyncio>=0.24",
46
+ "pytest-timeout>=2.3",
47
+ "ruff>=0.9",
48
+ ]
@@ -0,0 +1,44 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict, List, Mapping
4
+
5
+ from tigrbl_atoms import StepFn
6
+ from .core import Kernel
7
+ from .models import OpView, PackedKernel, SchemaIn, SchemaOut
8
+
9
+ _default_kernel = Kernel()
10
+
11
+
12
def get_cached_specs(model: type) -> Mapping[str, Any]:
    """Return the cached column/op specs for *model* from the default kernel."""
    specs = _default_kernel.get_specs(model)
    return specs
14
+
15
+
16
def build_phase_chains(model: type, alias: str) -> Dict[str, List[StepFn]]:
    """Build (or fetch from cache) per-phase step chains for one op alias."""
    chains = _default_kernel._build_op(model, alias)
    return chains
18
+
19
+
20
def build_kernel_plan(app: Any):
    """Compile *app* into a kernel plan using the module-level default kernel."""
    plan = _default_kernel.kernel_plan(app)
    return plan
22
+
23
+
24
def build_packed_kernel(app: Any) -> PackedKernel | None:
    """Return only the packed artifact of *app*'s compiled kernel plan."""
    plan = _default_kernel.kernel_plan(app)
    return plan.packed
26
+
27
+
28
def plan_labels(model: type, alias: str) -> list[str]:
    """Return human-readable execution-plan labels for *model*/*alias*."""
    labels = _default_kernel.plan_labels(model, alias)
    return labels
30
+
31
+
32
+ __all__ = [
33
+ "Kernel",
34
+ "OpView",
35
+ "PackedKernel",
36
+ "SchemaIn",
37
+ "SchemaOut",
38
+ "build_kernel_plan",
39
+ "build_packed_kernel",
40
+ "get_cached_specs",
41
+ "_default_kernel",
42
+ "build_phase_chains",
43
+ "plan_labels",
44
+ ]
@@ -0,0 +1,478 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from dataclasses import replace
5
+ from types import SimpleNamespace
6
+ from typing import Any, Dict, List, Mapping
7
+
8
+ from tigrbl_atoms import StepFn
9
+ from tigrbl_atoms.atoms.sys.phase_db import run as _bind_phase_db
10
+
11
+ from . import events as _ev
12
+ from .atoms import (
13
+ _hook_phase_chains,
14
+ _inject_atoms,
15
+ _inject_pre_tx_dep_atoms,
16
+ _is_persistent,
17
+ _wrap_atom,
18
+ )
19
+ from .models import HotOpPlan, KernelPlan, OpKey, OpView, PackedKernel
20
+ from .types import (
21
+ EGRESS_PHASES,
22
+ INGRESS_PHASES,
23
+ LOWER_KIND_ASYNC_DIRECT,
24
+ LOWER_KIND_SPLIT_EXTRACTABLE,
25
+ LOWER_KIND_SYNC_EXTRACTABLE,
26
+ )
27
+ from .utils import (
28
+ _classify_step_lowering,
29
+ _effect_descriptor_for_step,
30
+ _label_step,
31
+ _opspecs,
32
+ )
33
+
34
+ logger = logging.getLogger(__name__)
35
+
36
+
37
+ _PHASE_DB_LABEL = "atom:sys:phase_db@SYS_PHASE_DB_BIND"
38
+ _RUNTIME_EXECUTION_ORDER = (
39
+ "INGRESS_BEGIN",
40
+ "INGRESS_PARSE",
41
+ "INGRESS_DISPATCH",
42
+ "PRE_TX_BEGIN",
43
+ "START_TX",
44
+ "PRE_HANDLER",
45
+ "HANDLER",
46
+ "POST_HANDLER",
47
+ "PRE_COMMIT",
48
+ "END_TX",
49
+ "POST_COMMIT",
50
+ "POST_RESPONSE",
51
+ "EGRESS_SHAPE",
52
+ "EGRESS_FINALIZE",
53
+ )
54
+
55
+
56
def _phase_stamp(self: Any, model: type, alias: str) -> tuple[Any, ...]:
    """Build an identity stamp used to detect stale cached phase chains.

    The stamp records object identities (and chain lengths) for every input
    that feeds chain construction for ``model``/``alias``.  If any of those
    objects is replaced, the stamp changes and the cached chains are rebuilt.
    """
    hooks = getattr(model, "hooks", None) or SimpleNamespace()
    ns = getattr(hooks, alias, None)
    by_alias = getattr(getattr(model, "ops", SimpleNamespace()), "by_alias", {})
    spec_list = by_alias.get(alias) or ()
    spec = spec_list[0] if spec_list else None
    per_phase = []
    for phase in _ev.PHASES:
        chain = getattr(ns, phase, None)
        per_phase.append((phase, id(chain), len(chain or ())))
    return (
        id(hooks),
        id(ns),
        tuple(per_phase),
        id(by_alias),
        id(spec_list),
        id(spec),
        id(self._atoms()),
    )
+ )
79
+
80
+
81
+ def _dedupe_consecutive_steps(steps: list[StepFn]) -> list[StepFn]:
82
+ """Remove adjacent duplicate callables introduced by chain composition."""
83
+ if len(steps) < 2:
84
+ return steps
85
+ deduped: list[StepFn] = [steps[0]]
86
+ last_id = id(steps[0])
87
+ for step in steps[1:]:
88
+ step_id = id(step)
89
+ if step_id == last_id:
90
+ continue
91
+ deduped.append(step)
92
+ last_id = step_id
93
+ if len(deduped) % 2 == 0:
94
+ half = len(deduped) // 2
95
+ lhs = tuple(
96
+ getattr(step, "__tigrbl_label", None) or _label_step(step, "")
97
+ for step in deduped[:half]
98
+ )
99
+ rhs = tuple(
100
+ getattr(step, "__tigrbl_label", None) or _label_step(step, "")
101
+ for step in deduped[half:]
102
+ )
103
+ if lhs == rhs:
104
+ return deduped[:half]
105
+ return deduped
106
+
107
+
108
def _phase_db_step() -> StepFn:
    """Return a freshly wrapped phase-DB binding atom tagged with its label."""
    wrapped = _wrap_atom(_bind_phase_db, anchor="SYS_PHASE_DB_BIND")
    wrapped.__tigrbl_label = _PHASE_DB_LABEL
    return wrapped
112
+
113
+
114
def _prepend_phase_db_binding(
    chains: Dict[str, List[StepFn]],
    phases: tuple[str, ...] | list[str],
) -> None:
    """Ensure each listed phase chain starts with the phase-DB binding step.

    Idempotent: a chain whose first step already carries the binding label
    is left in place (only normalized to a fresh list).  Mutates ``chains``.
    """
    for name in phases:
        current = list(chains.get(name, ()) or ())
        already_bound = bool(current) and (
            getattr(current[0], "__tigrbl_label", None) == _PHASE_DB_LABEL
        )
        if already_bound:
            chains[name] = current
        else:
            chains[name] = [_phase_db_step(), *current]
124
+
125
+
126
def _build_op(self, model: type, alias: str) -> Dict[str, List[StepFn]]:
    """Assemble (and memoize) the per-phase step chains for one operation.

    Results are cached per ``(model, alias)`` and invalidated through an
    identity stamp of all construction inputs (see ``_phase_stamp``).
    Unhashable model types fall back to an ``id``-keyed cache.
    """
    from .core import DEFAULT_PHASE_ORDER

    # Unhashable model classes cannot key a dict; key by object id instead.
    try:
        op_cache = self._phase_chains.setdefault(model, {})
    except TypeError:
        op_cache = self._phase_chains_by_id.setdefault(id(model), {})
    stamp = _phase_stamp(self, model, alias)
    entry = op_cache.get(alias)
    if entry is not None and entry[0] == stamp:
        return entry[1]

    chains = _hook_phase_chains(model, alias)
    by_alias = getattr(getattr(model, "ops", SimpleNamespace()), "by_alias", {})
    spec_list = by_alias.get(alias) or ()
    spec = spec_list[0] if spec_list else None
    target = (getattr(spec, "target", alias) or "").lower()
    # Reads/lists never persist unless the chains themselves are persistent.
    persistent = (
        getattr(spec, "persist", "default") != "skip"
        and target not in {"read", "list"}
    ) or _is_persistent(chains)

    try:
        _inject_atoms(
            chains,
            self._atoms() or (),
            persistent=persistent,
            target=target,
        )
    except Exception:
        # Atom injection must not break chain construction; log and proceed.
        logger.exception(
            "kernel: atom injection failed for %s.%s",
            getattr(model, "__name__", model),
            alias,
        )

    _inject_pre_tx_dep_atoms(chains, spec)

    # Every canonical phase gets at least an empty chain, each prefixed with
    # the phase-DB binding step.
    for phase in DEFAULT_PHASE_ORDER:
        chains.setdefault(phase, [])
    _prepend_phase_db_binding(chains, list(DEFAULT_PHASE_ORDER))
    op_cache[alias] = (stamp, chains)
    return chains
170
+
171
+
172
+ def _build(self, model: type, alias: str) -> Dict[str, List[StepFn]]:
173
+ return self._build_op(model, alias)
174
+
175
+
176
def _build_ingress(self, app: Any) -> Dict[str, List[StepFn]]:
    """Collect registered atoms into per-phase ingress chains.

    ``app`` is accepted for interface symmetry but unused.  Valid-event
    atoms anchored in an ingress phase are grouped by phase, ordered by
    their event position (unknown anchors sort last), wrapped, and every
    ingress phase is prefixed with the phase-DB binding step.
    """
    del app
    event_rank = {name: pos for pos, name in enumerate(_ev.all_events_ordered())}
    grouped: Dict[str, List[tuple[str, Any]]] = {}
    for anchor, run in self._atoms() or ():
        if not _ev.is_valid_event(anchor):
            continue
        phase = _ev.phase_for_event(anchor)
        if phase in INGRESS_PHASES:
            grouped.setdefault(phase, []).append((anchor, run))

    chains: Dict[str, List[StepFn]] = {}
    for phase, members in grouped.items():
        members.sort(key=lambda pair: event_rank.get(pair[0], 10_000))
        chains[phase] = [_wrap_atom(fn, anchor=name) for name, fn in members]
    for phase in INGRESS_PHASES:
        chains.setdefault(phase, [])
    _prepend_phase_db_binding(chains, list(INGRESS_PHASES))
    return chains
196
+
197
+
198
def _build_egress(self, app: Any) -> Dict[str, List[StepFn]]:
    """Collect registered atoms into per-phase egress chains.

    Mirror image of ``_build_ingress``: ``app`` is unused, valid-event
    atoms anchored in an egress phase are grouped, event-ordered (unknown
    anchors last), wrapped, and each egress phase is prefixed with the
    phase-DB binding step.
    """
    del app
    event_rank = {name: pos for pos, name in enumerate(_ev.all_events_ordered())}
    grouped: Dict[str, List[tuple[str, Any]]] = {}
    for anchor, run in self._atoms() or ():
        if not _ev.is_valid_event(anchor):
            continue
        phase = _ev.phase_for_event(anchor)
        if phase in EGRESS_PHASES:
            grouped.setdefault(phase, []).append((anchor, run))

    chains: Dict[str, List[StepFn]] = {}
    for phase, members in grouped.items():
        members.sort(key=lambda pair: event_rank.get(pair[0], 10_000))
        chains[phase] = [_wrap_atom(fn, anchor=name) for name, fn in members]
    for phase in EGRESS_PHASES:
        chains.setdefault(phase, [])
    _prepend_phase_db_binding(chains, list(EGRESS_PHASES))
    return chains
218
+
219
+
220
def _plan_labels(self, model: type, alias: str) -> list[str]:
    """Return display labels describing the execution plan for one op.

    Labels are ``"PHASE:step-label"`` strings in plan order.  Transaction
    begin/end markers are synthesized (not read from the chains) whenever
    the op persists, and the infrastructural phase-DB binding step is
    hidden from the listing.
    """
    from .core import DEFAULT_PHASE_ORDER

    labels: list[str] = []
    chains = self._build(model, alias)
    opspec = next(
        (sp for sp in _opspecs(model) if getattr(sp, "alias", None) == alias),
        None,
    )
    # Missing spec defaults to persisting; only an explicit "skip" opts out.
    persist = getattr(opspec, "persist", "default") != "skip"

    tx_begin = "START_TX:hook:sys:txn:begin@START_TX"
    tx_end = "END_TX:hook:sys:txn:commit@END_TX"
    if persist:
        labels.append(tx_begin)

    def _display_phase(phase: str, step_label: str) -> str:
        # POST_COMMIT hosts schema build/dump steps that are presented under
        # neighbouring phases for readability.
        if phase != "POST_COMMIT":
            return phase
        if "@out:build" in step_label:
            return "POST_HANDLER"
        if "@out:dump" in step_label:
            return "POST_RESPONSE"
        return phase

    for phase in DEFAULT_PHASE_ORDER:
        # The TX boundary phases are represented by the synthetic markers.
        if phase in {"START_TX", "END_TX"}:
            continue
        for step in chains.get(phase, ()) or ():
            step_label = _label_step(step, phase)
            # Skip the phase-DB binding infrastructure step.
            if "SYS_PHASE_DB_BIND" in str(step_label):
                continue
            labels.append(f"{_display_phase(phase, step_label)}:{step_label}")

    if persist:
        labels.append(tx_end)

    return labels
258
+
259
+
260
+ def _segment_label(self, program_id: int, phase: str) -> str:
261
+ return f"program:{program_id}:{phase}"
262
+
263
+
264
+ def _build_route_matrix(
265
+ self,
266
+ *,
267
+ proto_names: tuple[str, ...],
268
+ selector_names: tuple[str, ...],
269
+ opkey_to_meta: Mapping[OpKey, int],
270
+ ) -> tuple[tuple[int, ...], ...]:
271
+ proto_to_id = {name: idx for idx, name in enumerate(proto_names)}
272
+ selector_to_id = {name: idx for idx, name in enumerate(selector_names)}
273
+ matrix = [[-1 for _ in selector_names] for _ in proto_names]
274
+ for key, meta_index in opkey_to_meta.items():
275
+ proto_id = proto_to_id.get(key.proto)
276
+ selector_id = selector_to_id.get(key.selector)
277
+ if proto_id is None or selector_id is None:
278
+ continue
279
+ matrix[proto_id][selector_id] = int(meta_index)
280
+ return tuple(tuple(row) for row in matrix)
281
+
282
+
283
def _pack_kernel_plan(
    self,
    plan: KernelPlan,
    *,
    opviews: tuple[OpView | None, ...] = (),
    rest_exact_route_to_program: Mapping[tuple[str, str], int] | None = None,
) -> PackedKernel:
    """Lower a ``KernelPlan`` into the flat, index-based ``PackedKernel``.

    The packed form replaces nested dict/list structures with parallel
    tuples of ids and offsets: each op maps to a contiguous run of segment
    ids, each segment (one non-empty phase of one op) maps to a contiguous
    run of step ids, and steps are deduplicated by object identity into a
    single table.  Per-op ``HotOpPlan`` records precompute the runtime
    segment ordering and fusion bucketing.  Executors are attached at the
    end when the kernel instance provides builder hooks.

    NOTE(review): assumes ``plan.opmeta`` order defines program ids and that
    ``plan.phase_chains`` is keyed by those ids — confirm against planner.
    """
    from .core import DEFAULT_PHASE_ORDER

    # --- stable name tables and their reverse lookups ---------------------
    selector_names = tuple(sorted({key.selector for key in plan.opkey_to_meta.keys()}))
    proto_names = tuple(sorted(plan.proto_indices.keys()))
    op_names = tuple(
        f"{getattr(meta.model, '__name__', None) or getattr(meta.model, 'model_ref', None) or str(meta.model)}.{meta.alias}"
        for meta in plan.opmeta
    )

    proto_to_id = {name: idx for idx, name in enumerate(proto_names)}
    selector_to_id = {name: idx for idx, name in enumerate(selector_names)}
    op_to_id = {name: idx for idx, name in enumerate(op_names)}

    # Parallel per-step tables; step_index dedupes by id(step).
    step_index: dict[int, int] = {}
    step_table: list[StepFn] = []
    step_labels: list[str] = []
    effect_ids: list[int] = []
    effect_payloads: list[tuple[int, ...]] = []
    step_async_flags: list[bool] = []

    # Parallel per-segment tables.
    segment_offsets: list[int] = []
    segment_lengths: list[int] = []
    segment_step_ids: list[int] = []
    segment_phases: list[str] = []
    segment_executor_kinds: list[str] = []

    # Per-op views into the segment tables.
    op_segment_offsets: list[int] = []
    op_segment_lengths: list[int] = []
    op_to_segment_ids: list[int] = []

    for program_id, _meta in enumerate(plan.opmeta):
        chains = dict(plan.phase_chains.get(program_id, {}) or {})
        op_segment_offsets.append(len(op_to_segment_ids))
        seg_count = 0
        for phase in DEFAULT_PHASE_ORDER:
            steps = _dedupe_consecutive_steps(list(chains.get(phase, ()) or ()))
            if not steps:
                continue

            seg_id = len(segment_offsets)
            segment_offsets.append(len(segment_step_ids))
            segment_lengths.append(len(steps))
            segment_phases.append(phase)

            # A segment is sync-fusible only when every step is; any async
            # step forces the async executor kind, otherwise split execution.
            kinds = {_classify_step_lowering(step, phase) for step in steps}
            if len(kinds) == 1 and LOWER_KIND_SYNC_EXTRACTABLE in kinds:
                segment_executor_kinds.append(LOWER_KIND_SYNC_EXTRACTABLE)
            elif LOWER_KIND_ASYNC_DIRECT in kinds:
                segment_executor_kinds.append(LOWER_KIND_ASYNC_DIRECT)
            else:
                segment_executor_kinds.append(LOWER_KIND_SPLIT_EXTRACTABLE)

            for step in steps:
                key = id(step)
                step_id = step_index.get(key)
                if step_id is None:
                    # First sighting: register the step in all parallel tables.
                    step_id = len(step_table)
                    step_index[key] = step_id
                    step_table.append(step)
                    step_labels.append(_label_step(step, phase))
                    effect_id, payload = _effect_descriptor_for_step(step)
                    effect_ids.append(effect_id)
                    effect_payloads.append(payload)
                    is_async = bool(getattr(step, "_tigrbl_is_async", False))
                    if not is_async:
                        # 0x80 is CO_COROUTINE: detect native coroutine fns.
                        marker = getattr(step, "__code__", None)
                        is_async = bool(getattr(marker, "co_flags", 0) & 0x80)
                    step_async_flags.append(is_async)
                segment_step_ids.append(step_id)

            op_to_segment_ids.append(seg_id)
            seg_count += 1
        op_segment_lengths.append(seg_count)

    route_to_program = self._build_route_matrix(
        proto_names=proto_names,
        selector_names=selector_names,
        opkey_to_meta=plan.opkey_to_meta,
    )

    # --- per-op hot plans: runtime ordering plus fusion bucketing ---------
    hot_op_plans: list[HotOpPlan] = []
    for program_id, _meta in enumerate(plan.opmeta):
        meta = plan.opmeta[program_id]
        seg_offset = op_segment_offsets[program_id]
        seg_length = op_segment_lengths[program_id]
        by_phase: dict[str, list[int]] = {}
        ordered_segments: list[int] = []
        remaining_segments: list[int] = []
        seen_segment_ids: set[int] = set()
        error_segment_ids: dict[str, list[int]] = {}
        fusible_sync_segment_ids: list[int] = []
        nonfusible_segment_ids: list[int] = []

        # ON_* phases are error handlers; keep them out of the main order.
        for idx in range(seg_offset, seg_offset + seg_length):
            seg_id = op_to_segment_ids[idx]
            phase = str(segment_phases[seg_id])
            if phase.startswith("ON_"):
                error_segment_ids.setdefault(phase, []).append(seg_id)
                continue
            by_phase.setdefault(phase, []).append(seg_id)

        # First: segments whose phase appears in the runtime order.
        for phase in _RUNTIME_EXECUTION_ORDER:
            for seg_id in by_phase.pop(phase, ()):
                if seg_id in seen_segment_ids:
                    continue
                seen_segment_ids.add(seg_id)
                ordered_segments.append(seg_id)

        # Then: leftovers (phases unknown to the runtime order), in the
        # declaration order of the op's segments.
        for idx in range(seg_offset, seg_offset + seg_length):
            seg_id = op_to_segment_ids[idx]
            if seg_id in seen_segment_ids:
                continue
            phase = str(segment_phases[seg_id])
            if phase.startswith("ON_"):
                continue
            seen_segment_ids.add(seg_id)
            remaining_segments.append(seg_id)

        # Bucket segments by executor kind for fusion decisions.
        for seg_id in (*ordered_segments, *remaining_segments):
            if segment_executor_kinds[seg_id] == LOWER_KIND_SYNC_EXTRACTABLE:
                fusible_sync_segment_ids.append(seg_id)
                continue
            nonfusible_segment_ids.append(seg_id)

        hot_op_plans.append(
            HotOpPlan(
                program_id=program_id,
                model=getattr(meta, "model", None),
                alias=str(getattr(meta, "alias", "") or ""),
                target=str(getattr(meta, "target", "") or ""),
                opview=opviews[program_id] if program_id < len(opviews) else None,
                ordered_segment_ids=tuple(ordered_segments),
                remaining_segment_ids=tuple(remaining_segments),
                error_segment_ids={
                    phase: tuple(seg_ids)
                    for phase, seg_ids in error_segment_ids.items()
                },
                fusible_sync_segment_ids=tuple(fusible_sync_segment_ids),
                nonfusible_segment_ids=tuple(nonfusible_segment_ids),
                # Prefer the model's own DB getter when it exposes one.
                db_acquire_hint=(
                    "model_get_db"
                    if callable(
                        getattr(getattr(meta, "model", None), "__tigrbl_get_db__", None)
                    )
                    else "resolver"
                ),
            )
        )

    packed = PackedKernel(
        proto_names=proto_names,
        selector_names=selector_names,
        op_names=op_names,
        proto_to_id=proto_to_id,
        selector_to_id=selector_to_id,
        op_to_id=op_to_id,
        route_to_program=route_to_program,
        segment_offsets=tuple(segment_offsets),
        segment_lengths=tuple(segment_lengths),
        segment_step_ids=tuple(segment_step_ids),
        segment_phases=tuple(segment_phases),
        segment_executor_kinds=tuple(segment_executor_kinds),
        op_segment_offsets=tuple(op_segment_offsets),
        op_segment_lengths=tuple(op_segment_lengths),
        op_to_segment_ids=tuple(op_to_segment_ids),
        step_table=tuple(step_table),
        step_labels=tuple(step_labels),
        numba_effect_ids=tuple(effect_ids),
        numba_effect_payloads=tuple(effect_payloads),
        step_async_flags=tuple(step_async_flags),
        rest_exact_route_to_program=dict(rest_exact_route_to_program or {}),
        hot_op_plans=tuple(hot_op_plans),
        executor_kind="python",
    )
    # Attach executors only when the kernel subclass provides builder hooks.
    build_python_executor = getattr(self, "_build_python_packed_executor", None)
    build_numba_executor = getattr(self, "_build_numba_packed_executor", None)
    return replace(
        packed,
        executor=build_python_executor(packed)
        if callable(build_python_executor)
        else None,
        numba_executor=build_numba_executor(packed)
        if callable(build_numba_executor)
        else None,
    )
475
+
476
+
477
+ def _compile_bootstrap_plan(self, app: Any) -> Dict[str, List[StepFn]]:
478
+ return self._build_ingress(app)