@milaboratories/pl-middle-layer 1.51.0 → 1.52.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. package/dist/cfg_render/executor.cjs +2 -2
  2. package/dist/cfg_render/executor.js +1 -1
  3. package/dist/debug/index.cjs +4 -1
  4. package/dist/debug/index.cjs.map +1 -1
  5. package/dist/debug/index.js +4 -1
  6. package/dist/debug/index.js.map +1 -1
  7. package/dist/index.cjs +14 -0
  8. package/dist/index.d.ts +3 -2
  9. package/dist/index.js +2 -1
  10. package/dist/js_render/computable_context.cjs +12 -2
  11. package/dist/js_render/computable_context.cjs.map +1 -1
  12. package/dist/js_render/computable_context.js +12 -2
  13. package/dist/js_render/computable_context.js.map +1 -1
  14. package/dist/js_render/context.cjs +36 -3
  15. package/dist/js_render/context.cjs.map +1 -1
  16. package/dist/js_render/context.js +36 -3
  17. package/dist/js_render/context.js.map +1 -1
  18. package/dist/js_render/index.cjs +5 -1
  19. package/dist/js_render/index.cjs.map +1 -1
  20. package/dist/js_render/index.js +5 -1
  21. package/dist/js_render/index.js.map +1 -1
  22. package/dist/middle_layer/project.cjs +8 -5
  23. package/dist/middle_layer/project.cjs.map +1 -1
  24. package/dist/middle_layer/project.js +8 -5
  25. package/dist/middle_layer/project.js.map +1 -1
  26. package/dist/middle_layer/project_overview.cjs +28 -22
  27. package/dist/middle_layer/project_overview.cjs.map +1 -1
  28. package/dist/middle_layer/project_overview.js +28 -22
  29. package/dist/middle_layer/project_overview.js.map +1 -1
  30. package/dist/model/block_pack_spec.cjs.map +1 -1
  31. package/dist/model/block_pack_spec.d.ts +2 -2
  32. package/dist/model/block_pack_spec.js.map +1 -1
  33. package/dist/model/template_spec.d.ts +7 -2
  34. package/dist/mutator/block-pack/block_pack.cjs +20 -1
  35. package/dist/mutator/block-pack/block_pack.cjs.map +1 -1
  36. package/dist/mutator/block-pack/block_pack.d.ts +4 -0
  37. package/dist/mutator/block-pack/block_pack.js +19 -1
  38. package/dist/mutator/block-pack/block_pack.js.map +1 -1
  39. package/dist/mutator/template/template_cache.cjs +515 -0
  40. package/dist/mutator/template/template_cache.cjs.map +1 -0
  41. package/dist/mutator/template/template_cache.d.ts +78 -0
  42. package/dist/mutator/template/template_cache.js +502 -0
  43. package/dist/mutator/template/template_cache.js.map +1 -0
  44. package/dist/mutator/template/template_loading.cjs +3 -1
  45. package/dist/mutator/template/template_loading.cjs.map +1 -1
  46. package/dist/mutator/template/template_loading.js +3 -1
  47. package/dist/mutator/template/template_loading.js.map +1 -1
  48. package/package.json +11 -11
  49. package/src/debug/index.ts +6 -0
  50. package/src/index.ts +1 -0
  51. package/src/js_render/computable_context.ts +13 -2
  52. package/src/js_render/context.ts +58 -5
  53. package/src/js_render/index.ts +8 -1
  54. package/src/middle_layer/project.ts +12 -8
  55. package/src/middle_layer/project_overview.ts +6 -0
  56. package/src/model/block_pack_spec.ts +2 -2
  57. package/src/model/template_spec.ts +11 -1
  58. package/src/mutator/block-pack/block_pack.ts +35 -1
  59. package/src/mutator/template/template_cache.test.ts +373 -0
  60. package/src/mutator/template/template_cache.ts +763 -0
  61. package/src/mutator/template/template_loading.ts +3 -0
@@ -0,0 +1,763 @@
1
+ import { createHash } from "node:crypto";
2
+ import type {
3
+ AnyResourceRef,
4
+ PlClient,
5
+ PlTransaction,
6
+ ResourceId,
7
+ ResourceRef,
8
+ } from "@milaboratories/pl-client";
9
+ import {
10
+ ensureResourceIdNotNull,
11
+ field,
12
+ resourceType,
13
+ toGlobalResourceId,
14
+ } from "@milaboratories/pl-client";
15
+ import {
16
+ parseTemplate,
17
+ PlTemplateLibV1,
18
+ PlTemplateOverrideV1,
19
+ PlTemplateSoftwareV1,
20
+ PlTemplateV1,
21
+ } from "@milaboratories/pl-model-backend";
22
+ import type {
23
+ CompiledTemplateV3,
24
+ TemplateData,
25
+ TemplateDataV3,
26
+ TemplateLibData,
27
+ TemplateLibDataV3,
28
+ TemplateSoftwareData,
29
+ TemplateSoftwareDataV3,
30
+ } from "@milaboratories/pl-model-backend";
31
+ import { notEmpty } from "@milaboratories/ts-helpers";
32
+ import type { BlockPackSpecPrepared } from "../../model";
33
+ import type { TemplateSpecPrepared } from "../../model/template_spec";
34
+ import { getDebugFlags } from "../../debug";
35
+
36
/** Resource type of the shared template cache container attached to the user root. */
export const TemplateCacheType = resourceType("TemplateCache", "1");

/** Name of the dynamic field on the client root that points at the cache resource. */
export const TemplateCacheFieldName = "__templateCache";
/** Max number of cache nodes created per write transaction during materialization. */
const BATCH_SIZE = 50;
/** Access count at which a GC pass is triggered. @internal exported for testing */
export const GC_ACCESS_THRESHOLD = 30;
/** Max cache entries kept after a GC pass. @internal exported for testing */
export const GC_MAX_ENTRIES = 3000;
/** KV key holding the number of accesses since the last GC. @internal exported for testing */
export const ACCESS_COUNT_KEY = "_accessCount";
/** KV key prefix for per-entry last-access timestamps. @internal exported for testing */
export const ACCESS_KEY_PREFIX = "access_";
48
+
49
// ─── Stats ───────────────────────────────────────────────────────────────────

/** Timing/counter snapshot of a single loadTemplateCached invocation (debug logging). */
export type TemplateCacheStat = {
  /** Wall-clock duration of the whole loadTemplateCached call */
  totalMs: number;
  /** Time spent flattening the template tree into nodes */
  flattenMs: number;
  /** Time spent resolving/creating the cache resource */
  cacheInitMs: number;
  /** Time spent in materialize() (probe + batched creation) */
  materializeMs: number;
  /** Unique nodes produced by flattening */
  totalNodes: number;
  /** Nodes found already present in the cache */
  cacheHits: number;
  /** Nodes that had to be created */
  cacheMisses: number;
  /** Number of write transactions used for node creation */
  batchCount: number;
  /** True when the root template was already cached (single-roundtrip path) */
  happyPath: boolean;
  /** True when a GC pass was run after materialization */
  gcTriggered: boolean;
  /** Materialization attempts beyond the first */
  retries: number;
  /** Template data `type` tag (e.g. "pl.tengo-template.v2") */
  templateFormat: string;
};

/** Zeroed stat record; key order is kept stable for JSON.stringify log output. */
function initialStat(): TemplateCacheStat {
  return {
    totalMs: 0,
    flattenMs: 0,
    cacheInitMs: 0,
    materializeMs: 0,
    totalNodes: 0,
    cacheHits: 0,
    cacheMisses: 0,
    batchCount: 0,
    happyPath: false,
    gcTriggered: false,
    retries: 0,
    templateFormat: "",
  };
}
82
+
83
// ─── Tree node abstraction ───────────────────────────────────────────────────

/** One template-tree node, content-addressed by hash; produced in leaves-first order. */
interface CacheableNode {
  /** SHA-256 content hash (includes all descendant content) */
  hash: string;
  /** Creates this node's resource in a transaction.
   * childRefs maps child hash → already-resolved ResourceRef or ResourceId */
  create: (tx: PlTransaction, childRefs: ReadonlyMap<string, AnyResourceRef>) => ResourceRef;
  /** Hashes of direct child nodes this node depends on */
  childHashes: string[];
}
94
+
95
+ // ─── Hash computation helpers ────────────────────────────────────────────────
96
+
97
+ function getSourceCode(name: string, sources: Record<string, string>, sourceHash: string): string {
98
+ return notEmpty(
99
+ sources[sourceHash],
100
+ `trying to get "${name}" source: sources map doesn't contain source hash ${sourceHash}`,
101
+ );
102
+ }
103
+
104
+ /**
105
+ * Bottom-up hash composition: each node hashes its OWN content + child hash STRINGS.
106
+ * This means each unique node is hashed exactly once → O(n) instead of O(n * depth).
107
+ */
108
+
109
+ // V2 leaf hashes (libs, software — no children)
110
+
111
+ function hashLibV2(lib: TemplateLibData): string {
112
+ return createHash("sha256")
113
+ .update(PlTemplateLibV1.type.name)
114
+ .update(PlTemplateLibV1.type.version)
115
+ .update(lib.name)
116
+ .update(lib.version)
117
+ .update(lib.src)
118
+ .digest("hex");
119
+ }
120
+
121
+ function hashSoftwareV2(sw: TemplateSoftwareData): string {
122
+ return createHash("sha256")
123
+ .update(PlTemplateSoftwareV1.type.name)
124
+ .update(PlTemplateSoftwareV1.type.version)
125
+ .update(sw.name)
126
+ .update(sw.version)
127
+ .update(sw.src)
128
+ .digest("hex");
129
+ }
130
+
131
+ // V3 leaf hashes — use sourceHash directly instead of resolving source content
132
+
133
+ function hashLibV3(lib: TemplateLibDataV3): string {
134
+ return createHash("sha256")
135
+ .update(PlTemplateLibV1.type.name)
136
+ .update(PlTemplateLibV1.type.version)
137
+ .update(lib.name)
138
+ .update(lib.version)
139
+ .update(lib.sourceHash)
140
+ .digest("hex");
141
+ }
142
+
143
+ function hashSoftwareV3(sw: TemplateSoftwareDataV3): string {
144
+ return createHash("sha256")
145
+ .update(PlTemplateSoftwareV1.type.name)
146
+ .update(PlTemplateSoftwareV1.type.version)
147
+ .update(sw.name)
148
+ .update(sw.version)
149
+ .update(sw.sourceHash)
150
+ .digest("hex");
151
+ }
152
+
153
+ // ─── Tree flattening ─────────────────────────────────────────────────────────
154
+
155
/** Flattens a V2 template tree into content-addressed nodes, leaves first, deduplicated by hash. */
function flattenV2Tree(data: TemplateData): CacheableNode[] {
  const nodes: CacheableNode[] = [];
  // Hashes already emitted — identical subtrees are flattened only once.
  const seen = new Set<string>();

  // Emits a library leaf node (value resource, no children); returns its hash.
  function processLib(lib: TemplateLibData): string {
    const hash = hashLibV2(lib);
    if (!seen.has(hash)) {
      seen.add(hash);
      nodes.push({
        hash,
        create: (tx) =>
          tx.createValue(
            PlTemplateLibV1.type,
            JSON.stringify(PlTemplateLibV1.fromV2Data(lib).data),
          ),
        childHashes: [],
      });
    }
    return hash;
  }

  // Emits a software leaf node (struct + name KV, locked); returns its hash.
  function processSoftware(sw: TemplateSoftwareData): string {
    const hash = hashSoftwareV2(sw);
    if (!seen.has(hash)) {
      seen.add(hash);
      nodes.push({
        hash,
        create: (tx) => {
          const swData = PlTemplateSoftwareV1.fromV2Data(sw);
          const ref = tx.createStruct(PlTemplateSoftwareV1.type, swData.data);
          tx.setKValue(ref, PlTemplateSoftwareV1.metaNameKey, JSON.stringify(swData.name));
          tx.lock(ref);
          return ref;
        },
        childHashes: [],
      });
    }
    return hash;
  }

  // Recursively emits a template node after all of its children; returns its hash.
  function processTemplate(tpl: TemplateData): string {
    // Process children first (bottom-up) — their hashes are computed before ours
    const childHashes: string[] = [];
    const children: { fieldName: string; hash: string }[] = [];

    for (const [libId, lib] of Object.entries(tpl.libs ?? {})) {
      const h = processLib(lib);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.libPrefix}/${libId}`, hash: h });
    }
    for (const [swId, sw] of Object.entries(tpl.software ?? {})) {
      const h = processSoftware(sw);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.softPrefix}/${swId}`, hash: h });
    }
    // NOTE(review): assets reuse softPrefix — an id present in both software and
    // assets would produce duplicate field names; presumably ids are disjoint.
    for (const [swId, sw] of Object.entries(tpl.assets ?? {})) {
      const h = processSoftware(sw);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.softPrefix}/${swId}`, hash: h });
    }
    for (const [tplId, sub] of Object.entries(tpl.templates ?? {})) {
      const h = processTemplate(sub);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.tplPrefix}/${tplId}`, hash: h });
    }

    // Compose hash from own content + child hash strings (NOT child content)
    const h = createHash("sha256")
      .update(PlTemplateV1.type.name)
      .update(PlTemplateV1.type.version)
      .update(tpl.hashOverride ?? "no-override")
      .update(tpl.name)
      .update(tpl.version)
      .update(tpl.src);
    for (const child of children) {
      h.update("child:" + child.fieldName + ":" + child.hash);
    }
    const hash = h.digest("hex");

    if (seen.has(hash)) return hash;
    seen.add(hash);
    nodes.push({
      hash,
      create: (tx, childRefs) => {
        const tplRef = tx.createStruct(
          PlTemplateV1.type,
          JSON.stringify(PlTemplateV1.fromV2Data(tpl).data),
        );
        // Wire each child under its prefixed field name.
        for (const child of children) {
          const fld = field(tplRef, child.fieldName);
          tx.createField(fld, "Input");
          tx.setField(fld, notEmpty(childRefs.get(child.hash), `missing child ref ${child.hash}`));
        }
        tx.lock(tplRef);

        if (!tpl.hashOverride) return tplRef;

        // hashOverride present: wrap the template in an override resource and
        // return the wrapper instead.
        const overrideRef = tx.createStruct(
          PlTemplateOverrideV1.type,
          JSON.stringify(PlTemplateOverrideV1.fromV2Data(tpl)),
        );
        const overrideFld = PlTemplateOverrideV1.tplField(overrideRef);
        tx.createField(overrideFld, "Service");
        tx.setField(overrideFld, tplRef);
        tx.lock(overrideRef);
        return overrideRef;
      },
      childHashes,
    });

    return hash;
  }

  // Root is processed last, so it ends up at the tail of the list.
  processTemplate(data);
  return nodes;
}
271
+
272
/** Flattens a V3 compiled template into content-addressed nodes, leaves first, deduplicated by hash. */
function flattenV3Tree(data: CompiledTemplateV3): CacheableNode[] {
  const nodes: CacheableNode[] = [];
  // Hashes already emitted — identical subtrees are flattened only once.
  const seen = new Set<string>();
  // V3 stores source bodies out-of-line, keyed by hash; resolved lazily in create().
  const sources = data.hashToSource;

  // Emits a library leaf node (value resource, no children); returns its hash.
  function processLib(lib: TemplateLibDataV3): string {
    const hash = hashLibV3(lib);
    if (!seen.has(hash)) {
      seen.add(hash);
      nodes.push({
        hash,
        create: (tx) =>
          tx.createValue(
            PlTemplateLibV1.type,
            JSON.stringify(
              PlTemplateLibV1.fromV3Data(lib, getSourceCode(lib.name, sources, lib.sourceHash))
                .data,
            ),
          ),
        childHashes: [],
      });
    }
    return hash;
  }

  // Emits a software leaf node (struct + name KV, locked); returns its hash.
  function processSoftware(sw: TemplateSoftwareDataV3): string {
    const hash = hashSoftwareV3(sw);
    if (!seen.has(hash)) {
      seen.add(hash);
      nodes.push({
        hash,
        create: (tx) => {
          const swData = PlTemplateSoftwareV1.fromV3Data(
            sw,
            getSourceCode(sw.name, sources, sw.sourceHash),
          );
          const ref = tx.createStruct(PlTemplateSoftwareV1.type, swData.data);
          tx.setKValue(ref, PlTemplateSoftwareV1.metaNameKey, JSON.stringify(swData.name));
          tx.lock(ref);
          return ref;
        },
        childHashes: [],
      });
    }
    return hash;
  }

  // Recursively emits a template node after all of its children; returns its hash.
  function processTemplate(tpl: TemplateDataV3): string {
    // Process children first (bottom-up)
    const childHashes: string[] = [];
    const children: { fieldName: string; hash: string }[] = [];

    for (const [libId, lib] of Object.entries(tpl.libs ?? {})) {
      const h = processLib(lib);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.libPrefix}/${libId}`, hash: h });
    }
    for (const [swId, sw] of Object.entries(tpl.software ?? {})) {
      const h = processSoftware(sw);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.softPrefix}/${swId}`, hash: h });
    }
    // NOTE(review): assets reuse softPrefix — an id present in both software and
    // assets would produce duplicate field names; presumably ids are disjoint.
    for (const [swId, sw] of Object.entries(tpl.assets ?? {})) {
      const h = processSoftware(sw);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.softPrefix}/${swId}`, hash: h });
    }
    for (const [tplId, sub] of Object.entries(tpl.templates ?? {})) {
      const h = processTemplate(sub);
      childHashes.push(h);
      children.push({ fieldName: `${PlTemplateV1.tplPrefix}/${tplId}`, hash: h });
    }

    // Compose hash from own content + child hash strings (NOT child content).
    // Uses sourceHash directly — it already uniquely identifies the source.
    const h = createHash("sha256")
      .update(PlTemplateV1.type.name)
      .update(PlTemplateV1.type.version)
      .update(tpl.hashOverride ?? "no-override")
      .update(tpl.name)
      .update(tpl.version)
      .update(tpl.sourceHash);
    for (const child of children) {
      h.update("child:" + child.fieldName + ":" + child.hash);
    }
    const hash = h.digest("hex");

    if (seen.has(hash)) return hash;
    seen.add(hash);
    nodes.push({
      hash,
      create: (tx, childRefs) => {
        // Source body is only needed at creation time, not for hashing.
        const sourceCode = getSourceCode(tpl.name, sources, tpl.sourceHash);
        const tplRef = tx.createStruct(
          PlTemplateV1.type,
          JSON.stringify(PlTemplateV1.fromV3Data(tpl, sourceCode).data),
        );
        // Wire each child under its prefixed field name.
        for (const child of children) {
          const fld = field(tplRef, child.fieldName);
          tx.createField(fld, "Input");
          tx.setField(fld, notEmpty(childRefs.get(child.hash), `missing child ref ${child.hash}`));
        }
        tx.lock(tplRef);

        if (!tpl.hashOverride) return tplRef;

        // hashOverride present: wrap the template in an override resource and
        // return the wrapper instead.
        const overrideRef = tx.createStruct(
          PlTemplateOverrideV1.type,
          JSON.stringify(PlTemplateOverrideV1.fromV3Data(tpl)),
        );
        const overrideFld = PlTemplateOverrideV1.tplField(overrideRef);
        tx.createField(overrideFld, "Service");
        tx.setField(overrideFld, tplRef);
        tx.lock(overrideRef);
        return overrideRef;
      },
      childHashes,
    });

    return hash;
  }

  // Root is processed last, so it ends up at the tail of the list.
  processTemplate(data.template);
  return nodes;
}
397
+
398
+ /** Flatten template tree into a topologically ordered list of cacheable nodes (leaves first). */
399
+ export function flattenTemplateTree(data: TemplateData | CompiledTemplateV3): CacheableNode[] {
400
+ if (data.type === "pl.tengo-template.v2") {
401
+ return flattenV2Tree(data);
402
+ } else {
403
+ return flattenV3Tree(data);
404
+ }
405
+ }
406
+
407
+ // ─── Cache operations ────────────────────────────────────────────────────────
408
+
409
/** In-memory cache for the TemplateCache ResourceId per PlClient instance.
 * WeakMap keying lets the entry disappear with the client. */
const cacheRidMap = new WeakMap<PlClient, ResourceId>();

/** Clear the in-memory cacheRid entry (call on errors referencing the cache resource). */
export function invalidateTemplateCacheId(pl: PlClient): void {
  cacheRidMap.delete(pl);
}
416
+
417
/** Find or create the TemplateCache/1 resource on user root.
 * Result is memoized per client in cacheRidMap. */
export async function getOrCreateTemplateCache(pl: PlClient): Promise<ResourceId> {
  // Check in-memory cache first (0ms after first call)
  const cached = cacheRidMap.get(pl);
  if (cached !== undefined) return cached;

  // Try read-only check
  const existing = await pl.withReadTx("templateCache:check", async (tx) => {
    const fd = await tx.getFieldIfExists(field(pl.clientRoot, TemplateCacheFieldName));
    return fd ? ensureResourceIdNotNull(fd.value) : undefined;
  });
  if (existing) {
    cacheRidMap.set(pl, existing);
    return existing;
  }

  const result = await pl.withWriteTx("templateCache:init", async (tx) => {
    // Double-check inside write tx (another instance may have created it)
    const fd = await tx.getFieldIfExists(field(pl.clientRoot, TemplateCacheFieldName));
    if (fd) return ensureResourceIdNotNull(fd.value);

    const cache = tx.createStruct(TemplateCacheType);
    tx.createField(field(pl.clientRoot, TemplateCacheFieldName), "Dynamic", cache);
    tx.lock(cache);
    await tx.commit();
    // NOTE(review): globalId is awaited only after commit — presumably local
    // refs resolve to global ids post-commit; confirm against pl-client docs.
    return await cache.globalId;
  });
  cacheRidMap.set(pl, result);
  return result;
}
447
+
448
+ /** Remove the template cache from user root. */
449
+ export async function dropTemplateCache(pl: PlClient): Promise<void> {
450
+ await pl.withWriteTx("templateCache:drop", async (tx) => {
451
+ const cacheField = field(pl.clientRoot, TemplateCacheFieldName);
452
+ const fd = await tx.getFieldIfExists(cacheField);
453
+ if (fd) {
454
+ tx.removeField(cacheField);
455
+ await tx.commit();
456
+ }
457
+ });
458
+ invalidateTemplateCacheId(pl);
459
+ }
460
+
461
+ // ─── GC ──────────────────────────────────────────────────────────────────────
462
+
463
+ /**
464
+ * Run count-based garbage collection on the template cache.
465
+ * Evicts least-recently-used entries when the cache exceeds maxEntries.
466
+ * Always resets the access counter to 0.
467
+ *
468
+ * @internal exported for testing (maxEntries parameter allows low thresholds in tests)
469
+ * @returns true if entries were evicted
470
+ */
471
+ export async function runGc(
472
+ pl: PlClient,
473
+ cacheRid: ResourceId,
474
+ maxEntries: number = GC_MAX_ENTRIES,
475
+ ): Promise<boolean> {
476
+ return await pl.withWriteTx("templateCache:gc", async (tx) => {
477
+ const kvs = await tx.listKeyValuesString(cacheRid);
478
+ const entries: { hash: string; timestamp: number }[] = [];
479
+ for (const { key, value } of kvs) {
480
+ if (!key.startsWith(ACCESS_KEY_PREFIX)) continue;
481
+ entries.push({
482
+ hash: key.slice(ACCESS_KEY_PREFIX.length),
483
+ timestamp: parseInt(value, 10),
484
+ });
485
+ }
486
+
487
+ // Always reset counter
488
+ tx.setKValue(cacheRid, ACCESS_COUNT_KEY, "0");
489
+
490
+ if (entries.length <= maxEntries) {
491
+ await tx.commit();
492
+ return false;
493
+ }
494
+
495
+ // Sort oldest first, evict until under limit
496
+ entries.sort((a, b) => a.timestamp - b.timestamp);
497
+ const toEvict = entries.length - maxEntries;
498
+ for (let i = 0; i < toEvict; i++) {
499
+ tx.removeField(field(cacheRid, entries[i].hash));
500
+ tx.deleteKValue(cacheRid, ACCESS_KEY_PREFIX + entries[i].hash);
501
+ }
502
+
503
+ await tx.commit();
504
+ return true;
505
+ });
506
+ }
507
+
508
+ // ─── Batched materialization ─────────────────────────────────────────────────
509
+
510
/** Create a batch of cache nodes in the current transaction.
 * Requires `batch` in topological order: a node's children must be either
 * previously resolved (resolvedIds) or created earlier in this batch (newRefs).
 * New refs are registered under field(cacheRid, hash) with an access timestamp. */
function createBatchNodes(
  tx: PlTransaction,
  cacheRid: ResourceId,
  batch: CacheableNode[],
  resolvedIds: ReadonlyMap<string, ResourceId>,
  newRefs: Map<string, ResourceRef>,
  now: string,
): void {
  for (const node of batch) {
    const childRefs = new Map<string, AnyResourceRef>();
    for (const ch of node.childHashes) {
      // Child may be a pre-existing cached resource or one created just above.
      const resolved = resolvedIds.get(ch) ?? newRefs.get(ch);
      if (resolved === undefined) {
        throw new Error(`BUG: child ${ch} not resolved`);
      }
      childRefs.set(ch, resolved);
    }
    const ref = node.create(tx, childRefs);
    newRefs.set(node.hash, ref);
    // Expose under the cache and stamp last-access time for GC.
    tx.createField(field(cacheRid, node.hash), "Dynamic", ref);
    tx.setKValue(cacheRid, ACCESS_KEY_PREFIX + node.hash, now);
  }
}
534
+
535
/**
 * Materialize a template tree via the cache using "probe all + batched creation".
 *
 * Phase 1 (single write tx):
 *   - Check existence of ALL hashes in one roundtrip
 *   - Happy path: if root cached, update access tracking and return
 *   - Otherwise: fetch ResourceIds for all cache hits, create first batch of missing nodes
 *
 * Phase 2..N (one write tx per batch):
 *   - Create remaining missing nodes in BATCH_SIZE chunks
 *
 * @param nodes flattened tree in topological order — the ROOT must be the LAST element
 * @returns root ResourceId and current access count (for GC decision)
 */
async function materialize(
  pl: PlClient,
  cacheRid: ResourceId,
  rootHash: string,
  nodes: CacheableNode[],
  stat: TemplateCacheStat,
): Promise<{ rootId: ResourceId; accessCount: number }> {
  const allHashes = nodes.map((n) => n.hash);
  // hash → committed global ResourceId, accumulated across all phases.
  const resolvedIds = new Map<string, ResourceId>();

  // Phase 1: probe all + first batch
  const phase1 = await pl.withWriteTx("templateCache:materialize", async (tx) => {
    // 1 roundtrip: check all hashes + read access count
    const [exists, countStr] = await Promise.all([
      Promise.all(allHashes.map((h) => tx.fieldExists(field(cacheRid, h)))),
      tx.getKValueStringIfExists(cacheRid, ACCESS_COUNT_KEY),
    ]);

    const prevCount = countStr ? parseInt(countStr, 10) : 0;
    const newCount = prevCount + 1;
    const now = Date.now().toString();
    // Root is the last node by flatten-order contract.
    const rootIdx = allHashes.length - 1;

    // Happy path: root already cached
    if (exists[rootIdx]) {
      const rootFd = await tx.getField(field(cacheRid, rootHash));
      const rootRid = ensureResourceIdNotNull(rootFd.value);
      // Touch only the root's access timestamp and bump the counter.
      tx.setKValue(cacheRid, ACCESS_KEY_PREFIX + rootHash, now);
      tx.setKValue(cacheRid, ACCESS_COUNT_KEY, newCount.toString());
      await tx.commit();
      stat.happyPath = true;
      stat.cacheHits = stat.totalNodes;
      stat.batchCount = 1;
      return { done: true as const, rootId: rootRid, accessCount: newCount };
    }

    // Fetch ResourceIds for all cache hits (1 roundtrip)
    const hitIndices: number[] = [];
    for (let i = 0; i < allHashes.length; i++) {
      if (exists[i]) hitIndices.push(i);
    }

    if (hitIndices.length > 0) {
      const hitFields = await Promise.all(
        hitIndices.map((i) => tx.getField(field(cacheRid, allHashes[i]))),
      );
      for (let j = 0; j < hitIndices.length; j++) {
        resolvedIds.set(allHashes[hitIndices[j]], ensureResourceIdNotNull(hitFields[j].value));
      }
    }
    stat.cacheHits = hitIndices.length;

    // Missing nodes (topo order preserved from flatten)
    const missing = nodes.filter((n) => !resolvedIds.has(n.hash));
    stat.cacheMisses = missing.length;

    // Create first batch of missing nodes
    const firstBatch = missing.slice(0, BATCH_SIZE);
    const newRefs = new Map<string, ResourceRef>();
    createBatchNodes(tx, cacheRid, firstBatch, resolvedIds, newRefs, now);

    // Update access tracking for cache hits
    for (const i of hitIndices) {
      tx.setKValue(cacheRid, ACCESS_KEY_PREFIX + allHashes[i], now);
    }
    tx.setKValue(cacheRid, ACCESS_COUNT_KEY, newCount.toString());

    await tx.commit();

    // Resolve new refs to global IDs (after commit)
    for (const [hash, ref] of newRefs) {
      resolvedIds.set(hash, await toGlobalResourceId(ref));
    }

    return {
      done: false as const,
      remaining: missing.slice(BATCH_SIZE),
      accessCount: newCount,
    };
  });

  if (phase1.done) {
    return { rootId: phase1.rootId, accessCount: phase1.accessCount };
  }

  stat.batchCount = 1;

  // Phase 2+: remaining batches
  const { remaining } = phase1;
  for (let i = 0; i < remaining.length; i += BATCH_SIZE) {
    const batch = remaining.slice(i, i + BATCH_SIZE);
    stat.batchCount++;

    await pl.withWriteTx("templateCache:create", async (tx) => {
      const newRefs = new Map<string, ResourceRef>();
      const now = Date.now().toString();
      createBatchNodes(tx, cacheRid, batch, resolvedIds, newRefs, now);
      await tx.commit();

      // Resolve this batch's refs so later batches can link to them.
      for (const [hash, ref] of newRefs) {
        resolvedIds.set(hash, await toGlobalResourceId(ref));
      }
    });
  }

  const rootId = resolvedIds.get(rootHash);
  if (!rootId) throw new Error("BUG: root hash not resolved after all batches");
  return { rootId, accessCount: phase1.accessCount };
}
657
+
658
/**
 * Materialize a template tree via the cache.
 * Manages its own transactions internally — do NOT call inside an existing tx.
 *
 * @param pl client used for all transactions
 * @param spec prepared template spec ("explicit" | "prepared" | "cached"; "from-registry" rejected)
 * @param options optional pre-resolved cache resource id (bypasses getOrCreateTemplateCache)
 * @returns concrete ResourceId of the root template
 * @throws Error for "from-registry" specs, empty trees, or after MAX_RETRIES failed attempts
 */
export async function loadTemplateCached(
  pl: PlClient,
  spec: TemplateSpecPrepared,
  options?: { cacheResourceId?: ResourceId },
): Promise<ResourceId> {
  const stat = initialStat();
  const t0 = performance.now();

  try {
    // Parse to data if needed
    let tplData: TemplateData | CompiledTemplateV3;
    switch (spec.type) {
      case "explicit":
        tplData = parseTemplate(spec.content);
        break;
      case "prepared":
        tplData = spec.data;
        break;
      case "cached":
        // Already materialized — nothing to do.
        return spec.resourceId;
      case "from-registry":
        throw new Error(
          "loadTemplateCached does not support from-registry specs; use loadTemplate instead",
        );
      default: {
        // Exhaustiveness check: compiler error here means a new spec type was added.
        const _: never = spec;
        throw new Error(`unexpected spec type: ${(_ as any).type}`);
      }
    }

    stat.templateFormat = tplData.type;

    // Flatten to ordered nodes
    const tFlatten = performance.now();
    const nodes = flattenTemplateTree(tplData);
    stat.flattenMs = performance.now() - tFlatten;
    if (nodes.length === 0) throw new Error("template tree produced no nodes");

    stat.totalNodes = nodes.length;
    // Root is last by flatten contract.
    const rootHash = nodes[nodes.length - 1].hash;

    // Resolve or create cache resource
    const tCacheInit = performance.now();
    const cacheRid = options?.cacheResourceId ?? (await getOrCreateTemplateCache(pl));
    stat.cacheInitMs = performance.now() - tCacheInit;

    // Retry loop: if a write tx fails (e.g. concurrent GC invalidated a cached resource),
    // restart materialization from scratch.
    const MAX_RETRIES = 3;
    for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
      try {
        const tMat = performance.now();
        const result = await materialize(pl, cacheRid, rootHash, nodes, stat);
        stat.materializeMs = performance.now() - tMat;
        stat.retries = attempt;

        // GC in separate tx if access count exceeded threshold
        if (result.accessCount >= GC_ACCESS_THRESHOLD) {
          await runGc(pl, cacheRid);
          stat.gcTriggered = true;
        }

        return result.rootId;
      } catch (e) {
        if (attempt === MAX_RETRIES - 1) throw e;
        // Retry from scratch — previous batch results may reference GC'd resources
        // NOTE(review): only hit/miss counters are reset here; batchCount keeps
        // accumulating across attempts — verify this is intended for the stat log.
        stat.cacheHits = 0;
        stat.cacheMisses = 0;
      }
    }

    throw new Error("BUG: unreachable");
  } finally {
    // Stats are logged even on failure paths.
    stat.totalMs = performance.now() - t0;
    if (getDebugFlags().logTemplateCacheStat) {
      console.log(`[templateCache] ${JSON.stringify(stat)}`);
    }
  }
}
743
+
744
+ // ─── Caller helper ───────────────────────────────────────────────────────────
745
+
746
+ /**
747
+ * Pre-materialize a block pack's template via cache.
748
+ * Returns a copy of the spec with the template replaced by a cached reference.
749
+ * If the template is already cached, returns the spec unchanged.
750
+ */
751
+ export async function cacheBlockPackTemplate(
752
+ pl: PlClient,
753
+ spec: BlockPackSpecPrepared,
754
+ options?: { cacheResourceId?: ResourceId },
755
+ ): Promise<BlockPackSpecPrepared> {
756
+ if (spec.template.type === "cached") return spec;
757
+
758
+ const resourceId = await loadTemplateCached(pl, spec.template, options);
759
+ return {
760
+ ...spec,
761
+ template: { type: "cached", resourceId },
762
+ };
763
+ }