lynkr 8.0.1 → 9.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,492 @@
1
+ /**
2
+ * Distill — Core Algorithms for Intelligent Compression
3
+ *
4
+ * Ported from samuelfaj/distill (TypeScript CLI tool).
5
+ * Provides structural similarity detection, delta rendering,
6
+ * burst detection, text normalization, and bad distillation detection
7
+ * for LLM-optimized context compression.
8
+ *
9
+ * @module context/distill
10
+ */
11
+
12
+ const logger = require('../logger');
13
+
14
+ // ── Text Normalization ──────────────────────────────────────────────
15
+
16
/**
 * Remove ANSI escape sequences (colors, cursor movement, etc.) from a string.
 * Falsy input yields the empty string.
 */
function stripAnsi(text) {
  if (!text) {
    return '';
  }
  // Matches both two-byte escapes and CSI sequences.
  // eslint-disable-next-line no-control-regex
  const ansiPattern = /\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])/g;
  return text.replace(ansiPattern, '');
}
24
+
25
/**
 * Normalize text for comparison:
 * - Strip ANSI codes
 * - Normalize line endings to '\n'
 * - Collapse horizontal whitespace runs to a single space
 * - Collapse 3+ consecutive newlines to a blank line
 * - Trim leading/trailing whitespace
 */
function normalizeText(text) {
  if (!text) return '';
  return stripAnsi(text)
    .replace(/\r\n/g, '\n')
    .replace(/\r/g, '\n')
    .replace(/[ \t]+/g, ' ')
    .replace(/\n{3,}/g, '\n\n')
    .trim();
}
40
+
41
/**
 * Extract a structural signature from text.
 * Splits the normalized text into lines, trims each, drops empties,
 * and returns a Set of unique line signatures for Jaccard comparison.
 */
function extractSignature(text) {
  const signature = new Set();
  for (const rawLine of normalizeText(text).split('\n')) {
    const line = rawLine.trim();
    if (line) {
      signature.add(line);
    }
  }
  return signature;
}
51
+
52
+ // ── Structural Similarity (Jaccard) ─────────────────────────────────
53
+
54
/**
 * Compute the Jaccard index |A ∩ B| / |A ∪ B| between two Sets.
 * Two empty sets are considered identical (returns 1).
 * Returns a value in [0, 1].
 */
function jaccardSimilarity(setA, setB) {
  if (setA.size === 0) return setB.size === 0 ? 1 : 0;
  if (setB.size === 0) return 0;

  // Iterate the smaller set for fewer membership probes.
  const [small, big] = setA.size <= setB.size ? [setA, setB] : [setB, setA];

  let shared = 0;
  small.forEach((item) => {
    if (big.has(item)) shared += 1;
  });

  const union = setA.size + setB.size - shared;
  return union === 0 ? 0 : shared / union;
}
73
+
74
/**
 * Compute structural similarity between two text blocks.
 * Uses normalized line signatures + Jaccard index.
 *
 * @param {string} a - First text
 * @param {string} b - Second text
 * @returns {number} Similarity score in [0, 1]
 */
function structuralSimilarity(a, b) {
  if (!a) return b ? 0 : 1;
  if (!b) return 0;
  return jaccardSimilarity(extractSignature(a), extractSignature(b));
}
91
+
92
+ // ── Delta Rendering ─────────────────────────────────────────────────
93
+
94
/**
 * Compute a delta between two text blocks.
 * Returns only the lines that changed (added/removed).
 * If similarity is above threshold, returns a compact diff summary;
 * otherwise the full current text is returned unchanged.
 *
 * @param {string} previous - Previous text
 * @param {string} current - Current text
 * @param {Object} options
 * @param {number} options.similarityThreshold - Min similarity to use delta (default 0.3)
 * @returns {Object} { isDelta, similarity, result, addedCount, removedCount }
 */
function deltaRender(previous, current, options = {}) {
  const minSimilarity = options.similarityThreshold ?? 0.3;

  // Nothing to diff against — pass current through verbatim.
  if (!previous) {
    return { isDelta: false, similarity: 0, result: current, addedCount: 0, removedCount: 0 };
  }

  const similarity = structuralSimilarity(previous, current);

  // Unrelated content: diffing would not help, return full text.
  if (similarity < minSimilarity) {
    return { isDelta: false, similarity, result: current, addedCount: 0, removedCount: 0 };
  }

  const oldLines = normalizeText(previous).split('\n');
  const newLines = normalizeText(current).split('\n');
  const oldSet = new Set(oldLines);
  const newSet = new Set(newLines);

  // Line-set difference (order-insensitive, duplicates collapse via the Sets).
  const addedLines = newLines.filter((line) => !oldSet.has(line));
  const removedLines = oldLines.filter((line) => !newSet.has(line));

  if (addedLines.length === 0 && removedLines.length === 0) {
    return {
      isDelta: true,
      similarity: 1,
      result: '[No changes]',
      addedCount: 0,
      removedCount: 0,
    };
  }

  const pieces = [];
  if (removedLines.length > 0) {
    pieces.push(`[Removed ${removedLines.length} lines]`);
  }
  if (addedLines.length > 0) {
    pieces.push(`[Added ${addedLines.length} lines]`);
    pieces.push(addedLines.join('\n'));
  }

  return {
    isDelta: true,
    similarity,
    result: pieces.join('\n'),
    addedCount: addedLines.length,
    removedCount: removedLines.length,
  };
}
155
+
156
+ // ── Burst Detection ─────────────────────────────────────────────────
157
+
158
/**
 * Detect output bursts — groups of data separated by idle periods.
 * Used to determine if output is streaming (many small bursts)
 * or batch (few large bursts).
 *
 * @param {Array<{timestamp: number, size: number}>} chunks - Output chunks with timing
 * @param {number} idleThresholdMs - Idle time to split bursts (default 2000ms)
 * @returns {Object} { burstCount, avgBurstSize, mode: 'streaming'|'batch' }
 */
function detectBursts(chunks, idleThresholdMs = 2000) {
  if (!chunks || chunks.length === 0) {
    return { burstCount: 0, avgBurstSize: 0, mode: 'batch' };
  }
  if (chunks.length === 1) {
    return { burstCount: 1, avgBurstSize: chunks[0].size, mode: 'batch' };
  }

  // Accumulate sizes per burst; a gap above the idle threshold closes a burst.
  const burstSizes = [];
  let running = chunks[0].size;

  for (let i = 1; i < chunks.length; i++) {
    const idle = chunks[i].timestamp - chunks[i - 1].timestamp;
    if (idle > idleThresholdMs) {
      burstSizes.push(running);
      running = chunks[i].size;
    } else {
      running += chunks[i].size;
    }
  }
  burstSizes.push(running);

  const totalSize = burstSizes.reduce((sum, size) => sum + size, 0);
  const burstCount = burstSizes.length;

  return {
    burstCount,
    avgBurstSize: Math.round(totalSize / burstCount),
    mode: burstCount > 5 ? 'streaming' : 'batch',
  };
}
201
+
202
+ // ── Bad Distillation Detection ──────────────────────────────────────
203
+
204
/**
 * Heuristics to detect when a compression/summary is worse than original.
 * Checks for:
 * - Summary is longer than original (expansion)
 * - Summary lost too much information (similarity too low)
 * - Summary is just a truncation of the original
 *
 * @param {string} original - Original text
 * @param {string} summary - Compressed/summarized text
 * @param {Object} options
 * @param {number} options.maxExpansionRatio - Max allowed summary/original ratio (default 1.1)
 * @param {number} options.minRetention - Min similarity to consider useful (default 0.15)
 * @returns {Object} { isBad, reasons: string[], similarity, expansionRatio }
 */
function detectBadDistillation(original, summary, options = {}) {
  const maxExpansionRatio = options.maxExpansionRatio ?? 1.1;
  const minRetention = options.minRetention ?? 0.15;

  const reasons = [];

  if (!original || !summary) {
    // FIX: keep the return shape identical to the main path so callers can
    // always destructure `similarity` / `expansionRatio` (they were missing here).
    return { isBad: false, reasons, similarity: 0, expansionRatio: 0 };
  }

  // Normalize once; lengths and the truncation check both derive from these
  // (the original code normalized each input twice).
  const origNorm = normalizeText(original);
  const sumNorm = normalizeText(summary);
  const origLen = origNorm.length;
  const sumLen = sumNorm.length;

  // Check expansion: a "summary" that grew beyond the allowed ratio is suspect.
  if (origLen > 0 && sumLen / origLen > maxExpansionRatio) {
    reasons.push(`Summary is ${((sumLen / origLen) * 100).toFixed(0)}% of original (expanded)`);
  }

  // Check retention via structural similarity; very short summaries are exempt
  // since low overlap is expected for aggressive compression.
  const similarity = structuralSimilarity(original, summary);
  if (similarity < minRetention && sumLen > 50) {
    reasons.push(`Low similarity (${(similarity * 100).toFixed(0)}%) — summary may not represent original`);
  }

  // Check if summary is just a prefix (simple truncation) of the original.
  if (origNorm.startsWith(sumNorm) && sumLen < origLen * 0.9) {
    reasons.push('Summary appears to be a simple truncation');
  }

  return {
    isBad: reasons.length > 0,
    reasons,
    similarity,
    expansionRatio: origLen > 0 ? sumLen / origLen : 0,
  };
}
257
+
258
+ // ── Repetition Detection ────────────────────────────────────────────
259
+
260
/**
 * Detect repetitive blocks in a sequence of text outputs.
 * Groups consecutive similar blocks and replaces the repeats with a
 * "[...repeated N more times...]" marker entry.
 *
 * @param {string[]} blocks - Array of text blocks (e.g., tool results)
 * @param {Object} options
 * @param {number} options.similarityThreshold - Threshold for "same" (default 0.8)
 * @returns {Object} { compressed: string[], stats: { totalBlocks, uniqueBlocks, duplicatesRemoved } }
 */
function deduplicateBlocks(blocks, options = {}) {
  const threshold = options.similarityThreshold ?? 0.8;

  if (!blocks || blocks.length <= 1) {
    return {
      compressed: blocks || [],
      stats: { totalBlocks: blocks?.length || 0, uniqueBlocks: blocks?.length || 0, duplicatesRemoved: 0 },
    };
  }

  const compressed = [];
  let duplicatesRemoved = 0;

  // Emit one run: its representative block plus a repetition marker when the
  // run collapsed more than one block. (Previously this logic was duplicated
  // at the loop exit.)
  const flushRun = (representative, runCount) => {
    compressed.push(representative);
    if (runCount > 1) {
      const extra = runCount - 1;
      compressed.push(`[...repeated ${extra} more time${extra > 1 ? 's' : ''} with minor variations]`);
      // FIX: count collapsed blocks directly. The old
      // `blocks.length - compressed.length` undercounted because marker
      // entries inflate `compressed` (two identical blocks reported 0
      // duplicates removed, so callers discarded the dedup).
      duplicatesRemoved += extra;
    }
  };

  let runStart = 0;
  let runCount = 1;

  for (let i = 1; i < blocks.length; i++) {
    const sim = structuralSimilarity(blocks[runStart], blocks[i]);
    if (sim >= threshold) {
      runCount++;
    } else {
      flushRun(blocks[runStart], runCount);
      runStart = i;
      runCount = 1;
    }
  }
  flushRun(blocks[runStart], runCount);

  return {
    compressed,
    stats: {
      totalBlocks: blocks.length,
      uniqueBlocks: compressed.filter((b) => !b.startsWith('[...repeated')).length,
      duplicatesRemoved,
    },
  };
}
316
+
317
+ // ── Smart Tool Result Compression ───────────────────────────────────
318
+
319
/**
 * Intelligently compress a tool result using Distill algorithms.
 * Applies in order:
 * 1. Text normalization (ANSI strip, whitespace cleanup)
 * 2. Delta rendering against previous result (if available)
 * 3. Structural dedup of repetitive sections within the result
 * 4. Head/tail truncation if still over the length limit
 *
 * @param {string} text - Tool result text
 * @param {Object} options
 * @param {string} options.previousResult - Previous tool result for delta rendering
 * @param {number} options.maxLength - Max output length (default 1000)
 * @returns {Object} { text, method, stats }
 */
function compressToolResult(text, options = {}) {
  if (!text) return { text: '', method: 'empty', stats: {} };

  const maxLength = options.maxLength ?? 1000;
  const originalLength = text.length;

  // Step 1: Normalize
  let result = normalizeText(text);

  // Step 2: Delta rendering against previous result
  if (options.previousResult) {
    const delta = deltaRender(options.previousResult, result);
    if (delta.isDelta && delta.similarity > 0.5) {
      result = delta.result;
      logger.debug({
        similarity: delta.similarity.toFixed(2),
        addedLines: delta.addedCount,
        removedLines: delta.removedCount,
      }, '[Distill] Delta rendering applied');

      // Short enough after delta rendering — done.
      if (result.length <= maxLength) {
        return {
          text: result,
          method: 'delta',
          stats: {
            originalLength,
            compressedLength: result.length,
            similarity: delta.similarity,
            savings: ((1 - result.length / originalLength) * 100).toFixed(1) + '%',
          },
        };
      }
    }
  }

  // Step 3: Internal dedup — split into logical sections and dedup
  const sections = result.split(/\n{2,}/);
  if (sections.length > 3) {
    const { compressed, stats } = deduplicateBlocks(sections);
    if (stats.duplicatesRemoved > 0) {
      result = compressed.join('\n\n');
      logger.debug({
        sectionsOriginal: stats.totalBlocks,
        duplicatesRemoved: stats.duplicatesRemoved,
      }, '[Distill] Section dedup applied');
    }
  }

  // Step 4: Truncate if still over limit — keep head and tail (40% each).
  if (result.length > maxLength) {
    const keepStart = Math.floor(maxLength * 0.4);
    const keepEnd = Math.floor(maxLength * 0.4);
    // FIX: report the number of characters actually dropped. The old marker
    // said `result.length - maxLength`, but only keepStart + keepEnd chars
    // (≈ 0.8 * maxLength) are retained.
    const removedChars = result.length - keepStart - keepEnd;
    const start = result.substring(0, keepStart);
    const end = result.substring(result.length - keepEnd);
    result = `${start}\n...[${removedChars} chars compressed]...\n${end}`;
  }

  return {
    text: result,
    method: result.length < originalLength ? 'distill' : 'passthrough',
    stats: {
      originalLength,
      compressedLength: result.length,
      savings: ((1 - result.length / originalLength) * 100).toFixed(1) + '%',
    },
  };
}
399
+
400
+ // ── History Dedup ───────────────────────────────────────────────────
401
+
402
/**
 * Deduplicate repetitive tool results across conversation history.
 * Scans tool_result blocks, finds structurally similar ones,
 * and replaces duplicates with a short reference string.
 *
 * @param {Array} messages - Conversation messages
 * @param {Object} options
 * @param {number} options.similarityThreshold - Threshold (default 0.8)
 * @returns {Object} { messages, stats }
 */
function deduplicateHistory(messages, options = {}) {
  if (!messages || messages.length === 0) {
    return { messages: messages || [], stats: { checked: 0, deduplicated: 0 } };
  }

  const threshold = options.similarityThreshold ?? 0.8;
  const seenResults = []; // { text, signature, index }
  let deduplicated = 0;
  let checked = 0;

  // Coerce a tool_result content field (string or block array) to plain text.
  const blockText = (content) => {
    if (typeof content === 'string') return content;
    if (Array.isArray(content)) {
      return content.map((c) => (typeof c === 'string' ? c : c.text || '')).join('\n');
    }
    return '';
  };

  const processed = messages.map((msg, msgIdx) => {
    if (!Array.isArray(msg.content)) return msg;

    const newContent = msg.content.map((block) => {
      if (block.type !== 'tool_result') return block;

      const text = blockText(block.content);
      if (!text || text.length < 100) return block; // Skip short results

      checked++;
      const signature = extractSignature(text);

      // Replace this block if it closely matches anything seen earlier.
      for (const seen of seenResults) {
        const sim = jaccardSimilarity(signature, seen.signature);
        if (sim >= threshold) {
          deduplicated++;
          return {
            ...block,
            content: `[Similar to earlier tool result — ${(sim * 100).toFixed(0)}% match, ${text.length} chars compressed]`,
          };
        }
      }

      // First occurrence — remember it for later comparisons.
      seenResults.push({ text, signature, index: msgIdx });
      return block;
    });

    return { ...msg, content: newContent };
  });

  return {
    messages: processed,
    stats: { checked, deduplicated },
  };
}
464
+
465
// Public API, grouped by concern. All helpers above are exported so callers
// can compose them individually or use the high-level entry points
// (compressToolResult, deduplicateHistory).
module.exports = {
  // Text normalization
  stripAnsi,
  normalizeText,
  extractSignature,

  // Structural similarity
  jaccardSimilarity,
  structuralSimilarity,

  // Delta rendering
  deltaRender,

  // Burst detection
  detectBursts,

  // Bad distillation detection
  detectBadDistillation,

  // Repetition detection
  deduplicateBlocks,

  // Smart compression
  compressToolResult,

  // History dedup
  deduplicateHistory,
};
@@ -55,6 +55,8 @@ function getDestinationUrl(providerType) {
55
55
  return config.vertex?.endpoint ?? 'unknown';
56
56
  case 'moonshot':
57
57
  return config.moonshot?.endpoint ?? 'unknown';
58
+ case 'codex':
59
+ return 'codex://app-server (local process)';
58
60
  default:
59
61
  return 'unknown';
60
62
  }
@@ -1085,7 +1087,10 @@ function toAnthropicResponse(openai, requestedModel, wantsThinking) {
1085
1087
  }
1086
1088
 
1087
1089
  function sanitizePayload(payload) {
1088
- const clean = JSON.parse(JSON.stringify(payload ?? {}));
1090
+ const { clonePayloadSmart } = require("../utils/payload");
1091
+ const providerType = config.modelProvider?.type ?? "databricks";
1092
+ const willFlatten = providerType !== "azure-anthropic";
1093
+ const clean = clonePayloadSmart(payload ?? {}, { willFlatten });
1089
1094
  const requestedModel =
1090
1095
  (typeof payload?.model === "string" && payload.model.trim().length > 0
1091
1096
  ? payload.model.trim()
@@ -1093,11 +1098,10 @@ function sanitizePayload(payload) {
1093
1098
  config.modelProvider?.defaultModel ??
1094
1099
  "databricks-claude-sonnet-4-5";
1095
1100
  clean.model = requestedModel;
1096
- if (!clean.max_tokens) {
1097
- clean.max_tokens = 16384;
1098
- }
1099
- const providerType = config.modelProvider?.type ?? "databricks";
1100
- const flattenContent = providerType !== "azure-anthropic";
1101
+ if (!clean.max_tokens) {
1102
+ clean.max_tokens = 16384;
1103
+ }
1104
+ const flattenContent = willFlatten;
1101
1105
  clean.messages = normaliseMessages(clean, { flattenContent }).filter((msg) => {
1102
1106
  const hasToolCalls =
1103
1107
  Array.isArray(msg?.tool_calls) && msg.tool_calls.length > 0;
@@ -1552,6 +1556,8 @@ async function runAgentLoop({
1552
1556
  headers,
1553
1557
  }) {
1554
1558
  logger.debug({ providerType, messageCount: cleanPayload.messages?.length }, 'runAgentLoop entered');
1559
+ const { createTimer } = require("../utils/perf-timer");
1560
+ const agentTimer = createTimer("agentLoop");
1555
1561
  const settings = resolveLoopOptions(options);
1556
1562
  // Initialize audit logger (no-op if disabled)
1557
1563
  const auditLogger = createAuditLogger(config.audit);
@@ -1634,6 +1640,7 @@ async function runAgentLoop({
1634
1640
  }
1635
1641
 
1636
1642
 
1643
+ if (steps === 1 && agentTimer) agentTimer.mark("preCompression");
1637
1644
  if (steps === 1 && config.historyCompression?.enabled !== false) {
1638
1645
  try {
1639
1646
  if (historyCompression.needsCompression(cleanPayload.messages)) {
@@ -1921,9 +1928,16 @@ IMPORTANT TOOL USAGE RULES:
1921
1928
  });
1922
1929
  }
1923
1930
 
1931
+ // Thread workspace for code-graph integration (auto-detected or from header)
1932
+ if (headers?.["x-lynkr-workspace"]) {
1933
+ cleanPayload._workspace = headers["x-lynkr-workspace"];
1934
+ }
1935
+
1936
+ if (agentTimer) agentTimer.mark("preInvokeModel");
1924
1937
  let databricksResponse;
1925
1938
  try {
1926
1939
  databricksResponse = await invokeModel(cleanPayload);
1940
+ if (agentTimer) agentTimer.mark("invokeModel");
1927
1941
  } catch (modelError) {
1928
1942
  const isConnectionError = modelError.cause?.code === 'ECONNREFUSED'
1929
1943
  || modelError.message?.includes('fetch failed')
@@ -3150,6 +3164,12 @@ IMPORTANT TOOL USAGE RULES:
3150
3164
  if (Array.isArray(anthropicPayload?.content)) {
3151
3165
  anthropicPayload.content = policy.sanitiseContent(anthropicPayload.content);
3152
3166
  }
3167
+ } else if (actualProvider === "codex") {
3168
+ // Codex responses are already in Anthropic format from invokeCodex
3169
+ anthropicPayload = databricksResponse.json;
3170
+ if (Array.isArray(anthropicPayload?.content)) {
3171
+ anthropicPayload.content = policy.sanitiseContent(anthropicPayload.content);
3172
+ }
3153
3173
  } else {
3154
3174
  anthropicPayload = toAnthropicResponse(
3155
3175
  databricksResponse.json,
@@ -3434,6 +3454,15 @@ IMPORTANT TOOL USAGE RULES:
3434
3454
  }
3435
3455
  }
3436
3456
 
3457
+ // Attach routing metadata for OpenClaw model name rewriting
3458
+ if (databricksResponse.routingDecision) {
3459
+ anthropicPayload._routingMeta = {
3460
+ provider: databricksResponse.routingDecision.provider,
3461
+ model: databricksResponse.routingDecision.model,
3462
+ tier: databricksResponse.routingDecision.tier,
3463
+ };
3464
+ }
3465
+
3437
3466
  appendTurnToSession(session, {
3438
3467
  role: "assistant",
3439
3468
  type: "message",
@@ -3487,6 +3516,7 @@ IMPORTANT TOOL USAGE RULES:
3487
3516
  },
3488
3517
  "Agent loop completed successfully",
3489
3518
  );
3519
+ if (agentTimer) { agentTimer.mark("responseReady"); agentTimer.done(); }
3490
3520
  return {
3491
3521
  response: {
3492
3522
  status: 200,
@@ -3903,7 +3933,11 @@ async function processMessage({ payload, headers, session, cwd, options = {} })
3903
3933
  }
3904
3934
  }
3905
3935
 
3936
+ const { createTimer } = require("../utils/perf-timer");
3937
+ const pTimer = createTimer("processMessage");
3938
+
3906
3939
  const cleanPayload = sanitizePayload(payload);
3940
+ pTimer.mark("sanitizePayload");
3907
3941
 
3908
3942
  // Proactively load tools based on prompt content (lazy loading)
3909
3943
  try {
@@ -3914,6 +3948,7 @@ async function processMessage({ payload, headers, session, cwd, options = {} })
3914
3948
  } catch (err) {
3915
3949
  logger.debug({ error: err.message }, "Lazy tool loading check failed");
3916
3950
  }
3951
+ pTimer.mark("lazyToolLoad");
3917
3952
 
3918
3953
  appendTurnToSession(session, {
3919
3954
  role: "user",
@@ -3923,12 +3958,14 @@ async function processMessage({ payload, headers, session, cwd, options = {} })
3923
3958
  },
3924
3959
  type: "message",
3925
3960
  });
3961
+ pTimer.mark("sessionAppend");
3926
3962
 
3927
3963
  let cacheKey = null;
3928
3964
  let cachedResponse = null;
3929
3965
  if (promptCache.isEnabled()) {
3930
3966
  // cleanPayload is already a deep clone from sanitizePayload, no need to clone again
3931
3967
  const { key, entry } = promptCache.lookup(cleanPayload);
3968
+ pTimer.mark("cacheCheck");
3932
3969
  cacheKey = key;
3933
3970
  if (entry?.value) {
3934
3971
  try {
@@ -4018,6 +4055,7 @@ async function processMessage({ payload, headers, session, cwd, options = {} })
4018
4055
  // NOTE: Tool loop guard moved to BEFORE sanitizePayload() since sanitization
4019
4056
  // removes conversation history (consecutive same-role messages)
4020
4057
 
4058
+ pTimer.mark("preAgentLoop");
4021
4059
  const loopResult = await runAgentLoop({
4022
4060
  cleanPayload,
4023
4061
  requestedModel,
@@ -4029,6 +4067,8 @@ async function processMessage({ payload, headers, session, cwd, options = {} })
4029
4067
  providerType: config.modelProvider?.type ?? "databricks",
4030
4068
  headers,
4031
4069
  });
4070
+ pTimer.mark("agentLoopDone");
4071
+ pTimer.done();
4032
4072
 
4033
4073
  // Store successful responses in semantic cache for future fuzzy matching
4034
4074
  if (semanticCache.isEnabled() && semanticLookupResult && !semanticLookupResult.hit) {