chainlesschain 0.45.12 → 0.45.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/package.json +1 -1
  2. package/src/assets/web-panel/assets/{AppLayout-BfLjLMsm.js → AppLayout-B00RARl2.js} +1 -1
  3. package/src/assets/web-panel/assets/{Chat-DP7PO9Li.js → Chat-DXtvKoM0.js} +1 -1
  4. package/src/assets/web-panel/assets/{Cron-DyQF-7R1.js → Cron-BJ4ODHOy.js} +1 -1
  5. package/src/assets/web-panel/assets/Dashboard-3iIpp3zd.js +3 -0
  6. package/src/assets/web-panel/assets/Dashboard-BS-tzGNj.css +1 -0
  7. package/src/assets/web-panel/assets/{Logs-BOii-AoO.js → Logs-CSeKZEG_.js} +1 -1
  8. package/src/assets/web-panel/assets/{McpTools-DmiJtJYr.js → McpTools-BYQAK11r.js} +1 -1
  9. package/src/assets/web-panel/assets/{Memory-CDRMMobU.js → Memory-gkUAPyuZ.js} +1 -1
  10. package/src/assets/web-panel/assets/{Notes-CVhqqoS1.js → Notes-bjNrQgAo.js} +1 -1
  11. package/src/assets/web-panel/assets/{Providers-Dkt7021l.js → Providers-Dbf57Tbv.js} +1 -1
  12. package/src/assets/web-panel/assets/{Services-DUDL_UGb.js → Services-CS0oMdxh.js} +1 -1
  13. package/src/assets/web-panel/assets/{Skills-DXXELJc3.js → Skills-B2fgruv8.js} +1 -1
  14. package/src/assets/web-panel/assets/Tasks-BJjN_YEm.css +1 -0
  15. package/src/assets/web-panel/assets/Tasks-qULws8pc.js +1 -0
  16. package/src/assets/web-panel/assets/chat-DnH09sSR.js +1 -0
  17. package/src/assets/web-panel/assets/{index-vW799KpE.js → index-CF2CqPYX.js} +2 -2
  18. package/src/assets/web-panel/assets/ws-DjelKkD6.js +1 -0
  19. package/src/assets/web-panel/index.html +1 -1
  20. package/src/commands/agent.js +7 -8
  21. package/src/commands/chat.js +9 -11
  22. package/src/commands/serve.js +11 -106
  23. package/src/commands/session.js +101 -0
  24. package/src/commands/ui.js +10 -151
  25. package/src/gateways/repl/agent-repl.js +1 -0
  26. package/src/gateways/repl/chat-repl.js +1 -0
  27. package/src/gateways/ui/web-ui-server.js +1 -0
  28. package/src/gateways/ws/action-protocol.js +83 -0
  29. package/src/gateways/ws/message-dispatcher.js +73 -0
  30. package/src/gateways/ws/session-protocol.js +396 -0
  31. package/src/gateways/ws/task-protocol.js +55 -0
  32. package/src/gateways/ws/worktree-protocol.js +315 -0
  33. package/src/gateways/ws/ws-server.js +4 -0
  34. package/src/gateways/ws/ws-session-gateway.js +1 -0
  35. package/src/harness/background-task-manager.js +506 -0
  36. package/src/harness/background-task-worker.js +48 -0
  37. package/src/harness/compression-telemetry.js +214 -0
  38. package/src/harness/feature-flags.js +157 -0
  39. package/src/harness/jsonl-session-store.js +452 -0
  40. package/src/harness/prompt-compressor.js +416 -0
  41. package/src/harness/worktree-isolator.js +845 -0
  42. package/src/lib/agent-core.js +246 -45
  43. package/src/lib/background-task-manager.js +1 -305
  44. package/src/lib/background-task-worker.js +1 -50
  45. package/src/lib/compression-telemetry.js +5 -0
  46. package/src/lib/feature-flags.js +7 -182
  47. package/src/lib/interaction-adapter.js +32 -6
  48. package/src/lib/jsonl-session-store.js +21 -237
  49. package/src/lib/prompt-compressor.js +10 -481
  50. package/src/lib/sub-agent-context.js +21 -1
  51. package/src/lib/worktree-isolator.js +13 -231
  52. package/src/lib/ws-agent-handler.js +1 -0
  53. package/src/lib/ws-server.js +138 -387
  54. package/src/lib/ws-session-manager.js +82 -1
  55. package/src/repl/agent-repl.js +11 -0
  56. package/src/runtime/agent-runtime.js +417 -0
  57. package/src/runtime/contracts/agent-turn.js +11 -0
  58. package/src/runtime/contracts/session-record.js +31 -0
  59. package/src/runtime/contracts/task-record.js +18 -0
  60. package/src/runtime/contracts/telemetry-record.js +23 -0
  61. package/src/runtime/contracts/worktree-record.js +14 -0
  62. package/src/runtime/index.js +13 -0
  63. package/src/runtime/policies/agent-policy.js +45 -0
  64. package/src/runtime/runtime-context.js +14 -0
  65. package/src/runtime/runtime-events.js +37 -0
  66. package/src/runtime/runtime-factory.js +50 -0
  67. package/src/tools/index.js +22 -0
  68. package/src/tools/legacy-agent-tools.js +171 -0
  69. package/src/tools/registry.js +141 -0
  70. package/src/tools/tool-context.js +28 -0
  71. package/src/tools/tool-permissions.js +28 -0
  72. package/src/tools/tool-telemetry.js +39 -0
  73. package/src/assets/web-panel/assets/Dashboard-BGGdnr6t.js +0 -3
  74. package/src/assets/web-panel/assets/Dashboard-CRFnDUFh.css +0 -1
  75. package/src/assets/web-panel/assets/Tasks-BwZ63-mq.js +0 -1
  76. package/src/assets/web-panel/assets/Tasks-Cr_XXNyQ.css +0 -1
  77. package/src/assets/web-panel/assets/chat-C_hu-qNs.js +0 -1
  78. package/src/assets/web-panel/assets/ws-DwluTqT5.js +0 -1
@@ -1,481 +1,10 @@
1
- /**
2
- * CLI Prompt Compressor — 5 strategies for context window management.
3
- *
4
- * Strategies:
5
- * 1. deduplication — Remove duplicate/similar messages (Jaccard similarity)
6
- * 2. truncation — Keep most recent N messages
7
- * 3. summarization — LLM-generated summary of old history
8
- * 4. snipCompact — Remove stale tool results and processed markers
9
- * 5. contextCollapse — Fold consecutive same-type messages into summaries
10
- *
11
- * Feature-flag gated: PROMPT_COMPRESSOR, CONTEXT_SNIP, CONTEXT_COLLAPSE
12
- */
13
-
14
- import { createHash } from "node:crypto";
15
- import { feature } from "./feature-flags.js";
16
-
17
- // ── Token estimation ────────────────────────────────────────────────────
18
-
19
- /**
20
- * Estimate token count for a string.
21
- * Chinese: ~1.5 chars/token, English: ~4 chars/token.
22
- */
23
- export function estimateTokens(text) {
24
- if (!text) return 0;
25
- const chineseChars = (text.match(/[\u4e00-\u9fa5]/g) || []).length;
26
- const otherChars = text.length - chineseChars;
27
- return Math.ceil(chineseChars / 1.5 + otherChars / 4);
28
- }
29
-
30
- /**
31
- * Estimate total tokens for a messages array.
32
- */
33
- export function estimateMessagesTokens(messages) {
34
- return messages.reduce((sum, msg) => {
35
- const content =
36
- typeof msg.content === "string"
37
- ? msg.content
38
- : JSON.stringify(msg.content || "");
39
- return sum + estimateTokens(content);
40
- }, 0);
41
- }
42
-
43
- // ── Similarity ──────────────────────────────────────────────────────────
44
-
45
- function jaccardSimilarity(str1, str2) {
46
- if (!str1 || !str2) return 0;
47
- if (str1 === str2) return 1;
48
- const tokens1 = new Set(str1.split(""));
49
- const tokens2 = new Set(str2.split(""));
50
- let intersection = 0;
51
- for (const t of tokens1) {
52
- if (tokens2.has(t)) intersection++;
53
- }
54
- return intersection / (tokens1.size + tokens2.size - intersection);
55
- }
56
-
57
- function getContent(msg) {
58
- return typeof msg.content === "string"
59
- ? msg.content
60
- : JSON.stringify(msg.content || "");
61
- }
62
-
63
- // ── Provider context window registry ───────────────────────────────────
64
-
65
- /**
66
- * Known context window sizes (in tokens) per model/provider.
67
- * Used by adaptive compression to auto-tune maxTokens.
68
- */
69
- export const CONTEXT_WINDOWS = {
70
- // Ollama local models
71
- "qwen2.5:7b": 32768,
72
- "qwen2.5:14b": 32768,
73
- "qwen2.5-coder:14b": 32768,
74
- "qwen2:7b": 32768,
75
- "llama3:8b": 8192,
76
- "mistral:7b": 32768,
77
- "codellama:7b": 16384,
78
- // OpenAI
79
- "gpt-4o": 128000,
80
- "gpt-4o-mini": 128000,
81
- "gpt-4-turbo": 128000,
82
- "gpt-3.5-turbo": 16385,
83
- o1: 200000,
84
- // Anthropic
85
- "claude-opus-4-6": 200000,
86
- "claude-sonnet-4-6": 200000,
87
- "claude-haiku-4-5-20251001": 200000,
88
- // DeepSeek
89
- "deepseek-chat": 64000,
90
- "deepseek-coder": 64000,
91
- "deepseek-reasoner": 64000,
92
- // DashScope
93
- "qwen-turbo": 131072,
94
- "qwen-plus": 131072,
95
- "qwen-max": 32768,
96
- // Gemini
97
- "gemini-2.0-flash": 1048576,
98
- "gemini-2.0-pro": 1048576,
99
- "gemini-1.5-flash": 1048576,
100
- // Kimi
101
- "moonshot-v1-auto": 131072,
102
- "moonshot-v1-8k": 8192,
103
- "moonshot-v1-32k": 32768,
104
- "moonshot-v1-128k": 131072,
105
- // Volcengine
106
- "doubao-seed-1-6-251015": 32768,
107
- // Provider-level defaults (fallback when model not listed)
108
- _provider_defaults: {
109
- ollama: 32768,
110
- openai: 128000,
111
- anthropic: 200000,
112
- deepseek: 64000,
113
- dashscope: 131072,
114
- gemini: 1048576,
115
- kimi: 131072,
116
- volcengine: 32768,
117
- minimax: 32768,
118
- mistral: 32768,
119
- },
120
- };
121
-
122
- /**
123
- * Get context window size for a model/provider combination.
124
- * @param {string} [model] — Model name
125
- * @param {string} [provider] — Provider name
126
- * @returns {number} Context window in tokens
127
- */
128
- export function getContextWindow(model, provider) {
129
- if (model && CONTEXT_WINDOWS[model]) {
130
- return CONTEXT_WINDOWS[model];
131
- }
132
- if (provider && CONTEXT_WINDOWS._provider_defaults[provider]) {
133
- return CONTEXT_WINDOWS._provider_defaults[provider];
134
- }
135
- return 32768; // Conservative default
136
- }
137
-
138
- /**
139
- * Calculate adaptive compression thresholds based on context window.
140
- *
141
- * Strategy:
142
- * - maxTokens = 60% of context window (reserve 40% for system prompt + response)
143
- * - maxMessages scales with context: small ctx → 15, large ctx → 50
144
- * - For very large contexts (>128k), enable less aggressive compression
145
- *
146
- * @param {number} contextWindow — Context window in tokens
147
- * @returns {{ maxMessages: number, maxTokens: number, aggressive: boolean }}
148
- */
149
- export function adaptiveThresholds(contextWindow) {
150
- const maxTokens = Math.floor(contextWindow * 0.6);
151
- // Scale messages: 15 for 8k, 20 for 32k, 30 for 128k, 50 for 200k+
152
- const maxMessages = Math.min(
153
- 50,
154
- Math.max(15, Math.floor(10 + Math.log2(contextWindow / 1024) * 5)),
155
- );
156
- // Aggressive compression only for small context windows
157
- const aggressive = contextWindow < 32768;
158
-
159
- return { maxMessages, maxTokens, aggressive };
160
- }
161
-
162
- // ── PromptCompressor class ──────────────────────────────────────────────
163
-
164
- export class PromptCompressor {
165
- /**
166
- * @param {object} options
167
- * @param {number} [options.maxMessages=20] — Max messages before truncation
168
- * @param {number} [options.maxTokens=8000] — Token threshold for auto-compact
169
- * @param {number} [options.similarityThreshold=0.9] — Dedup similarity threshold
170
- * @param {Function} [options.llmQuery] — async (prompt) => string, for summarization
171
- * @param {string} [options.model] — Model name (for adaptive thresholds)
172
- * @param {string} [options.provider] — Provider name (for adaptive thresholds)
173
- */
174
- constructor(options = {}) {
175
- // If model/provider supplied and no explicit maxMessages/maxTokens, auto-adapt
176
- if (
177
- (options.model || options.provider) &&
178
- !options.maxMessages &&
179
- !options.maxTokens
180
- ) {
181
- const ctxWindow = getContextWindow(options.model, options.provider);
182
- const adaptive = adaptiveThresholds(ctxWindow);
183
- this.maxMessages = adaptive.maxMessages;
184
- this.maxTokens = adaptive.maxTokens;
185
- this._adaptive = true;
186
- this._contextWindow = ctxWindow;
187
- } else {
188
- this.maxMessages = options.maxMessages || 20;
189
- this.maxTokens = options.maxTokens || 8000;
190
- this._adaptive = false;
191
- this._contextWindow = null;
192
- }
193
- this.similarityThreshold = options.similarityThreshold || 0.9;
194
- this.llmQuery = options.llmQuery || null;
195
- }
196
-
197
- /**
198
- * Reconfigure thresholds for a new model/provider (e.g. after model switch).
199
- * Only updates if no explicit overrides were set in constructor.
200
- */
201
- adaptToModel(model, provider) {
202
- const ctxWindow = getContextWindow(model, provider);
203
- const adaptive = adaptiveThresholds(ctxWindow);
204
- this.maxMessages = adaptive.maxMessages;
205
- this.maxTokens = adaptive.maxTokens;
206
- this._adaptive = true;
207
- this._contextWindow = ctxWindow;
208
- }
209
-
210
- /**
211
- * Run all enabled compression strategies on messages.
212
- * Returns { messages, stats }.
213
- */
214
- async compress(messages, options = {}) {
215
- if (!Array.isArray(messages) || messages.length <= 2) {
216
- return {
217
- messages: Array.isArray(messages) ? [...messages] : [],
218
- stats: { strategy: "none", saved: 0 },
219
- };
220
- }
221
-
222
- const originalTokens = estimateMessagesTokens(messages);
223
- let result = [...messages];
224
- const applied = [];
225
-
226
- // Strategy 4: snipCompact (remove stale tool results)
227
- if (feature("CONTEXT_SNIP")) {
228
- const before = result.length;
229
- result = this._snipCompact(result);
230
- if (result.length < before) applied.push("snip");
231
- }
232
-
233
- // Strategy 1: deduplication
234
- if (result.length > 3) {
235
- const before = result.length;
236
- result = this._deduplicate(result);
237
- if (result.length < before) applied.push("dedup");
238
- }
239
-
240
- // Strategy 5: contextCollapse (fold consecutive tool results)
241
- if (feature("CONTEXT_COLLAPSE") && result.length > 6) {
242
- const before = result.length;
243
- result = this._contextCollapse(result);
244
- if (result.length < before) applied.push("collapse");
245
- }
246
-
247
- // Strategy 2: truncation
248
- if (result.length > this.maxMessages) {
249
- result = this._truncate(result);
250
- applied.push("truncate");
251
- }
252
-
253
- // Strategy 3: summarization (only if still over token limit)
254
- const currentTokens = estimateMessagesTokens(result);
255
- if (this.llmQuery && currentTokens > this.maxTokens && result.length > 4) {
256
- try {
257
- result = await this._summarize(result);
258
- applied.push("summarize");
259
- } catch (_err) {
260
- // Summarization failed — continue with what we have
261
- }
262
- }
263
-
264
- const compressedTokens = estimateMessagesTokens(result);
265
- return {
266
- messages: result,
267
- stats: {
268
- strategy: applied.join("+") || "none",
269
- originalMessages: messages.length,
270
- compressedMessages: result.length,
271
- originalTokens,
272
- compressedTokens,
273
- saved: originalTokens - compressedTokens,
274
- ratio: originalTokens > 0 ? compressedTokens / originalTokens : 1,
275
- },
276
- };
277
- }
278
-
279
- /**
280
- * Check if auto-compact should trigger.
281
- */
282
- shouldAutoCompact(messages) {
283
- return (
284
- messages.length > this.maxMessages ||
285
- estimateMessagesTokens(messages) > this.maxTokens
286
- );
287
- }
288
-
289
- // ── Strategy 1: Deduplication ───────────────────────────────────────
290
-
291
- _deduplicate(messages) {
292
- const system = messages.filter((m) => m.role === "system");
293
- const last = [...messages].reverse().find((m) => m.role === "user");
294
- const rest = messages.filter((m) => m.role !== "system" && m !== last);
295
-
296
- const seen = new Map();
297
- const deduped = [];
298
-
299
- for (const msg of rest) {
300
- const content = getContent(msg);
301
- const hash = createHash("md5").update(content).digest("hex");
302
-
303
- if (seen.has(hash)) continue;
304
-
305
- let isDup = false;
306
- for (const [, existing] of seen) {
307
- if (
308
- jaccardSimilarity(content, getContent(existing)) >=
309
- this.similarityThreshold
310
- ) {
311
- isDup = true;
312
- break;
313
- }
314
- }
315
-
316
- if (!isDup) {
317
- seen.set(hash, msg);
318
- deduped.push(msg);
319
- }
320
- }
321
-
322
- const result = [...system, ...deduped];
323
- if (last && !result.includes(last)) result.push(last);
324
- return result;
325
- }
326
-
327
- // ── Strategy 2: Truncation ──────────────────────────────────────────
328
-
329
- _truncate(messages) {
330
- const system = messages.filter((m) => m.role === "system");
331
- const last = [...messages].reverse().find((m) => m.role === "user");
332
- const rest = messages.filter((m) => m.role !== "system" && m !== last);
333
-
334
- let slots = this.maxMessages - system.length;
335
- if (last) slots -= 1;
336
-
337
- const recent = rest.slice(-Math.max(slots, 1));
338
- const result = [...system, ...recent];
339
- if (last && !result.includes(last)) result.push(last);
340
- return result;
341
- }
342
-
343
- // ── Strategy 3: Summarization ───────────────────────────────────────
344
-
345
- async _summarize(messages) {
346
- const system = messages.filter((m) => m.role === "system");
347
- const last = [...messages].reverse().find((m) => m.role === "user");
348
- const toSummarize = messages.filter(
349
- (m) => m.role !== "system" && m !== last,
350
- );
351
-
352
- if (toSummarize.length < 3) return messages;
353
-
354
- const historyText = toSummarize
355
- .map((m) => `${m.role}: ${getContent(m).slice(0, 500)}`)
356
- .join("\n");
357
-
358
- const summary = await this.llmQuery(
359
- `Summarize this conversation history concisely, preserving key facts and decisions:\n\n${historyText}\n\nSummary:`,
360
- );
361
-
362
- if (!summary) return messages;
363
-
364
- const result = [
365
- ...system,
366
- { role: "system", content: `[Conversation Summary]\n${summary}` },
367
- ];
368
- if (last) result.push(last);
369
- return result;
370
- }
371
-
372
- // ── Strategy 4: Snip Compact ────────────────────────────────────────
373
- // Removes stale markers: processed tool results older than recent window,
374
- // empty assistant messages, and system messages with [PROCESSED] tags.
375
-
376
- _snipCompact(messages) {
377
- if (messages.length <= 4) return messages;
378
-
379
- // Keep system[0] + last 4 messages untouched
380
- const head = messages.slice(0, 1);
381
- const middle = messages.slice(1, -4);
382
- const tail = messages.slice(-4);
383
-
384
- const snipped = middle.filter((msg) => {
385
- const content = getContent(msg);
386
-
387
- // Remove empty messages
388
- if (!content || content.trim() === "") return false;
389
-
390
- // Remove processed markers
391
- if (content.includes("[PROCESSED]") || content.includes("[STALE]"))
392
- return false;
393
-
394
- // Remove tool_result messages that are just "ok" or empty JSON
395
- if (msg.role === "tool") {
396
- if (
397
- content === "ok" ||
398
- content === "{}" ||
399
- content === "null" ||
400
- content.length < 3
401
- )
402
- return false;
403
- }
404
-
405
- // Remove very short assistant acknowledgments in middle
406
- if (msg.role === "assistant" && content.length < 10) return false;
407
-
408
- return true;
409
- });
410
-
411
- return [...head, ...snipped, ...tail];
412
- }
413
-
414
- // ── Strategy 5: Context Collapse ────────────────────────────────────
415
- // Folds consecutive tool_call + tool_result pairs into a single summary.
416
-
417
- _contextCollapse(messages) {
418
- if (messages.length <= 6) return messages;
419
-
420
- const result = [];
421
- let i = 0;
422
-
423
- while (i < messages.length) {
424
- const msg = messages[i];
425
-
426
- // Detect consecutive tool sequences in the middle (not last 3)
427
- if (
428
- i > 0 &&
429
- i < messages.length - 3 &&
430
- msg.role === "assistant" &&
431
- msg.tool_calls &&
432
- msg.tool_calls.length > 0
433
- ) {
434
- // Collect this tool call + all following tool results
435
- const toolGroup = [msg];
436
- let j = i + 1;
437
- while (j < messages.length - 3 && messages[j].role === "tool") {
438
- toolGroup.push(messages[j]);
439
- j++;
440
- }
441
-
442
- // Also collect next assistant with tool_calls (chained calls)
443
- while (
444
- j < messages.length - 3 &&
445
- messages[j].role === "assistant" &&
446
- messages[j].tool_calls
447
- ) {
448
- toolGroup.push(messages[j]);
449
- j++;
450
- while (j < messages.length - 3 && messages[j].role === "tool") {
451
- toolGroup.push(messages[j]);
452
- j++;
453
- }
454
- }
455
-
456
- // Only collapse if we collected 3+ messages
457
- if (toolGroup.length >= 3) {
458
- const toolNames = toolGroup
459
- .filter((m) => m.tool_calls)
460
- .flatMap((m) =>
461
- m.tool_calls.map((tc) => tc.function?.name || "tool"),
462
- )
463
- .filter(Boolean);
464
- const uniqueTools = [...new Set(toolNames)];
465
-
466
- result.push({
467
- role: "system",
468
- content: `[Collapsed ${toolGroup.length} tool messages: ${uniqueTools.join(", ")}]`,
469
- });
470
- i = j;
471
- continue;
472
- }
473
- }
474
-
475
- result.push(msg);
476
- i++;
477
- }
478
-
479
- return result;
480
- }
481
- }
1
+ export {
2
+ estimateTokens,
3
+ estimateMessagesTokens,
4
+ CONTEXT_WINDOWS,
5
+ getContextWindow,
6
+ COMPRESSION_VARIANTS,
7
+ getCompressionVariant,
8
+ adaptiveThresholds,
9
+ PromptCompressor,
10
+ } from "../harness/prompt-compressor.js";
@@ -16,6 +16,9 @@ import {
16
16
  createWorktree,
17
17
  removeWorktree,
18
18
  isolateTask,
19
+ diffWorktree,
20
+ mergeWorktree,
21
+ worktreeLog,
19
22
  } from "./worktree-isolator.js";
20
23
  import { isGitRepo } from "./git-integration.js";
21
24
 
@@ -150,12 +153,29 @@ export class SubAgentContext {
150
153
  },
151
154
  );
152
155
 
153
- // Annotate result with worktree info
156
+ // Annotate result with worktree info + diff preview
154
157
  if (result) {
158
+ let diffInfo = null;
159
+ let commits = [];
160
+ if (
161
+ hasChanges ||
162
+ worktreeLog(this._repoDir, `agent/${taskId}`).length > 0
163
+ ) {
164
+ try {
165
+ diffInfo = diffWorktree(this._repoDir, `agent/${taskId}`);
166
+ commits = worktreeLog(this._repoDir, `agent/${taskId}`);
167
+ } catch (_e) {
168
+ // Non-critical — diff preview is optional
169
+ }
170
+ }
155
171
  result.worktree = {
156
172
  branch,
157
173
  path: worktreePath,
158
174
  hasChanges,
175
+ diff: diffInfo,
176
+ commits,
177
+ merge: (options = {}) =>
178
+ mergeWorktree(this._repoDir, branch, options),
159
179
  };
160
180
  }
161
181
  return result;