@botbotgo/agent-harness 0.0.272 → 0.0.274

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -704,6 +704,7 @@ Discovery rules:
704
704
  Example workspaces:
705
705
 
706
706
  - `examples/hello-skill-app/` keeps the smallest local tool + skill workspace
707
+ - `examples/local-scheduled-task-app/` runs recurring prompt-driven tasks with a local `node-llama-cpp` GGUF workspace and one local tool
707
708
  - `examples/multimodal-app/` keeps the smallest image-plus-PDF example and sends both through one `request(...)` call
708
709
  - `examples/plan-and-run-app/` keeps the smallest public-API planning example and prints both the plan and the observed execution steps
709
710
  - `examples/runtime-flow-demo/` runs one real hosted-model request and exports a Mermaid flowchart from runtime plus upstream events
@@ -743,6 +744,13 @@ Practical guidance:
743
744
  - use `backend: deepagent` for approvals, resume, multi-agent orchestration, rich memory flows, and heavier tool chains
744
745
  - keep `backend: langchain-v1` for lighter direct-response or explicitly chosen V1 agent shapes while this upstream behavior settles
745
746
 
747
+ Local GGUF note:
748
+
749
+ - `provider: node-llama-cpp` now exposes a LangChain-style tool-binding shim, so local GGUF models can enter the standard tool-calling path without an app-owned model wrapper
750
+ - `backend: langchain-v1` is the straightforward local GGUF path and is the currently verified default for `node-llama-cpp` tool use
751
+ - `backend: deepagent` can also reach the same tool-calling path, but final reliability still depends on the selected model following upstream tool schemas correctly
752
+ - `agent-harness` does not try to normalize every model-specific argument drift or malformed tool payload; once the runtime hands a call to upstream tools, schema fidelity is a model responsibility
753
+
746
754
  ### `config/runtime/workspace.yaml`
747
755
 
748
756
  Use this file for workspace-wide runtime policy.
package/README.zh.md CHANGED
@@ -667,6 +667,7 @@ await stop(runtime);
667
667
  示例工作区:
668
668
 
669
669
  - `examples/hello-skill-app/` 保留最小的本地 tool + skill 工作区
670
+ - `examples/local-scheduled-task-app/` 展示如何用本地 `node-llama-cpp` GGUF 工作区和一个本地 tool 执行周期性 prompt 任务
670
671
  - `examples/multimodal-app/` 保留最小的图片 + PDF 示例,并通过一次 `request(...)` 调用发送
671
672
  - `examples/plan-and-run-app/` 保留最小的公开 API 规划示例,并同时打印规划步骤和真实执行步骤
672
673
  - `examples/runtime-flow-demo/` 会跑一次真实 hosted model 请求,并把 runtime 与 upstream events 导出为 Mermaid flowchart
@@ -700,6 +701,13 @@ await stop(runtime);
700
701
  - approvals、resume、多 agent orchestration、复杂 memory 流、重工具链,优先使用 `backend: deepagent`
701
702
  - `backend: langchain-v1` 先保留给轻量 direct-response 场景,或明确需要 V1 agent 语义的工作区
702
703
 
704
+ 本地 GGUF 补充说明:
705
+
706
+ - `provider: node-llama-cpp` 现在带有一层 LangChain 风格的 tool-binding shim,因此本地 GGUF 模型可以进入标准 tool-calling 路径,而不需要应用自己包一层 model wrapper
707
+ - 对 `node-llama-cpp` 来说,`backend: langchain-v1` 仍然是更直接、当前已验证的本地 tool use 路径
708
+ - `backend: deepagent` 也可以走到同一条 tool-calling 路径,但最终稳定性仍取决于所选模型是否能正确遵守 upstream tool schema
709
+ - `agent-harness` 不会为每个模型的参数漂移或畸形 tool payload 做无限兼容;runtime 把调用交给 upstream tools 之后,schema fidelity 就属于模型责任
710
+
703
711
  ### `config/runtime/workspace.yaml`
704
712
 
705
713
  用于工作区范围的运行时策略。
@@ -1 +1 @@
1
- export declare const AGENT_HARNESS_VERSION = "0.0.271";
1
+ export declare const AGENT_HARNESS_VERSION = "0.0.273";
@@ -1 +1 @@
1
- export const AGENT_HARNESS_VERSION = "0.0.271";
1
+ export const AGENT_HARNESS_VERSION = "0.0.273";
@@ -2,8 +2,324 @@ import { ChatAnthropic } from "@langchain/anthropic";
2
2
  import { ChatGoogle } from "@langchain/google";
3
3
  import { ChatOllama } from "@langchain/ollama";
4
4
  import { ChatOpenAI } from "@langchain/openai";
5
+ import { AIMessage } from "langchain";
5
6
  import { initChatModel } from "langchain";
7
+ import { salvageToolArgs, tryParseJson } from "../../parsing/output-parsing.js";
8
+ import { normalizeModelFacingToolSchema } from "../tool/resolved-tool.js";
6
9
  import { normalizeOpenAICompatibleInit } from "../compat/openai-compatible.js";
10
+ const NODE_LLAMA_CPP_TOOL_CALL_INSTRUCTION = [
11
+ "Available tools are listed below.",
12
+ "If you need a tool, respond with only one JSON object.",
13
+ 'Use this exact shape: {"name":"tool_name","arguments":{"key":"value"}}',
14
+ "Do not add markdown, prose, or code fences unless the output is wrapped inside <tool_call>...</tool_call>.",
15
+ "If no tool is needed, answer normally.",
16
+ ].join("\n");
17
// Extract plain text from a model response value. Accepts a raw string, a
// message-like object with a string `content`, or one whose `content` is an
// array of parts (strings or `{ text }` objects). Anything else yields "".
function readModelText(value) {
  if (typeof value === "string") {
    return value.trim();
  }
  if (value === null || typeof value !== "object") {
    return "";
  }
  const { content } = value;
  if (typeof content === "string") {
    return content.trim();
  }
  if (!Array.isArray(content)) {
    return "";
  }
  // Concatenate only textual parts; non-text parts contribute nothing.
  const pieces = [];
  for (const part of content) {
    if (typeof part === "string") {
      pieces.push(part);
    } else if (part !== null && typeof part === "object" && typeof part.text === "string") {
      pieces.push(part.text);
    }
  }
  return pieces.join("").trim();
}
40
// Best-effort extraction of prompt text from a string, an array of prompt
// parts (recursed and newline-joined), or a message-like object. Objects with
// a `content` field defer to readModelText; otherwise a string `text` field
// is used. Unrecognized shapes yield "".
function readPromptContent(value) {
  if (typeof value === "string") {
    return value.trim();
  }
  if (Array.isArray(value)) {
    const parts = [];
    for (const item of value) {
      const text = readPromptContent(item);
      if (text) {
        parts.push(text);
      }
    }
    return parts.join("\n").trim();
  }
  if (value === null || typeof value !== "object") {
    return "";
  }
  if (typeof value.content === "string" || Array.isArray(value.content)) {
    return readModelText(value);
  }
  return typeof value.text === "string" ? value.text.trim() : "";
}
58
// Determine a LangChain message type ("human" | "system" | "ai" | "tool")
// either from the live message's type accessor or, for serialized messages,
// from the last string entry of the `id` class-path array. Returns undefined
// when the type cannot be determined.
function readMessageType(value) {
  if (value === null || typeof value !== "object") {
    return undefined;
  }
  if (typeof value._getType === "function") {
    return String(value._getType() ?? "");
  }
  if (typeof value.getType === "function") {
    return String(value.getType() ?? "");
  }
  const idParts = Array.isArray(value.id)
    ? value.id.filter((part) => typeof part === "string")
    : [];
  switch (idParts.at(-1)) {
    case "HumanMessage":
      return "human";
    case "SystemMessage":
      return "system";
    case "AIMessage":
      return "ai";
    case "ToolMessage":
      return "tool";
    default:
      return undefined;
  }
}
82
// Map a message-like value to an upper-case role label. A string `role`
// property wins (with assistant/tool normalized); otherwise the LangChain
// message type is consulted. Unknown shapes default to "USER".
function mapMessageRole(value) {
  const rawRole = typeof value?.role === "string" ? value.role.trim().toLowerCase() : undefined;
  if (rawRole) {
    switch (rawRole) {
      case "assistant":
        return "ASSISTANT";
      case "tool":
        return "TOOL";
      default:
        return rawRole.toUpperCase();
    }
  }
  switch (readMessageType(value)) {
    case "system":
      return "SYSTEM";
    case "human":
      return "USER";
    case "ai":
      return "ASSISTANT";
    case "tool":
      return "TOOL";
    default:
      return "USER";
  }
}
104
// Read the tool-call list from a message-like value: a top-level `tool_calls`
// array wins, then a serialized `kwargs.tool_calls` array. Everything else
// yields an empty list.
function readToolCalls(value) {
  if (value === null || typeof value !== "object") {
    return [];
  }
  if (Array.isArray(value.tool_calls)) {
    return value.tool_calls;
  }
  const kwargs = value.kwargs;
  if (kwargs !== null && typeof kwargs === "object") {
    return Array.isArray(kwargs.tool_calls) ? kwargs.tool_calls : [];
  }
  return [];
}
117
// Render one structured message as prompt text. Assistant messages carrying
// tool calls serialize those calls; tool messages emit a TOOL_RESULT record
// with optional name/tool_call_id lines; everything else becomes "ROLE:\ntext"
// (or "" when there is no text).
function formatStructuredMessage(value) {
  const role = mapMessageRole(value);
  const content = readPromptContent(value);
  if (role === "ASSISTANT") {
    const toolCalls = readToolCalls(value);
    if (toolCalls.length > 0) {
      return `ASSISTANT_TOOL_CALLS:\n${JSON.stringify(toolCalls)}`;
    }
  }
  if (role === "TOOL") {
    // Look a string field up directly, then in serialized kwargs/lc_kwargs.
    const pickString = (key) => {
      if (typeof value[key] === "string") {
        return value[key];
      }
      for (const bag of [value.kwargs, value.lc_kwargs]) {
        if (bag !== null && typeof bag === "object" && typeof bag[key] === "string") {
          return String(bag[key]);
        }
      }
      return "";
    };
    const lines = ["TOOL_RESULT:"];
    const name = pickString("name");
    if (name) {
      lines.push(`name=${name}`);
    }
    const toolCallId = pickString("tool_call_id");
    if (toolCallId) {
      lines.push(`tool_call_id=${toolCallId}`);
    }
    if (content) {
      lines.push(content);
    }
    return lines.join("\n");
  }
  return content ? `${role}:\n${content}` : "";
}
154
// Flatten an invoke() input (string, message array, or { messages } object)
// into one prompt string for the local llama.cpp model.
function stringifyNodeLlamaCppInput(input) {
  if (typeof input === "string") {
    return input;
  }
  if (Array.isArray(input)) {
    const rendered = [];
    for (const message of input) {
      const text = formatStructuredMessage(message);
      if (text) {
        rendered.push(text);
      }
    }
    return rendered.join("\n\n").trim();
  }
  if (input !== null && typeof input === "object" && Array.isArray(input.messages)) {
    return stringifyNodeLlamaCppInput(input.messages);
  }
  return readPromptContent(input);
}
170
// Try to recover a tool-call JSON payload from raw model text. Candidates
// are tried in order: the whole text, a ```json fenced block, then a
// <tool_call>...</tool_call> wrapper. Returns the first parse that succeeds,
// or null when nothing parses.
function extractToolCallPayload(text) {
  const trimmed = text.trim();
  if (!trimmed) {
    return null;
  }
  const candidates = [trimmed];
  const fenced = trimmed.match(/```(?:json)?\s*([\s\S]*?)```/i)?.[1]?.trim();
  if (fenced) {
    candidates.push(fenced);
  }
  const xml = trimmed.match(/<tool_call>\s*([\s\S]*?)\s*<\/tool_call>/i)?.[1]?.trim();
  if (xml) {
    candidates.push(xml);
  }
  for (const candidate of candidates) {
    const parsed = tryParseJson(candidate);
    if (parsed) {
      return parsed;
    }
  }
  return null;
}
195
// Normalize a parsed tool-call payload (object, OpenAI-style { function },
// or array — first element wins) into { name, args }, or null when no
// usable tool name is present. Argument shapes are salvaged leniently.
function normalizeParsedToolCall(payload) {
  if (payload === null || typeof payload !== "object") {
    return null;
  }
  if (Array.isArray(payload)) {
    return normalizeParsedToolCall(payload[0]);
  }
  const fn = typeof payload.function === "object" && payload.function !== null
    ? payload.function
    : undefined;
  const rawName = payload.name ?? payload.tool ?? fn?.name;
  const name = typeof rawName === "string" ? rawName.trim() : "";
  if (!name) {
    return null;
  }
  const rawArgs = payload.arguments ?? payload.args ?? payload.parameters ?? payload.input ?? fn?.arguments ?? {};
  return { name, args: salvageToolArgs(rawArgs) ?? {} };
}
213
// Render one bound tool as a prompt instruction block (name, optional
// description, JSON schema of its arguments). Returns null for tools that
// have no usable string name.
function formatBoundToolInstruction(tool) {
  if (tool === null || typeof tool !== "object") {
    return null;
  }
  const name = typeof tool.name === "string" ? tool.name.trim() : "";
  if (!name) {
    return null;
  }
  const description = typeof tool.description === "string" ? tool.description.trim() : "";
  const schema = normalizeModelFacingToolSchema(tool);
  const lines = [`Tool: ${name}`];
  if (description) {
    lines.push(`Description: ${description}`);
  }
  lines.push(`Arguments JSON schema: ${JSON.stringify(schema)}`);
  return lines.join("\n");
}
230
// Prefix the stringified input with the tool-calling instruction and the
// rendered tool list. When no tool renders an instruction, the plain
// stringified input is returned unchanged.
function withNodeLlamaCppToolPrompt(input, tools) {
  const instructions = [];
  for (const tool of tools) {
    const rendered = formatBoundToolInstruction(tool);
    if (rendered) {
      instructions.push(rendered);
    }
  }
  if (instructions.length === 0) {
    return stringifyNodeLlamaCppInput(input);
  }
  const header = `${NODE_LLAMA_CPP_TOOL_CALL_INSTRUCTION}\n\n${instructions.join("\n\n")}`;
  const body = stringifyNodeLlamaCppInput(input);
  return body ? `${header}\n\n${body}` : header;
}
239
// Wrap a ChatLlamaCpp model in a Proxy that adds a LangChain-style tool
// shim: bindTools() rebinds tools onto a fresh proxy, invoke() injects the
// tool prompt and converts a parsed textual tool call into an AIMessage
// with tool_calls, stream() yields the single invoke() result, and
// withConfig() re-wraps the configured model with the same bound tools.
function createNodeLlamaCppToolBindableModel(model, boundTools = []) {
  const shimmedProps = new Set(["bindTools", "invoke", "stream", "withConfig"]);
  const invokeWithToolShim = async (target, input, config) => {
    const hasTools = boundTools.length > 0;
    const prompt = hasTools ? withNodeLlamaCppToolPrompt(input, boundTools) : input;
    const rawResult = await target.invoke(prompt, config);
    if (!hasTools) {
      return rawResult;
    }
    const parsed = normalizeParsedToolCall(extractToolCallPayload(readModelText(rawResult)));
    if (!parsed) {
      // No recognizable tool call in the text: pass the raw reply through.
      return rawResult;
    }
    return new AIMessage({
      content: "",
      tool_calls: [{
        id: `tool-${Math.random().toString(36).slice(2, 10)}`,
        name: parsed.name,
        args: parsed.args,
        type: "tool_call",
      }],
    });
  };
  const handler = {
    has(target, prop) {
      return shimmedProps.has(prop) || prop in target;
    },
    get(target, prop, receiver) {
      if (prop === "bindTools") {
        return (tools) => createNodeLlamaCppToolBindableModel(target, tools);
      }
      if (prop === "invoke") {
        return (input, config) => invokeWithToolShim(target, input, config);
      }
      if (prop === "stream") {
        // Local models do not stream through the shim; emit the full reply
        // as a single chunk via the proxied invoke().
        return async (input, config) => {
          const value = await receiver.invoke(input, config);
          return (async function* () {
            yield value;
          })();
        };
      }
      if (prop === "withConfig" && typeof target.withConfig === "function") {
        return (config) => createNodeLlamaCppToolBindableModel(target.withConfig(config), boundTools);
      }
      const member = Reflect.get(target, prop, receiver);
      // Bind methods to the real model so internal `this` access still works.
      return typeof member === "function" ? member.bind(target) : member;
    },
    getOwnPropertyDescriptor(target, prop) {
      if (shimmedProps.has(prop)) {
        return {
          configurable: true,
          enumerable: false,
          writable: false,
          value: handler.get(target, prop, target),
        };
      }
      return Reflect.getOwnPropertyDescriptor(target, prop);
    },
  };
  return new Proxy(model, handler);
}
300
// Resolve the GGUF file path for a node-llama-cpp model: an explicit
// `init.modelPath` wins; otherwise `model.model` is used when it looks like
// a filesystem path (contains a path separator, including Windows "\\") or
// carries a .gguf extension in any case. Returns undefined when no path can
// be inferred.
// Fixes: the original rejected Windows backslash paths and upper-case
// ".GGUF" extensions; both are accepted now (backward compatible — every
// previously accepted value still resolves identically).
function inferNodeLlamaCppModelPath(model) {
  const modelPath = typeof model.init?.modelPath === "string" ? model.init.modelPath.trim() : "";
  if (modelPath) {
    return modelPath;
  }
  const candidate = model.model;
  const looksLikePath = candidate.includes("/") || candidate.includes("\\");
  const looksLikeGguf = candidate.toLowerCase().endsWith(".gguf");
  return looksLikePath || looksLikeGguf ? candidate : undefined;
}
307
// Build a tool-bindable ChatLlamaCpp instance for a node-llama-cpp model
// config. Throws when no GGUF path can be inferred, and wraps any
// import/initialization failure with install guidance (original error kept
// as `cause`).
async function createNodeLlamaCppModel(model) {
  const modelPath = inferNodeLlamaCppModelPath(model);
  if (!modelPath) {
    throw new Error(`Model ${model.id} with provider ${model.provider} must define a GGUF path via top-level modelPath or use model as the GGUF path.`);
  }
  try {
    // Lazy import so workspaces without node-llama-cpp installed can still load this module.
    const { ChatLlamaCpp } = await import("@langchain/community/chat_models/llama_cpp");
    const chatModel = await ChatLlamaCpp.initialize({
      ...model.init,
      modelPath,
    });
    return createNodeLlamaCppToolBindableModel(chatModel);
  }
  catch (error) {
    throw new Error(`Failed to initialize ${model.provider} model ${model.id}. Install node-llama-cpp in the application workspace and ensure the GGUF file exists at ${modelPath}.`, { cause: error });
  }
}
7
323
  export async function createResolvedModel(model, modelResolver) {
8
324
  if (modelResolver) {
9
325
  return modelResolver(model.id);
@@ -23,5 +339,8 @@ export async function createResolvedModel(model, modelResolver) {
23
339
  if (model.provider === "google" || model.provider === "google-genai" || model.provider === "gemini") {
24
340
  return new ChatGoogle({ model: model.model, ...model.init });
25
341
  }
342
+ if (model.provider === "node-llama-cpp" || model.provider === "llama-cpp") {
343
+ return createNodeLlamaCppModel(model);
344
+ }
26
345
  return initChatModel(model.model, { modelProvider: model.provider, ...model.init });
27
346
  }
@@ -21,6 +21,7 @@ export declare function createModelFallbackRunnable(model: {
21
21
  stream: (input: unknown, config?: Record<string, unknown>) => Promise<AsyncIterable<unknown>>;
22
22
  };
23
23
  export declare function applyStrictToolJsonInstruction(binding: CompiledAgentBinding): CompiledAgentBinding;
24
+ export declare function applyToolRecoveryInstruction(binding: CompiledAgentBinding, instruction: string): CompiledAgentBinding;
24
25
  export declare function callRuntimeWithToolParseRecovery(input: {
25
26
  binding: CompiledAgentBinding;
26
27
  request: unknown;
@@ -1,5 +1,5 @@
1
1
  import { setTimeout as sleep } from "node:timers/promises";
2
- import { extractVisibleOutput, isToolCallRecoveryFailure, STRICT_TOOL_JSON_INSTRUCTION } from "../parsing/output-parsing.js";
2
+ import { appendToolRecoveryInstruction, extractVisibleOutput, isToolCallRecoveryFailure, resolveToolCallRecoveryInstruction, STRICT_TOOL_JSON_INSTRUCTION, } from "../parsing/output-parsing.js";
3
3
  import { readStreamDelta } from "../parsing/stream-event-parsing.js";
4
4
  import { computeRemainingTimeoutMs, isRetryableProviderError, resolveProviderRetryPolicy } from "./resilience.js";
5
5
  import { isDeepAgentBinding, isLangChainBinding, withUpdatedBindingExecutionParams, } from "../support/compiled-binding.js";
@@ -134,16 +134,19 @@ export function createModelFallbackRunnable(model) {
134
134
  };
135
135
  }
136
136
  export function applyStrictToolJsonInstruction(binding) {
137
+ return applyToolRecoveryInstruction(binding, STRICT_TOOL_JSON_INSTRUCTION);
138
+ }
139
+ export function applyToolRecoveryInstruction(binding, instruction) {
137
140
  if (isLangChainBinding(binding)) {
138
141
  return withUpdatedBindingExecutionParams(binding, (params) => ({
139
142
  ...params,
140
- systemPrompt: [params.systemPrompt, STRICT_TOOL_JSON_INSTRUCTION].filter(Boolean).join("\n\n"),
143
+ systemPrompt: [params.systemPrompt, instruction].filter(Boolean).join("\n\n"),
141
144
  }));
142
145
  }
143
146
  if (isDeepAgentBinding(binding)) {
144
147
  return withUpdatedBindingExecutionParams(binding, (params) => ({
145
148
  ...params,
146
- systemPrompt: [params.systemPrompt, STRICT_TOOL_JSON_INSTRUCTION].filter(Boolean).join("\n\n"),
149
+ systemPrompt: [params.systemPrompt, instruction].filter(Boolean).join("\n\n"),
147
150
  }));
148
151
  }
149
152
  return binding;
@@ -153,9 +156,10 @@ export async function callRuntimeWithToolParseRecovery(input) {
153
156
  return await input.callRuntime(input.binding, input.request);
154
157
  }
155
158
  catch (error) {
156
- if (input.resumePayload !== undefined || !isToolCallRecoveryFailure(error)) {
159
+ const recoveryInstruction = resolveToolCallRecoveryInstruction(error);
160
+ if (input.resumePayload !== undefined || !recoveryInstruction || !isToolCallRecoveryFailure(error)) {
157
161
  throw error;
158
162
  }
159
- return input.callRuntime(applyStrictToolJsonInstruction(input.binding), input.request);
163
+ return input.callRuntime(applyToolRecoveryInstruction(input.binding, recoveryInstruction), appendToolRecoveryInstruction(input.request, recoveryInstruction));
160
164
  }
161
165
  }
@@ -13,5 +13,9 @@ export declare function isToolCallParseFailure(error: unknown): boolean;
13
13
  export declare function isToolCallValidationFailure(error: unknown): boolean;
14
14
  export declare function isToolCallRecoveryFailure(error: unknown): boolean;
15
15
  export declare const STRICT_TOOL_JSON_INSTRUCTION = "When calling tools, return only the tool call itself. The arguments must be a pure JSON object with no explanatory text before or after it.";
16
+ export declare const WRITE_TODOS_FULL_ENTRY_INSTRUCTION = "When calling write_todos, every todo item must include both content and status. Do not send status-only updates. Retry by resending the full todo entry with the original content preserved.";
17
+ export declare function isRepairableWriteTodosContentFailure(error: unknown): boolean;
18
+ export declare function resolveToolCallRecoveryInstruction(error: unknown): string | null;
19
+ export declare function appendToolRecoveryInstruction(input: unknown, instruction: string): unknown;
16
20
  export declare function wrapResolvedModel<T>(value: T): T;
17
21
  export declare function extractReasoningText(value: unknown): string;
@@ -509,20 +509,41 @@ export function isToolCallValidationFailure(error) {
509
509
  if (Array.isArray(direct) && direct.length > 0 && direct.every((issue) => isStructuredValidationIssue(issue) && issue.path.length > 0)) {
510
510
  return true;
511
511
  }
512
- return /Invalid input:\s*expected .* received undefined/i.test(message) && /"path"\s*:\s*\[/.test(message);
512
+ if (/Invalid input:\s*expected .* received undefined/i.test(message) && /"path"\s*:\s*\[/.test(message)) {
513
+ return true;
514
+ }
515
+ return /Received tool input did not match expected schema/i.test(message) && /(?:→\s*at|at)\s+[\w[\].]+/i.test(message);
513
516
  }
514
517
  export function isToolCallRecoveryFailure(error) {
515
518
  return isToolCallParseFailure(error) || isToolCallValidationFailure(error);
516
519
  }
517
520
  export const STRICT_TOOL_JSON_INSTRUCTION = "When calling tools, return only the tool call itself. The arguments must be a pure JSON object with no explanatory text before or after it.";
518
- function appendStrictToolInstruction(input) {
521
+ export const WRITE_TODOS_FULL_ENTRY_INSTRUCTION = "When calling write_todos, every todo item must include both content and status. Do not send status-only updates. Retry by resending the full todo entry with the original content preserved.";
522
// Detect a write_todos failure caused by a todo entry missing its `content`
// field (message mentions both "write_todos" and a "todos[i].content" path),
// which the runtime can repair by re-prompting with the full-entry rule.
export function isRepairableWriteTodosContentFailure(error) {
  if (!(error instanceof Error)) {
    return false;
  }
  const message = error.message.trim();
  if (message.length === 0) {
    return false;
  }
  const mentionsWriteTodos = /write_todos/i.test(message);
  const mentionsMissingContent = /todos\[\d+\]\.content/i.test(message);
  return mentionsWriteTodos && mentionsMissingContent;
}
530
// Pick the retry instruction for a failed tool call: the write_todos
// full-entry rule for repairable content failures, the strict-JSON rule for
// generic parse/validation failures, or null when the error is not
// recoverable by re-prompting.
export function resolveToolCallRecoveryInstruction(error) {
  if (isRepairableWriteTodosContentFailure(error)) {
    return WRITE_TODOS_FULL_ENTRY_INSTRUCTION;
  }
  return isToolCallRecoveryFailure(error) ? STRICT_TOOL_JSON_INSTRUCTION : null;
}
539
+ export function appendToolRecoveryInstruction(input, instruction) {
519
540
  if (Array.isArray(input)) {
520
- return [...input, { role: "system", content: STRICT_TOOL_JSON_INSTRUCTION }];
541
+ return [...input, { role: "system", content: instruction }];
521
542
  }
522
543
  if (typeof input === "object" && input && Array.isArray(input.messages)) {
523
544
  return {
524
545
  ...input,
525
- messages: [...(input.messages), { role: "system", content: STRICT_TOOL_JSON_INSTRUCTION }],
546
+ messages: [...(input.messages), { role: "system", content: instruction }],
526
547
  };
527
548
  }
528
549
  return input;
@@ -540,11 +561,12 @@ export function wrapResolvedModel(value) {
540
561
  return normalizeAgentMessage(await member.apply(currentTarget, args));
541
562
  }
542
563
  catch (error) {
543
- if (!isToolCallRecoveryFailure(error)) {
564
+ const recoveryInstruction = resolveToolCallRecoveryInstruction(error);
565
+ if (!recoveryInstruction) {
544
566
  throw error;
545
567
  }
546
568
  const retryArgs = [...args];
547
- retryArgs[0] = appendStrictToolInstruction(retryArgs[0]);
569
+ retryArgs[0] = appendToolRecoveryInstruction(retryArgs[0], recoveryInstruction);
548
570
  return normalizeAgentMessage(await member.apply(currentTarget, retryArgs));
549
571
  }
550
572
  };
@@ -46,6 +46,29 @@ function normalizeOpenAICompatibleInit(init) {
46
46
  delete normalized.omitAuthHeader;
47
47
  return normalized;
48
48
  }
49
// Resolve the GGUF file path for a node-llama-cpp embedding model: an
// explicit `init.modelPath` wins; otherwise `model` is used when it looks
// like a filesystem path (contains a path separator, including Windows
// "\\") or carries a .gguf extension in any case. Returns undefined when no
// path can be inferred.
// Fixes: the original rejected Windows backslash paths and upper-case
// ".GGUF" extensions; both are accepted now (backward compatible).
function inferNodeLlamaCppModelPath(embeddingModel) {
  const modelPath = typeof embeddingModel.init?.modelPath === "string" ? embeddingModel.init.modelPath.trim() : "";
  if (modelPath) {
    return modelPath;
  }
  const candidate = embeddingModel.model;
  const looksLikePath = candidate.includes("/") || candidate.includes("\\");
  const looksLikeGguf = candidate.toLowerCase().endsWith(".gguf");
  return looksLikePath || looksLikeGguf ? candidate : undefined;
}
56
// Build a LlamaCppEmbeddings instance for a node-llama-cpp embedding config.
// Throws when no GGUF path can be inferred, and wraps any
// import/initialization failure with install guidance (original error kept
// as `cause`).
async function createNodeLlamaCppEmbeddings(embeddingModel) {
  const modelPath = inferNodeLlamaCppModelPath(embeddingModel);
  if (!modelPath) {
    throw new Error(`Embedding model ${embeddingModel.id} with provider ${embeddingModel.provider} must define a GGUF path via top-level modelPath or use model as the GGUF path.`);
  }
  try {
    // Lazy import so workspaces without node-llama-cpp installed can still load this module.
    const { LlamaCppEmbeddings } = await import("@langchain/community/embeddings/llama_cpp");
    const embeddings = await LlamaCppEmbeddings.initialize({
      ...embeddingModel.init,
      modelPath,
    });
    return embeddings;
  }
  catch (error) {
    throw new Error(`Failed to initialize ${embeddingModel.provider} embedding model ${embeddingModel.id}. Install node-llama-cpp in the application workspace and ensure the GGUF file exists at ${modelPath}.`, { cause: error });
  }
}
49
72
  export function resolveCompiledEmbeddingModelRef(workspace, embeddingModelRef) {
50
73
  const resolvedId = embeddingModelRef ? resolveRefId(embeddingModelRef) : "default";
51
74
  const embeddingModel = workspace.embeddings.get(resolvedId);
@@ -85,5 +108,8 @@ export async function resolveCompiledEmbeddingModel(embeddingModel, resolver) {
85
108
  if (embeddingModel.provider === "llamaindex-ollama") {
86
109
  return createLlamaIndexEmbeddingModel(embeddingModel);
87
110
  }
88
- throw new Error(`Embedding model provider ${embeddingModel.provider} is not supported by the built-in runtime. Configure embeddingModelResolver or use openai-compatible/openai/ollama/llamaindex-ollama.`);
111
+ if (embeddingModel.provider === "node-llama-cpp" || embeddingModel.provider === "llama-cpp") {
112
+ return createNodeLlamaCppEmbeddings(embeddingModel);
113
+ }
114
+ throw new Error(`Embedding model provider ${embeddingModel.provider} is not supported by the built-in runtime. Configure embeddingModelResolver or use openai-compatible/openai/ollama/llamaindex-ollama/node-llama-cpp.`);
89
115
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@botbotgo/agent-harness",
3
- "version": "0.0.272",
3
+ "version": "0.0.274",
4
4
  "description": "Workspace runtime for multi-agent applications",
5
5
  "license": "MIT",
6
6
  "type": "module",