@webmcp-auto-ui/agent 2.5.26 → 2.5.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +10 -2
- package/src/autoui-server.ts +80 -65
- package/src/index.ts +25 -6
- package/src/loop.ts +52 -33
- package/src/prompts/claude-prompt-builder.ts +81 -0
- package/src/prompts/gemma4-prompt-builder.ts +205 -0
- package/src/prompts/index.ts +55 -0
- package/src/prompts/mistral-prompt-builder.ts +90 -0
- package/src/prompts/qwen-prompt-builder.ts +90 -0
- package/src/prompts/tool-call-parsers.ts +322 -0
- package/src/prompts/tool-refs.ts +196 -0
- package/src/providers/factory.ts +34 -3
- package/src/providers/hawk-models.ts +22 -0
- package/src/providers/hawk.ts +181 -0
- package/src/providers/transformers-models.ts +143 -0
- package/src/providers/transformers-serialize.ts +81 -0
- package/src/providers/transformers.ts +329 -0
- package/src/providers/transformers.worker.ts +640 -0
- package/src/providers/wasm.ts +132 -332
- package/src/recipes/_generated.ts +306 -0
- package/src/recipes/hackathon-assemblee-nationale.md +111 -0
- package/src/recipes/notebook-playbook.md +193 -0
- package/src/server/hawkProxy.ts +54 -0
- package/src/server/index.ts +2 -0
- package/src/tool-layers.ts +7 -403
- package/src/trace-observer.ts +669 -0
- package/src/types.ts +17 -7
- package/src/util/opfs-cache.ts +364 -0
- package/src/util/storage-inventory.ts +195 -0
- package/tests/gemma-prompt.test.ts +472 -0
- package/tests/loop.test.ts +3 -3
- package/tests/transformers-serialize.test.ts +103 -0
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
import type { LLMProvider, LLMResponse, ChatMessage, ProviderTool, ContentBlock } from '../types.js';
|
|
2
|
+
|
|
3
|
+
/** Configuration for {@link HawkProvider}. */
export interface HawkLLMProviderOptions {
  /** SvelteKit proxy endpoint, e.g. '/api/hawk'. */
  proxyUrl: string;
  /** Hawk model ID without prefix, e.g. 'qwen35-2b'. */
  model: string;
}
|
|
7
|
+
|
|
8
|
+
// ── OpenAI-compatible types ─────────────────────────────────────────
// Minimal subset of the OpenAI chat-completions wire format spoken by the
// Hawk proxy endpoint; only the fields this provider reads or writes.

/** Tool definition in OpenAI function-calling format. */
interface OaiTool {
  type: 'function';
  function: { name: string; description: string; parameters: Record<string, unknown> };
}

/** One chat message on the wire; `role: 'tool'` messages carry a tool result. */
interface OaiMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content?: string | null;
  tool_calls?: { id: string; type: 'function'; function: { name: string; arguments: string } }[];
  // Links a 'tool' message back to the assistant tool call it answers.
  tool_call_id?: string;
}

/** One completion choice returned by the endpoint. */
interface OaiChoice {
  message: {
    content?: string | null;
    tool_calls?: { id: string; type: 'function'; function: { name: string; arguments: string } }[];
  };
  finish_reason: string;
}
|
|
29
|
+
|
|
30
|
+
// ── Helpers ─────────────────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
let _counter = 0;
|
|
33
|
+
function hawkId(): string {
|
|
34
|
+
return 'hawk_' + (++_counter).toString(36) + '_' + Date.now().toString(36);
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
function toOaiTools(tools: ProviderTool[]): OaiTool[] {
|
|
38
|
+
return tools.map(t => ({
|
|
39
|
+
type: 'function' as const,
|
|
40
|
+
function: {
|
|
41
|
+
name: t.name,
|
|
42
|
+
description: t.description,
|
|
43
|
+
parameters: t.input_schema,
|
|
44
|
+
},
|
|
45
|
+
}));
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
function toOaiMessages(messages: ChatMessage[], system?: string): OaiMessage[] {
|
|
49
|
+
const out: OaiMessage[] = [];
|
|
50
|
+
|
|
51
|
+
if (system) out.push({ role: 'system', content: system });
|
|
52
|
+
|
|
53
|
+
for (const msg of messages) {
|
|
54
|
+
if (typeof msg.content === 'string') {
|
|
55
|
+
out.push({ role: msg.role === 'assistant' ? 'assistant' : 'user', content: msg.content });
|
|
56
|
+
continue;
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
const blocks = msg.content as ContentBlock[];
|
|
60
|
+
const textParts = blocks.filter(b => b.type === 'text').map(b => (b as { type: 'text'; text: string }).text);
|
|
61
|
+
const toolUses = blocks.filter(b => b.type === 'tool_use') as { type: 'tool_use'; id: string; name: string; input: Record<string, unknown> }[];
|
|
62
|
+
const toolResults = blocks.filter(b => b.type === 'tool_result') as { type: 'tool_result'; tool_use_id: string; content: string }[];
|
|
63
|
+
|
|
64
|
+
if (msg.role === 'assistant') {
|
|
65
|
+
const oai: OaiMessage = { role: 'assistant', content: textParts.join('\n') || null };
|
|
66
|
+
if (toolUses.length > 0) {
|
|
67
|
+
oai.tool_calls = toolUses.map(tu => ({
|
|
68
|
+
id: tu.id,
|
|
69
|
+
type: 'function' as const,
|
|
70
|
+
function: { name: tu.name, arguments: JSON.stringify(tu.input) },
|
|
71
|
+
}));
|
|
72
|
+
}
|
|
73
|
+
out.push(oai);
|
|
74
|
+
} else {
|
|
75
|
+
// User turn — may contain tool_result blocks (sent back after assistant tool_use)
|
|
76
|
+
for (const tr of toolResults) {
|
|
77
|
+
out.push({ role: 'tool', tool_call_id: tr.tool_use_id, content: tr.content });
|
|
78
|
+
}
|
|
79
|
+
if (textParts.length > 0) {
|
|
80
|
+
out.push({ role: 'user', content: textParts.join('\n') });
|
|
81
|
+
}
|
|
82
|
+
// If only tool_results and no text, we've already pushed them
|
|
83
|
+
if (toolResults.length === 0 && textParts.length === 0) {
|
|
84
|
+
out.push({ role: 'user', content: '' });
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
}
|
|
88
|
+
return out;
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
function parseArguments(raw: string): Record<string, unknown> {
|
|
92
|
+
try { return JSON.parse(raw); } catch { return { _raw: raw }; }
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
// ── Provider ────────────────────────────────────────────────────────
|
|
96
|
+
|
|
97
|
+
export class HawkProvider implements LLMProvider {
|
|
98
|
+
readonly name = 'hawk';
|
|
99
|
+
readonly model: string;
|
|
100
|
+
private proxyUrl: string;
|
|
101
|
+
|
|
102
|
+
constructor(options: HawkLLMProviderOptions) {
|
|
103
|
+
this.model = options.model;
|
|
104
|
+
this.proxyUrl = options.proxyUrl;
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
async chat(
|
|
108
|
+
messages: ChatMessage[],
|
|
109
|
+
tools: ProviderTool[],
|
|
110
|
+
options?: { signal?: AbortSignal; system?: string; maxTokens?: number; temperature?: number },
|
|
111
|
+
): Promise<LLMResponse> {
|
|
112
|
+
const oaiMessages = toOaiMessages(messages, options?.system);
|
|
113
|
+
const oaiTools = tools.length > 0 ? toOaiTools(tools) : undefined;
|
|
114
|
+
|
|
115
|
+
// NOTE: `model` is NOT sent in the body — the server proxy injects it
|
|
116
|
+
// from the X-Model header into the upstream Hawk request.
|
|
117
|
+
const body: Record<string, unknown> = {
|
|
118
|
+
messages: oaiMessages,
|
|
119
|
+
stream: false,
|
|
120
|
+
};
|
|
121
|
+
if (oaiTools) body.tools = oaiTools;
|
|
122
|
+
if (options?.maxTokens) body.max_tokens = options.maxTokens;
|
|
123
|
+
if (options?.temperature != null) body.temperature = options.temperature;
|
|
124
|
+
|
|
125
|
+
const response = await fetch(this.proxyUrl, {
|
|
126
|
+
method: 'POST',
|
|
127
|
+
headers: {
|
|
128
|
+
'Content-Type': 'application/json',
|
|
129
|
+
'X-Model': this.model,
|
|
130
|
+
},
|
|
131
|
+
body: JSON.stringify(body),
|
|
132
|
+
signal: options?.signal,
|
|
133
|
+
});
|
|
134
|
+
|
|
135
|
+
if (!response.ok) {
|
|
136
|
+
const txt = await response.text().catch(() => '');
|
|
137
|
+
throw new Error(`Hawk LLM ${response.status}${txt ? ': ' + txt.slice(0, 200) : ''}`);
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
const data = await response.json() as { choices?: OaiChoice[]; usage?: { prompt_tokens?: number; completion_tokens?: number } };
|
|
141
|
+
const choice = data.choices?.[0];
|
|
142
|
+
if (!choice) throw new Error('Hawk LLM returned no choices');
|
|
143
|
+
|
|
144
|
+
const content: ContentBlock[] = [];
|
|
145
|
+
const toolCalls = choice.message.tool_calls;
|
|
146
|
+
|
|
147
|
+
if (choice.message.content) {
|
|
148
|
+
content.push({ type: 'text', text: choice.message.content });
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
if (toolCalls && toolCalls.length > 0) {
|
|
152
|
+
for (const tc of toolCalls) {
|
|
153
|
+
content.push({
|
|
154
|
+
type: 'tool_use',
|
|
155
|
+
id: tc.id || hawkId(),
|
|
156
|
+
name: tc.function.name,
|
|
157
|
+
input: parseArguments(tc.function.arguments),
|
|
158
|
+
});
|
|
159
|
+
}
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
// Ensure at least one block
|
|
163
|
+
if (content.length === 0) {
|
|
164
|
+
content.push({ type: 'text', text: '' });
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
const hasToolUse = content.some(b => b.type === 'tool_use');
|
|
168
|
+
const stopReason = hasToolUse ? 'tool_use'
|
|
169
|
+
: choice.finish_reason === 'tool_calls' ? 'tool_use'
|
|
170
|
+
: 'end_turn';
|
|
171
|
+
|
|
172
|
+
return {
|
|
173
|
+
content,
|
|
174
|
+
stopReason,
|
|
175
|
+
usage: data.usage ? {
|
|
176
|
+
input_tokens: data.usage.prompt_tokens ?? 0,
|
|
177
|
+
output_tokens: data.usage.completion_tokens ?? 0,
|
|
178
|
+
} : undefined,
|
|
179
|
+
};
|
|
180
|
+
}
|
|
181
|
+
}
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
// Shared catalog of in-browser models served via transformers.js (ONNX + WebGPU).
|
|
2
|
+
// Read by TransformersProvider (agent A) and the <LLMSelector> UI (agent D).
|
|
3
|
+
//
|
|
4
|
+
// Each entry pins:
|
|
5
|
+
// - `repo`: HuggingFace repository ID to load via from_pretrained
|
|
6
|
+
// - `dtype`: mixed per-component quantization (embed_tokens / decoder / vision)
|
|
7
|
+
// - `family`: prompt builder family — drives which {gemma,qwen,mistral}-prompt-builder is used
|
|
8
|
+
// - `toolFormat`: output tool-call syntax — drives which parser strategy is used
|
|
9
|
+
// - `contextLength`: native context budget (for clipping heuristics)
|
|
10
|
+
// - `vision`: true if the model accepts images via RawImage + AutoProcessor
|
|
11
|
+
// - `modelClass`: optional specialized transformers.js class name (for VLMs);
|
|
12
|
+
// defaults to AutoModelForCausalLM when omitted
|
|
13
|
+
|
|
14
|
+
/** Prompt-builder family — selects which prompt builder formats the conversation. */
export type TransformersFamily = 'gemma4' | 'qwen3' | 'mistral';
/** Tool-call output syntax — selects the parser strategy for model output. */
export type ToolCallFormat = 'gemma-native' | 'qwen-json' | 'mistral-toolcalls';

/** ONNX quantization/precision level understood by transformers.js. */
export type DType = 'q4' | 'q4f16' | 'q8' | 'fp16' | 'fp32';

/** Static description of one in-browser model (field semantics in the file header). */
export interface TransformersModelEntry {
  // HuggingFace repository ID loaded via from_pretrained.
  repo: string;
  // Scalar dtype for monolithic ONNX exports, or a per-component quantization map.
  dtype: DType | {
    embed_tokens?: DType;
    decoder_model_merged?: DType;
    vision_encoder?: DType;
    audio_encoder?: DType;
  };
  family: TransformersFamily;
  toolFormat: ToolCallFormat;
  // Native context budget in tokens (used for clipping heuristics).
  contextLength: number;
  // True if the model accepts images via RawImage + AutoProcessor.
  vision: boolean;
  // Specialized transformers.js class name; AutoModelForCausalLM when omitted.
  modelClass?: string;
  /** Approximate download size in bytes (for progress UI). */
  size: number;
  /** Human-readable label for the model selector. */
  label: string;
}

/** Closed set of selectable model IDs; keys of TRANSFORMERS_MODELS. */
export type TransformersModelId =
  | 'transformers-gemma-4-e2b'
  | 'transformers-gemma-4-e4b'
  | 'transformers-qwen-3-4b'
  | 'transformers-qwen-3.5-2b'
  | 'transformers-qwen-3.5-4b'
  | 'transformers-ministral-3-3b';
|
|
45
|
+
|
|
46
|
+
// Catalog of pinned model configurations. Keys double as user-facing model IDs.
export const TRANSFORMERS_MODELS: Record<TransformersModelId, TransformersModelEntry> = {
  'transformers-gemma-4-e2b': {
    repo: 'onnx-community/gemma-4-E2B-it-ONNX',
    modelClass: 'Gemma4ForConditionalGeneration',
    dtype: {
      audio_encoder: 'q4',
      vision_encoder: 'q4',
      embed_tokens: 'q4',
      decoder_model_merged: 'q4f16',
    },
    family: 'gemma4',
    toolFormat: 'gemma-native',
    contextLength: 32768,
    vision: true,
    size: 2_000_000_000,
    label: 'Gemma 4 E2B (Vision)',
  },
  'transformers-gemma-4-e4b': {
    repo: 'onnx-community/gemma-4-E4B-it-ONNX',
    modelClass: 'Gemma4ForConditionalGeneration',
    dtype: {
      audio_encoder: 'q4',
      vision_encoder: 'q4',
      embed_tokens: 'q4',
      decoder_model_merged: 'q4f16',
    },
    family: 'gemma4',
    toolFormat: 'gemma-native',
    contextLength: 32768,
    vision: true,
    size: 3_000_000_000,
    label: 'Gemma 4 E4B (Vision)',
  },
  'transformers-qwen-3-4b': {
    // onnx-community/Qwen3-4B-ONNX ships a monolithic model_q4f16.onnx
    // (not split into embed_tokens + decoder_model_merged), so transformers.js
    // expects a scalar dtype string to resolve onnx/model_<dtype>.onnx.
    repo: 'onnx-community/Qwen3-4B-ONNX',
    dtype: 'q4f16',
    family: 'qwen3',
    toolFormat: 'qwen-json',
    contextLength: 32768,
    vision: false,
    size: 3_050_000_000,
    label: 'Qwen 3 4B',
  },
  'transformers-qwen-3.5-2b': {
    repo: 'onnx-community/Qwen3.5-2B-ONNX',
    dtype: { embed_tokens: 'q4', vision_encoder: 'fp16', decoder_model_merged: 'q4' },
    family: 'qwen3',
    toolFormat: 'qwen-json',
    contextLength: 32768,
    vision: true,
    modelClass: 'Qwen3_5ForConditionalGeneration',
    size: 1_600_000_000,
    label: 'Qwen 3.5 2B',
  },
  'transformers-qwen-3.5-4b': {
    repo: 'onnx-community/Qwen3.5-4B-ONNX',
    dtype: { embed_tokens: 'q4', vision_encoder: 'fp16', decoder_model_merged: 'q4' },
    family: 'qwen3',
    toolFormat: 'qwen-json',
    contextLength: 32768,
    vision: true,
    modelClass: 'Qwen3_5ForConditionalGeneration',
    size: 3_000_000_000,
    label: 'Qwen 3.5 4B',
  },
  'transformers-ministral-3-3b': {
    repo: 'mistralai/Ministral-3-3B-Instruct-2512-ONNX',
    // Mistral3ForConditionalGeneration is registered internally but not
    // re-exported from transformers.js 4.1.0 — use the Auto wrapper, which
    // routes via the registered name (this is what the official demo does).
    modelClass: 'AutoModelForImageTextToText',
    dtype: {
      embed_tokens: 'fp16',
      vision_encoder: 'q4',
      decoder_model_merged: 'q4f16',
    },
    family: 'mistral',
    toolFormat: 'mistral-toolcalls',
    contextLength: 32768,
    vision: true,
    size: 2_200_000_000,
    label: 'Ministral 3 3B (Vision)',
  },
};
|
|
133
|
+
|
|
134
|
+
export function getTransformersModel(id: TransformersModelId): TransformersModelEntry {
|
|
135
|
+
return TRANSFORMERS_MODELS[id];
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
export function listTransformersModels(): Array<{ id: TransformersModelId; entry: TransformersModelEntry }> {
|
|
139
|
+
return Object.entries(TRANSFORMERS_MODELS).map(([id, entry]) => ({
|
|
140
|
+
id: id as TransformersModelId,
|
|
141
|
+
entry,
|
|
142
|
+
}));
|
|
143
|
+
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pure serializer: ChatMessage[] → flat [{role, content}] array suitable for
|
|
3
|
+
* tokenizer.apply_chat_template in the transformers.js worker.
|
|
4
|
+
*
|
|
5
|
+
* Extracted from TransformersProvider so it can be unit-tested without
|
|
6
|
+
* spinning up a Web Worker. Tool calls and tool results are rendered as
|
|
7
|
+
* textual spans using the wire format each model family expects:
|
|
8
|
+
*
|
|
9
|
+
* - Qwen (ChatML): <tool_call>{json}</tool_call>
|
|
10
|
+
* <tool_response>{json}</tool_response>
|
|
11
|
+
* - Mistral: [TOOL_CALLS][{json}]
|
|
12
|
+
* [TOOL_RESULTS] {json} [/TOOL_RESULTS]
|
|
13
|
+
* - Gemma: legacy path (this serializer is not used — see
|
|
14
|
+
* buildGemmaPrompt in prompts/gemma4-prompt-builder.ts).
|
|
15
|
+
*
|
|
16
|
+
* The actual role tags (<|im_start|>user\n…<|im_end|>, [INST]…[/INST]) are
|
|
17
|
+
* added by apply_chat_template from the model's baked-in chat_template.
|
|
18
|
+
*/
|
|
19
|
+
import type { ChatMessage, ContentBlock } from '../types.js';
|
|
20
|
+
import { formatToolCall, formatToolResponse } from '../prompts/gemma4-prompt-builder.js';
|
|
21
|
+
|
|
22
|
+
export type PromptKind = 'gemma' | 'qwen' | 'mistral';
|
|
23
|
+
|
|
24
|
+
export function serializeMessagesForTemplate(
|
|
25
|
+
messages: ChatMessage[],
|
|
26
|
+
promptKind: PromptKind,
|
|
27
|
+
): Array<{ role: string; content: string }> {
|
|
28
|
+
const out: Array<{ role: string; content: string }> = [];
|
|
29
|
+
for (const msg of messages) {
|
|
30
|
+
const role = msg.role; // 'user' | 'assistant' | 'system'
|
|
31
|
+
if (typeof msg.content === 'string') {
|
|
32
|
+
out.push({ role, content: msg.content });
|
|
33
|
+
continue;
|
|
34
|
+
}
|
|
35
|
+
const segments: string[] = [];
|
|
36
|
+
let toolResultBuf: string[] = [];
|
|
37
|
+
const flushToolResults = () => {
|
|
38
|
+
if (toolResultBuf.length === 0) return;
|
|
39
|
+
if (promptKind === 'qwen') {
|
|
40
|
+
for (const tr of toolResultBuf) {
|
|
41
|
+
segments.push(`<tool_response>\n${tr}\n</tool_response>`);
|
|
42
|
+
}
|
|
43
|
+
} else if (promptKind === 'mistral') {
|
|
44
|
+
for (const tr of toolResultBuf) {
|
|
45
|
+
segments.push(`[TOOL_RESULTS] ${tr} [/TOOL_RESULTS]`);
|
|
46
|
+
}
|
|
47
|
+
} else {
|
|
48
|
+
// Gemma path — kept for defensive completeness; main code uses
|
|
49
|
+
// buildGemmaPrompt instead.
|
|
50
|
+
for (const tr of toolResultBuf) segments.push(formatToolResponse(tr));
|
|
51
|
+
}
|
|
52
|
+
toolResultBuf = [];
|
|
53
|
+
};
|
|
54
|
+
for (const block of msg.content as ContentBlock[]) {
|
|
55
|
+
if (block.type === 'text') {
|
|
56
|
+
segments.push(block.text);
|
|
57
|
+
} else if (block.type === 'tool_use') {
|
|
58
|
+
if (promptKind === 'qwen') {
|
|
59
|
+
segments.push(`<tool_call>\n${JSON.stringify({ name: block.name, arguments: block.input })}\n</tool_call>`);
|
|
60
|
+
} else if (promptKind === 'mistral') {
|
|
61
|
+
segments.push(`[TOOL_CALLS][${JSON.stringify({ name: block.name, arguments: block.input })}]`);
|
|
62
|
+
} else {
|
|
63
|
+
segments.push(formatToolCall(block.name, block.input));
|
|
64
|
+
}
|
|
65
|
+
} else if (block.type === 'tool_result') {
|
|
66
|
+
toolResultBuf.push(block.content);
|
|
67
|
+
}
|
|
68
|
+
// 'image' blocks are not reachable here: vision turns go through the
|
|
69
|
+
// legacy `prompt` path in TransformersProvider.chat().
|
|
70
|
+
}
|
|
71
|
+
flushToolResults();
|
|
72
|
+
// Promote pure-tool-result turns: Qwen uses the 'tool' role, Mistral
|
|
73
|
+
// keeps 'user' (the template wraps tool results inside a user turn).
|
|
74
|
+
const onlyToolResult = (msg.content as ContentBlock[]).every(b => b.type === 'tool_result');
|
|
75
|
+
const effectiveRole = onlyToolResult && role === 'user'
|
|
76
|
+
? (promptKind === 'qwen' ? 'tool' : 'user')
|
|
77
|
+
: role;
|
|
78
|
+
out.push({ role: effectiveRole, content: segments.join('\n') });
|
|
79
|
+
}
|
|
80
|
+
return out;
|
|
81
|
+
}
|