@clinebot/llms 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +198 -0
- package/dist/config-browser.d.ts +3 -0
- package/dist/config.d.ts +3 -0
- package/dist/index.browser.d.ts +4 -0
- package/dist/index.browser.js +1 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +7 -0
- package/dist/models/generated-access.d.ts +4 -0
- package/dist/models/generated-provider-loaders.d.ts +13 -0
- package/dist/models/generated.d.ts +14 -0
- package/dist/models/index.d.ts +43 -0
- package/dist/models/models-dev-catalog.d.ts +32 -0
- package/dist/models/providers/aihubmix.d.ts +5 -0
- package/dist/models/providers/anthropic.d.ts +53 -0
- package/dist/models/providers/asksage.d.ts +5 -0
- package/dist/models/providers/baseten.d.ts +5 -0
- package/dist/models/providers/bedrock.d.ts +7 -0
- package/dist/models/providers/cerebras.d.ts +7 -0
- package/dist/models/providers/claude-code.d.ts +4 -0
- package/dist/models/providers/cline.d.ts +34 -0
- package/dist/models/providers/deepseek.d.ts +8 -0
- package/dist/models/providers/dify.d.ts +5 -0
- package/dist/models/providers/doubao.d.ts +7 -0
- package/dist/models/providers/fireworks.d.ts +8 -0
- package/dist/models/providers/gemini.d.ts +9 -0
- package/dist/models/providers/groq.d.ts +8 -0
- package/dist/models/providers/hicap.d.ts +5 -0
- package/dist/models/providers/huawei-cloud-maas.d.ts +5 -0
- package/dist/models/providers/huggingface.d.ts +6 -0
- package/dist/models/providers/index.d.ts +45 -0
- package/dist/models/providers/litellm.d.ts +5 -0
- package/dist/models/providers/lmstudio.d.ts +5 -0
- package/dist/models/providers/minimax.d.ts +7 -0
- package/dist/models/providers/mistral.d.ts +5 -0
- package/dist/models/providers/moonshot.d.ts +7 -0
- package/dist/models/providers/nebius.d.ts +7 -0
- package/dist/models/providers/nous-research.d.ts +7 -0
- package/dist/models/providers/oca.d.ts +9 -0
- package/dist/models/providers/ollama.d.ts +5 -0
- package/dist/models/providers/openai-codex.d.ts +10 -0
- package/dist/models/providers/openai.d.ts +9 -0
- package/dist/models/providers/opencode.d.ts +10 -0
- package/dist/models/providers/openrouter.d.ts +7 -0
- package/dist/models/providers/qwen-code.d.ts +7 -0
- package/dist/models/providers/qwen.d.ts +7 -0
- package/dist/models/providers/requesty.d.ts +6 -0
- package/dist/models/providers/sambanova.d.ts +7 -0
- package/dist/models/providers/sapaicore.d.ts +7 -0
- package/dist/models/providers/together.d.ts +8 -0
- package/dist/models/providers/vercel-ai-gateway.d.ts +5 -0
- package/dist/models/providers/vertex.d.ts +7 -0
- package/dist/models/providers/xai.d.ts +8 -0
- package/dist/models/providers/zai.d.ts +7 -0
- package/dist/models/query.d.ts +181 -0
- package/dist/models/registry.d.ts +123 -0
- package/dist/models/schemas/index.d.ts +7 -0
- package/dist/models/schemas/model.d.ts +340 -0
- package/dist/models/schemas/query.d.ts +191 -0
- package/dist/providers/handlers/ai-sdk-community.d.ts +46 -0
- package/dist/providers/handlers/ai-sdk-provider-base.d.ts +32 -0
- package/dist/providers/handlers/anthropic-base.d.ts +26 -0
- package/dist/providers/handlers/asksage.d.ts +12 -0
- package/dist/providers/handlers/auth.d.ts +5 -0
- package/dist/providers/handlers/base.d.ts +55 -0
- package/dist/providers/handlers/bedrock-base.d.ts +23 -0
- package/dist/providers/handlers/bedrock-client.d.ts +4 -0
- package/dist/providers/handlers/community-sdk.d.ts +97 -0
- package/dist/providers/handlers/fetch-base.d.ts +18 -0
- package/dist/providers/handlers/gemini-base.d.ts +25 -0
- package/dist/providers/handlers/index.d.ts +19 -0
- package/dist/providers/handlers/openai-base.d.ts +54 -0
- package/dist/providers/handlers/openai-responses.d.ts +64 -0
- package/dist/providers/handlers/providers.d.ts +43 -0
- package/dist/providers/handlers/r1-base.d.ts +62 -0
- package/dist/providers/handlers/registry.d.ts +106 -0
- package/dist/providers/handlers/vertex.d.ts +32 -0
- package/dist/providers/index.d.ts +100 -0
- package/dist/providers/public.browser.d.ts +2 -0
- package/dist/providers/public.d.ts +3 -0
- package/dist/providers/shared/openai-compatible.d.ts +10 -0
- package/dist/providers/transform/ai-sdk-community-format.d.ts +9 -0
- package/dist/providers/transform/anthropic-format.d.ts +24 -0
- package/dist/providers/transform/content-format.d.ts +3 -0
- package/dist/providers/transform/gemini-format.d.ts +19 -0
- package/dist/providers/transform/index.d.ts +10 -0
- package/dist/providers/transform/openai-format.d.ts +36 -0
- package/dist/providers/transform/r1-format.d.ts +26 -0
- package/dist/providers/types/config.d.ts +261 -0
- package/dist/providers/types/handler.d.ts +71 -0
- package/dist/providers/types/index.d.ts +11 -0
- package/dist/providers/types/messages.d.ts +139 -0
- package/dist/providers/types/model-info.d.ts +32 -0
- package/dist/providers/types/provider-ids.d.ts +63 -0
- package/dist/providers/types/settings.d.ts +308 -0
- package/dist/providers/types/stream.d.ts +106 -0
- package/dist/providers/utils/index.d.ts +7 -0
- package/dist/providers/utils/retry.d.ts +38 -0
- package/dist/providers/utils/stream-processor.d.ts +110 -0
- package/dist/providers/utils/tool-processor.d.ts +34 -0
- package/dist/sdk.d.ts +18 -0
- package/dist/types.d.ts +60 -0
- package/package.json +66 -0
- package/src/catalog.ts +20 -0
- package/src/config-browser.ts +11 -0
- package/src/config.ts +49 -0
- package/src/index.browser.ts +9 -0
- package/src/index.ts +10 -0
- package/src/live-providers.test.ts +137 -0
- package/src/models/generated-access.ts +41 -0
- package/src/models/generated-provider-loaders.ts +166 -0
- package/src/models/generated.ts +11997 -0
- package/src/models/index.ts +271 -0
- package/src/models/models-dev-catalog.test.ts +161 -0
- package/src/models/models-dev-catalog.ts +161 -0
- package/src/models/providers/aihubmix.ts +19 -0
- package/src/models/providers/anthropic.ts +60 -0
- package/src/models/providers/asksage.ts +19 -0
- package/src/models/providers/baseten.ts +21 -0
- package/src/models/providers/bedrock.ts +30 -0
- package/src/models/providers/cerebras.ts +24 -0
- package/src/models/providers/claude-code.ts +51 -0
- package/src/models/providers/cline.ts +25 -0
- package/src/models/providers/deepseek.ts +33 -0
- package/src/models/providers/dify.ts +17 -0
- package/src/models/providers/doubao.ts +33 -0
- package/src/models/providers/fireworks.ts +34 -0
- package/src/models/providers/gemini.ts +43 -0
- package/src/models/providers/groq.ts +33 -0
- package/src/models/providers/hicap.ts +18 -0
- package/src/models/providers/huawei-cloud-maas.ts +18 -0
- package/src/models/providers/huggingface.ts +22 -0
- package/src/models/providers/index.ts +162 -0
- package/src/models/providers/litellm.ts +19 -0
- package/src/models/providers/lmstudio.ts +22 -0
- package/src/models/providers/minimax.ts +34 -0
- package/src/models/providers/mistral.ts +19 -0
- package/src/models/providers/moonshot.ts +34 -0
- package/src/models/providers/nebius.ts +24 -0
- package/src/models/providers/nous-research.ts +21 -0
- package/src/models/providers/oca.ts +30 -0
- package/src/models/providers/ollama.ts +18 -0
- package/src/models/providers/openai-codex.ts +30 -0
- package/src/models/providers/openai.ts +43 -0
- package/src/models/providers/opencode.ts +28 -0
- package/src/models/providers/openrouter.ts +24 -0
- package/src/models/providers/qwen-code.ts +33 -0
- package/src/models/providers/qwen.ts +34 -0
- package/src/models/providers/requesty.ts +23 -0
- package/src/models/providers/sambanova.ts +23 -0
- package/src/models/providers/sapaicore.ts +34 -0
- package/src/models/providers/together.ts +35 -0
- package/src/models/providers/vercel-ai-gateway.ts +23 -0
- package/src/models/providers/vertex.ts +36 -0
- package/src/models/providers/xai.ts +34 -0
- package/src/models/providers/zai.ts +25 -0
- package/src/models/query.ts +407 -0
- package/src/models/registry.ts +511 -0
- package/src/models/schemas/index.ts +62 -0
- package/src/models/schemas/model.ts +308 -0
- package/src/models/schemas/query.ts +336 -0
- package/src/providers/browser.ts +4 -0
- package/src/providers/handlers/ai-sdk-community.ts +226 -0
- package/src/providers/handlers/ai-sdk-provider-base.ts +193 -0
- package/src/providers/handlers/anthropic-base.ts +372 -0
- package/src/providers/handlers/asksage.test.ts +103 -0
- package/src/providers/handlers/asksage.ts +138 -0
- package/src/providers/handlers/auth.test.ts +19 -0
- package/src/providers/handlers/auth.ts +121 -0
- package/src/providers/handlers/base.test.ts +46 -0
- package/src/providers/handlers/base.ts +160 -0
- package/src/providers/handlers/bedrock-base.ts +390 -0
- package/src/providers/handlers/bedrock-client.ts +100 -0
- package/src/providers/handlers/codex.test.ts +123 -0
- package/src/providers/handlers/community-sdk.test.ts +288 -0
- package/src/providers/handlers/community-sdk.ts +392 -0
- package/src/providers/handlers/fetch-base.ts +68 -0
- package/src/providers/handlers/gemini-base.ts +302 -0
- package/src/providers/handlers/index.ts +67 -0
- package/src/providers/handlers/openai-base.ts +277 -0
- package/src/providers/handlers/openai-responses.ts +598 -0
- package/src/providers/handlers/providers.test.ts +120 -0
- package/src/providers/handlers/providers.ts +563 -0
- package/src/providers/handlers/r1-base.ts +280 -0
- package/src/providers/handlers/registry.ts +185 -0
- package/src/providers/handlers/vertex.test.ts +124 -0
- package/src/providers/handlers/vertex.ts +292 -0
- package/src/providers/index.ts +534 -0
- package/src/providers/public.browser.ts +20 -0
- package/src/providers/public.ts +51 -0
- package/src/providers/shared/openai-compatible.ts +63 -0
- package/src/providers/transform/ai-sdk-community-format.test.ts +73 -0
- package/src/providers/transform/ai-sdk-community-format.ts +115 -0
- package/src/providers/transform/anthropic-format.ts +218 -0
- package/src/providers/transform/content-format.ts +34 -0
- package/src/providers/transform/format-conversion.test.ts +310 -0
- package/src/providers/transform/gemini-format.ts +167 -0
- package/src/providers/transform/index.ts +22 -0
- package/src/providers/transform/openai-format.ts +247 -0
- package/src/providers/transform/r1-format.ts +287 -0
- package/src/providers/types/config.ts +388 -0
- package/src/providers/types/handler.ts +87 -0
- package/src/providers/types/index.ts +120 -0
- package/src/providers/types/messages.ts +158 -0
- package/src/providers/types/model-info.test.ts +57 -0
- package/src/providers/types/model-info.ts +65 -0
- package/src/providers/types/provider-ids.test.ts +12 -0
- package/src/providers/types/provider-ids.ts +89 -0
- package/src/providers/types/settings.test.ts +49 -0
- package/src/providers/types/settings.ts +533 -0
- package/src/providers/types/stream.ts +117 -0
- package/src/providers/utils/index.ts +27 -0
- package/src/providers/utils/retry.test.ts +140 -0
- package/src/providers/utils/retry.ts +188 -0
- package/src/providers/utils/stream-processor.test.ts +232 -0
- package/src/providers/utils/stream-processor.ts +472 -0
- package/src/providers/utils/tool-processor.test.ts +34 -0
- package/src/providers/utils/tool-processor.ts +111 -0
- package/src/sdk.ts +264 -0
- package/src/types.ts +79 -0
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Message Format Converter
|
|
3
|
+
*
|
|
4
|
+
* Converts our unified Message format to OpenAI's ChatCompletionMessageParam format.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { formatFileContentBlock } from "@clinebot/shared";
|
|
8
|
+
import type OpenAI from "openai";
|
|
9
|
+
import type {
|
|
10
|
+
ContentBlock,
|
|
11
|
+
FileContent,
|
|
12
|
+
ImageContent,
|
|
13
|
+
Message,
|
|
14
|
+
TextContent,
|
|
15
|
+
ToolResultContent,
|
|
16
|
+
ToolUseContent,
|
|
17
|
+
} from "../types/messages";
|
|
18
|
+
import {
|
|
19
|
+
normalizeToolUseInput,
|
|
20
|
+
serializeToolResultContent,
|
|
21
|
+
} from "./content-format";
|
|
22
|
+
|
|
23
|
+
// Local aliases for the OpenAI SDK chat-completion param types used below.
type OpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam;
type OpenAIContentPart = OpenAI.Chat.ChatCompletionContentPart;
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* Convert messages to OpenAI format
|
|
28
|
+
*/
|
|
29
|
+
export function convertToOpenAIMessages(messages: Message[]): OpenAIMessage[] {
|
|
30
|
+
return messages.flatMap(convertMessage);
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
function convertMessage(message: Message): OpenAIMessage[] {
|
|
34
|
+
const { role, content } = message;
|
|
35
|
+
|
|
36
|
+
// Simple string content
|
|
37
|
+
if (typeof content === "string") {
|
|
38
|
+
return [{ role, content } as OpenAIMessage];
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
// Array content - need to process blocks
|
|
42
|
+
if (role === "assistant") {
|
|
43
|
+
return [convertAssistantMessage(content)];
|
|
44
|
+
} else {
|
|
45
|
+
return convertUserMessage(content);
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
function convertAssistantMessage(content: ContentBlock[]): OpenAIMessage {
|
|
50
|
+
const textParts: string[] = [];
|
|
51
|
+
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
|
|
52
|
+
|
|
53
|
+
for (const block of content) {
|
|
54
|
+
switch (block.type) {
|
|
55
|
+
case "text":
|
|
56
|
+
textParts.push((block as TextContent).text);
|
|
57
|
+
break;
|
|
58
|
+
case "tool_use": {
|
|
59
|
+
const toolUse = block as ToolUseContent;
|
|
60
|
+
toolCalls.push({
|
|
61
|
+
id: toolUse.id,
|
|
62
|
+
type: "function",
|
|
63
|
+
function: {
|
|
64
|
+
name: toolUse.name,
|
|
65
|
+
arguments: JSON.stringify(normalizeToolUseInput(toolUse.input)),
|
|
66
|
+
},
|
|
67
|
+
});
|
|
68
|
+
break;
|
|
69
|
+
}
|
|
70
|
+
case "thinking":
|
|
71
|
+
// OpenAI doesn't have native thinking blocks, skip
|
|
72
|
+
break;
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
const message: OpenAI.Chat.ChatCompletionAssistantMessageParam = {
|
|
77
|
+
role: "assistant",
|
|
78
|
+
content: textParts.length > 0 ? textParts.join("\n") : null,
|
|
79
|
+
};
|
|
80
|
+
|
|
81
|
+
if (toolCalls.length > 0) {
|
|
82
|
+
message.tool_calls = toolCalls;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
return message;
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
function convertUserMessage(content: ContentBlock[]): OpenAIMessage[] {
|
|
89
|
+
const messages: OpenAIMessage[] = [];
|
|
90
|
+
|
|
91
|
+
// Convert all tool results to separate tool messages
|
|
92
|
+
const toolResults = content.filter(
|
|
93
|
+
(b) => b.type === "tool_result",
|
|
94
|
+
) as ToolResultContent[];
|
|
95
|
+
for (const result of toolResults) {
|
|
96
|
+
messages.push({
|
|
97
|
+
role: "tool",
|
|
98
|
+
tool_call_id: result.tool_use_id,
|
|
99
|
+
content: serializeToolResultContent(result.content),
|
|
100
|
+
});
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
// Preserve any non-tool user content as a regular user message
|
|
104
|
+
const userContent = content.filter((b) => b.type !== "tool_result");
|
|
105
|
+
if (userContent.length === 0) {
|
|
106
|
+
return messages;
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
const parts: OpenAIContentPart[] = [];
|
|
110
|
+
|
|
111
|
+
for (const block of userContent) {
|
|
112
|
+
switch (block.type) {
|
|
113
|
+
case "text":
|
|
114
|
+
parts.push({ type: "text", text: (block as TextContent).text });
|
|
115
|
+
break;
|
|
116
|
+
case "file": {
|
|
117
|
+
const fileBlock = block as FileContent;
|
|
118
|
+
parts.push({
|
|
119
|
+
type: "text",
|
|
120
|
+
text: formatFileContentBlock(fileBlock.path, fileBlock.content),
|
|
121
|
+
});
|
|
122
|
+
break;
|
|
123
|
+
}
|
|
124
|
+
case "image": {
|
|
125
|
+
const img = block as ImageContent;
|
|
126
|
+
parts.push({
|
|
127
|
+
type: "image_url",
|
|
128
|
+
image_url: {
|
|
129
|
+
url: `data:${img.mediaType};base64,${img.data}`,
|
|
130
|
+
},
|
|
131
|
+
});
|
|
132
|
+
break;
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
if (parts.length === 0) {
|
|
137
|
+
return messages;
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
messages.push({
|
|
141
|
+
role: "user",
|
|
142
|
+
content:
|
|
143
|
+
parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts,
|
|
144
|
+
});
|
|
145
|
+
|
|
146
|
+
return messages;
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
/**
|
|
150
|
+
* Normalize a JSON Schema for OpenAI strict mode.
|
|
151
|
+
*
|
|
152
|
+
* Strict mode requires:
|
|
153
|
+
* - `additionalProperties: false` on every object
|
|
154
|
+
* - All properties listed in `required` (optional ones become nullable)
|
|
155
|
+
*/
|
|
156
|
+
function normalizeForStrictMode(schema: unknown): unknown {
|
|
157
|
+
if (!schema || typeof schema !== "object" || Array.isArray(schema)) {
|
|
158
|
+
return schema;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
const s = { ...(schema as Record<string, unknown>) };
|
|
162
|
+
|
|
163
|
+
// Remove $schema – OpenAI rejects it
|
|
164
|
+
delete s.$schema;
|
|
165
|
+
|
|
166
|
+
if (s.type === "object") {
|
|
167
|
+
s.additionalProperties = false;
|
|
168
|
+
|
|
169
|
+
const properties = s.properties as Record<string, unknown> | undefined;
|
|
170
|
+
const required = (s.required as string[] | undefined) ?? [];
|
|
171
|
+
|
|
172
|
+
if (properties) {
|
|
173
|
+
const allKeys = Object.keys(properties);
|
|
174
|
+
const requiredSet = new Set(required);
|
|
175
|
+
|
|
176
|
+
// Make every property required; wrap non-required ones as nullable
|
|
177
|
+
const normalized: Record<string, unknown> = {};
|
|
178
|
+
for (const key of allKeys) {
|
|
179
|
+
let prop = normalizeForStrictMode(properties[key]);
|
|
180
|
+
if (!requiredSet.has(key)) {
|
|
181
|
+
// Wrap as nullable via anyOf
|
|
182
|
+
prop = { anyOf: [prop, { type: "null" }] };
|
|
183
|
+
}
|
|
184
|
+
normalized[key] = prop;
|
|
185
|
+
}
|
|
186
|
+
s.properties = normalized;
|
|
187
|
+
s.required = allKeys;
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
// Recurse into nested schemas
|
|
192
|
+
if (s.items) {
|
|
193
|
+
s.items = Array.isArray(s.items)
|
|
194
|
+
? s.items.map((i) => normalizeForStrictMode(i))
|
|
195
|
+
: normalizeForStrictMode(s.items);
|
|
196
|
+
}
|
|
197
|
+
for (const keyword of ["anyOf", "oneOf", "allOf"] as const) {
|
|
198
|
+
if (Array.isArray(s[keyword])) {
|
|
199
|
+
s[keyword] = (s[keyword] as unknown[]).map((i) =>
|
|
200
|
+
normalizeForStrictMode(i),
|
|
201
|
+
);
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
return s;
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
/**
|
|
209
|
+
* Convert tool definitions to OpenAI format
|
|
210
|
+
*/
|
|
211
|
+
export function convertToolsToOpenAI(
|
|
212
|
+
tools: Array<{ name: string; description: string; inputSchema: unknown }>,
|
|
213
|
+
options?: { strict?: boolean },
|
|
214
|
+
): OpenAI.Chat.ChatCompletionTool[] {
|
|
215
|
+
const strict = options?.strict ?? true;
|
|
216
|
+
return tools.map((tool) => ({
|
|
217
|
+
type: "function" as const,
|
|
218
|
+
function: {
|
|
219
|
+
name: tool.name,
|
|
220
|
+
description: tool.description,
|
|
221
|
+
parameters: normalizeForStrictMode(
|
|
222
|
+
tool.inputSchema,
|
|
223
|
+
) as OpenAI.FunctionParameters,
|
|
224
|
+
strict,
|
|
225
|
+
},
|
|
226
|
+
}));
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
/**
|
|
230
|
+
* Build tool params for OpenAI request
|
|
231
|
+
*/
|
|
232
|
+
export function getOpenAIToolParams(
|
|
233
|
+
tools?: Array<{ name: string; description: string; inputSchema: unknown }>,
|
|
234
|
+
options?: { strict?: boolean },
|
|
235
|
+
): {
|
|
236
|
+
tools?: OpenAI.Chat.ChatCompletionTool[];
|
|
237
|
+
tool_choice?: OpenAI.Chat.ChatCompletionToolChoiceOption;
|
|
238
|
+
} {
|
|
239
|
+
if (!tools || tools.length === 0) {
|
|
240
|
+
return {};
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
return {
|
|
244
|
+
tools: convertToolsToOpenAI(tools, options),
|
|
245
|
+
tool_choice: "auto",
|
|
246
|
+
};
|
|
247
|
+
}
|
|
@@ -0,0 +1,287 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* R1 Message Format Converter
|
|
3
|
+
*
|
|
4
|
+
* Handles the special message format required by DeepSeek Reasoner and other R1-based models.
|
|
5
|
+
* Key requirements:
|
|
6
|
+
* 1. Consecutive messages with the same role must be merged
|
|
7
|
+
* 2. reasoning_content should be passed back during tool calling in the same turn
|
|
8
|
+
* 3. No temperature parameter for reasoner models
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { formatFileContentBlock } from "@clinebot/shared";
|
|
12
|
+
import type OpenAI from "openai";
|
|
13
|
+
import type {
|
|
14
|
+
ContentBlock,
|
|
15
|
+
FileContent,
|
|
16
|
+
ImageContent,
|
|
17
|
+
Message,
|
|
18
|
+
TextContent,
|
|
19
|
+
ThinkingContent,
|
|
20
|
+
ToolResultContent,
|
|
21
|
+
ToolUseContent,
|
|
22
|
+
} from "../types/messages";
|
|
23
|
+
import {
|
|
24
|
+
normalizeToolUseInput,
|
|
25
|
+
serializeToolResultContent,
|
|
26
|
+
} from "./content-format";
|
|
27
|
+
|
|
28
|
+
// Local aliases for the OpenAI SDK chat-completion param types used below.
type OpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam;
type OpenAIContentPart = OpenAI.Chat.ChatCompletionContentPart;
|
|
30
|
+
|
|
31
|
+
/**
 * DeepSeek Reasoner message format with reasoning_content support
 */
export type R1Message = OpenAI.Chat.ChatCompletionMessageParam & {
  // Chain-of-thought text echoed back to the API during the same turn.
  reasoning_content?: string;
};
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* Convert messages to R1 format
|
|
40
|
+
*
|
|
41
|
+
* This handles:
|
|
42
|
+
* 1. Converting content blocks to OpenAI format
|
|
43
|
+
* 2. Merging consecutive messages with the same role
|
|
44
|
+
* 3. Adding reasoning_content for tool calling continuations
|
|
45
|
+
*/
|
|
46
|
+
export function convertToR1Messages(messages: Message[]): R1Message[] {
|
|
47
|
+
// First convert to OpenAI format
|
|
48
|
+
const openAiMessages = messages.flatMap(convertMessageToOpenAI);
|
|
49
|
+
|
|
50
|
+
// Then merge consecutive same-role messages
|
|
51
|
+
const merged = mergeConsecutiveMessages(openAiMessages);
|
|
52
|
+
|
|
53
|
+
// Finally add reasoning_content for current turn assistant messages
|
|
54
|
+
return addReasoningContent(merged, messages);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Convert a single message to OpenAI format (without merging)
|
|
59
|
+
*/
|
|
60
|
+
function convertMessageToOpenAI(message: Message): OpenAIMessage[] {
|
|
61
|
+
const { role, content } = message;
|
|
62
|
+
|
|
63
|
+
// Simple string content
|
|
64
|
+
if (typeof content === "string") {
|
|
65
|
+
return [{ role, content } as OpenAIMessage];
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
// Array content - need to process blocks
|
|
69
|
+
if (role === "assistant") {
|
|
70
|
+
return [convertAssistantMessage(content)];
|
|
71
|
+
} else {
|
|
72
|
+
return convertUserMessage(content);
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
function convertAssistantMessage(content: ContentBlock[]): OpenAIMessage {
|
|
77
|
+
const textParts: string[] = [];
|
|
78
|
+
const toolCalls: OpenAI.Chat.ChatCompletionMessageToolCall[] = [];
|
|
79
|
+
|
|
80
|
+
for (const block of content) {
|
|
81
|
+
switch (block.type) {
|
|
82
|
+
case "text":
|
|
83
|
+
textParts.push((block as TextContent).text);
|
|
84
|
+
break;
|
|
85
|
+
case "file": {
|
|
86
|
+
const fileBlock = block as FileContent;
|
|
87
|
+
textParts.push(
|
|
88
|
+
formatFileContentBlock(fileBlock.path, fileBlock.content),
|
|
89
|
+
);
|
|
90
|
+
break;
|
|
91
|
+
}
|
|
92
|
+
case "tool_use": {
|
|
93
|
+
const toolUse = block as ToolUseContent;
|
|
94
|
+
toolCalls.push({
|
|
95
|
+
id: toolUse.id,
|
|
96
|
+
type: "function",
|
|
97
|
+
function: {
|
|
98
|
+
name: toolUse.name,
|
|
99
|
+
arguments: JSON.stringify(normalizeToolUseInput(toolUse.input)),
|
|
100
|
+
},
|
|
101
|
+
});
|
|
102
|
+
break;
|
|
103
|
+
}
|
|
104
|
+
case "thinking":
|
|
105
|
+
// Thinking blocks are handled separately via reasoning_content
|
|
106
|
+
break;
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
const message: OpenAI.Chat.ChatCompletionAssistantMessageParam = {
|
|
111
|
+
role: "assistant",
|
|
112
|
+
content: textParts.length > 0 ? textParts.join("\n") : null,
|
|
113
|
+
};
|
|
114
|
+
|
|
115
|
+
if (toolCalls.length > 0) {
|
|
116
|
+
message.tool_calls = toolCalls;
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
return message;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
function convertUserMessage(content: ContentBlock[]): OpenAIMessage[] {
|
|
123
|
+
const messages: OpenAIMessage[] = [];
|
|
124
|
+
|
|
125
|
+
// Convert all tool results to separate tool messages
|
|
126
|
+
const toolResults = content.filter(
|
|
127
|
+
(b) => b.type === "tool_result",
|
|
128
|
+
) as ToolResultContent[];
|
|
129
|
+
for (const result of toolResults) {
|
|
130
|
+
messages.push({
|
|
131
|
+
role: "tool",
|
|
132
|
+
tool_call_id: result.tool_use_id,
|
|
133
|
+
content: serializeToolResultContent(result.content),
|
|
134
|
+
});
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
// Regular user message with text/images
|
|
138
|
+
const userContent = content.filter((b) => b.type !== "tool_result");
|
|
139
|
+
const parts: OpenAIContentPart[] = [];
|
|
140
|
+
|
|
141
|
+
for (const block of userContent) {
|
|
142
|
+
switch (block.type) {
|
|
143
|
+
case "text":
|
|
144
|
+
parts.push({ type: "text", text: (block as TextContent).text });
|
|
145
|
+
break;
|
|
146
|
+
case "image": {
|
|
147
|
+
const img = block as ImageContent;
|
|
148
|
+
parts.push({
|
|
149
|
+
type: "image_url",
|
|
150
|
+
image_url: {
|
|
151
|
+
url: `data:${img.mediaType};base64,${img.data}`,
|
|
152
|
+
},
|
|
153
|
+
});
|
|
154
|
+
break;
|
|
155
|
+
}
|
|
156
|
+
case "file": {
|
|
157
|
+
const fileBlock = block as FileContent;
|
|
158
|
+
parts.push({
|
|
159
|
+
type: "text",
|
|
160
|
+
text: formatFileContentBlock(fileBlock.path, fileBlock.content),
|
|
161
|
+
});
|
|
162
|
+
break;
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
}
|
|
166
|
+
if (parts.length === 0) {
|
|
167
|
+
return messages;
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
messages.push({
|
|
171
|
+
role: "user",
|
|
172
|
+
content:
|
|
173
|
+
parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts,
|
|
174
|
+
});
|
|
175
|
+
|
|
176
|
+
return messages;
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
/**
|
|
180
|
+
* Merge consecutive messages with the same role
|
|
181
|
+
*
|
|
182
|
+
* DeepSeek Reasoner does not support successive messages with the same role,
|
|
183
|
+
* so we need to merge them together.
|
|
184
|
+
*/
|
|
185
|
+
function mergeConsecutiveMessages(messages: OpenAIMessage[]): OpenAIMessage[] {
|
|
186
|
+
return messages.reduce<OpenAIMessage[]>((merged, message) => {
|
|
187
|
+
const lastMessage = merged[merged.length - 1];
|
|
188
|
+
|
|
189
|
+
// Never merge tool messages: each tool response has its own tool_call_id.
|
|
190
|
+
if (lastMessage?.role === message.role && message.role !== "tool") {
|
|
191
|
+
mergeMessageContent(lastMessage, message);
|
|
192
|
+
} else {
|
|
193
|
+
merged.push({ ...message });
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
return merged;
|
|
197
|
+
}, []);
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
/**
|
|
201
|
+
* Merge content from source message into target message
|
|
202
|
+
*/
|
|
203
|
+
function mergeMessageContent(
|
|
204
|
+
target: OpenAIMessage,
|
|
205
|
+
source: OpenAIMessage,
|
|
206
|
+
): void {
|
|
207
|
+
const targetContent = (target as any).content;
|
|
208
|
+
const sourceContent = (source as any).content;
|
|
209
|
+
|
|
210
|
+
if (typeof targetContent === "string" && typeof sourceContent === "string") {
|
|
211
|
+
(target as any).content = `${targetContent}\n${sourceContent}`;
|
|
212
|
+
} else {
|
|
213
|
+
// Convert to array format and merge
|
|
214
|
+
const targetArray = normalizeToArray(targetContent);
|
|
215
|
+
const sourceArray = normalizeToArray(sourceContent);
|
|
216
|
+
(target as any).content = [...targetArray, ...sourceArray];
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
function normalizeToArray(
|
|
221
|
+
content: string | null | OpenAIContentPart[],
|
|
222
|
+
): (
|
|
223
|
+
| OpenAI.Chat.ChatCompletionContentPartText
|
|
224
|
+
| OpenAI.Chat.ChatCompletionContentPartImage
|
|
225
|
+
)[] {
|
|
226
|
+
if (content === null || content === undefined) {
|
|
227
|
+
return [];
|
|
228
|
+
}
|
|
229
|
+
if (Array.isArray(content)) {
|
|
230
|
+
return content as (
|
|
231
|
+
| OpenAI.Chat.ChatCompletionContentPartText
|
|
232
|
+
| OpenAI.Chat.ChatCompletionContentPartImage
|
|
233
|
+
)[];
|
|
234
|
+
}
|
|
235
|
+
return [{ type: "text" as const, text: content }];
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
/**
|
|
239
|
+
* Add reasoning_content to assistant messages for DeepSeek Reasoner
|
|
240
|
+
*
|
|
241
|
+
* Per DeepSeek API: reasoning_content should be passed back during tool calling
|
|
242
|
+
* in the same turn, and omitted when starting a new turn.
|
|
243
|
+
*/
|
|
244
|
+
function addReasoningContent(
|
|
245
|
+
openAiMessages: OpenAIMessage[],
|
|
246
|
+
originalMessages: Message[],
|
|
247
|
+
): R1Message[] {
|
|
248
|
+
// Find last user message index (start of current turn)
|
|
249
|
+
let lastUserIndex = -1;
|
|
250
|
+
for (let i = openAiMessages.length - 1; i >= 0; i--) {
|
|
251
|
+
if (openAiMessages[i].role === "user") {
|
|
252
|
+
lastUserIndex = i;
|
|
253
|
+
break;
|
|
254
|
+
}
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
// Extract thinking content from original messages, keyed by assistant message index
|
|
258
|
+
const thinkingByIndex = new Map<number, string>();
|
|
259
|
+
let assistantIdx = 0;
|
|
260
|
+
|
|
261
|
+
for (const msg of originalMessages) {
|
|
262
|
+
if (msg.role === "assistant") {
|
|
263
|
+
if (Array.isArray(msg.content)) {
|
|
264
|
+
const thinking = msg.content
|
|
265
|
+
.filter((p): p is ThinkingContent => p.type === "thinking")
|
|
266
|
+
.map((p) => p.thinking)
|
|
267
|
+
.join("\n");
|
|
268
|
+
if (thinking) {
|
|
269
|
+
thinkingByIndex.set(assistantIdx, thinking);
|
|
270
|
+
}
|
|
271
|
+
}
|
|
272
|
+
assistantIdx++;
|
|
273
|
+
}
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
// Add reasoning_content only to assistant messages in current turn
|
|
277
|
+
let aiIdx = 0;
|
|
278
|
+
return openAiMessages.map((msg, i): R1Message => {
|
|
279
|
+
if (msg.role === "assistant") {
|
|
280
|
+
const thinking = thinkingByIndex.get(aiIdx++);
|
|
281
|
+
if (thinking && i >= lastUserIndex) {
|
|
282
|
+
return { ...msg, reasoning_content: thinking };
|
|
283
|
+
}
|
|
284
|
+
}
|
|
285
|
+
return msg;
|
|
286
|
+
});
|
|
287
|
+
}
|