snow-ai 0.2.10 → 0.2.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api/anthropic.d.ts +36 -0
- package/dist/api/anthropic.js +372 -0
- package/dist/api/chat.d.ts +3 -0
- package/dist/api/chat.js +39 -11
- package/dist/api/gemini.d.ts +35 -0
- package/dist/api/gemini.js +310 -0
- package/dist/api/responses.d.ts +3 -0
- package/dist/api/responses.js +50 -42
- package/dist/app.js +4 -1
- package/dist/hooks/useConversation.js +27 -9
- package/dist/ui/components/ChatInput.d.ts +3 -0
- package/dist/ui/components/ChatInput.js +104 -18
- package/dist/ui/pages/ApiConfigScreen.js +41 -6
- package/dist/ui/pages/ChatScreen.js +4 -1
- package/dist/ui/pages/ModelConfigScreen.js +87 -32
- package/dist/ui/pages/SystemPromptConfigScreen.d.ts +6 -0
- package/dist/ui/pages/SystemPromptConfigScreen.js +83 -0
- package/dist/ui/pages/WelcomeScreen.js +5 -0
- package/dist/utils/apiConfig.d.ts +11 -2
- package/dist/utils/apiConfig.js +60 -14
- package/dist/utils/textBuffer.d.ts +9 -2
- package/dist/utils/textBuffer.js +114 -22
- package/package.json +3 -1
- package/readme.md +26 -6
package/dist/api/anthropic.d.ts
ADDED
@@ -0,0 +1,36 @@
+import type { ChatMessage } from './chat.js';
+import type { ChatCompletionTool } from 'openai/resources/chat/completions';
+export interface AnthropicOptions {
+    model: string;
+    messages: ChatMessage[];
+    temperature?: number;
+    max_tokens?: number;
+    tools?: ChatCompletionTool[];
+    sessionId?: string;
+}
+export interface UsageInfo {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+    cache_creation_input_tokens?: number;
+    cache_read_input_tokens?: number;
+}
+export interface AnthropicStreamChunk {
+    type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
+    content?: string;
+    tool_calls?: Array<{
+        id: string;
+        type: 'function';
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
+    delta?: string;
+    usage?: UsageInfo;
+}
+export declare function resetAnthropicClient(): void;
+/**
+ * Create streaming chat completion using Anthropic API
+ */
+export declare function createStreamingAnthropicCompletion(options: AnthropicOptions, abortSignal?: AbortSignal): AsyncGenerator<AnthropicStreamChunk, void, unknown>;
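The declaration file above defines the whole Anthropic streaming surface as an async generator. A minimal consumer sketch, assuming only the types declared above; the import path, model id, and message contents are invented for illustration:

import { createStreamingAnthropicCompletion } from 'snow-ai/dist/api/anthropic.js'; // import path assumed

// Hypothetical consumer: accumulate text, surface tool calls and usage.
const stream = createStreamingAnthropicCompletion({
    model: 'claude-sonnet-4-20250514', // assumed model id, not taken from the package
    messages: [{ role: 'user', content: 'Summarize the release notes.' }],
});
let text = '';
for await (const chunk of stream) {
    if (chunk.type === 'content' && chunk.content) text += chunk.content;
    else if (chunk.type === 'tool_calls') console.log('tool calls:', chunk.tool_calls);
    else if (chunk.type === 'usage') console.log('usage:', chunk.usage);
}
console.log(text);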
package/dist/api/anthropic.js
ADDED
@@ -0,0 +1,372 @@
+import Anthropic from '@anthropic-ai/sdk';
+import { createHash, randomUUID } from 'crypto';
+import { getOpenAiConfig, getCustomSystemPrompt } from '../utils/apiConfig.js';
+import { SYSTEM_PROMPT } from './systemPrompt.js';
+let anthropicClient = null;
+function getAnthropicClient() {
+    if (!anthropicClient) {
+        const config = getOpenAiConfig();
+        if (!config.apiKey) {
+            throw new Error('Anthropic API configuration is incomplete. Please configure API key first.');
+        }
+        const clientConfig = {
+            apiKey: config.apiKey,
+        };
+        // Support custom baseUrl for proxy servers
+        if (config.baseUrl && config.baseUrl !== 'https://api.openai.com/v1') {
+            clientConfig.baseURL = config.baseUrl;
+        }
+        // If Anthropic Beta is enabled, add default query parameter
+        if (config.anthropicBeta) {
+            clientConfig.defaultQuery = { beta: 'true' };
+        }
+        // Add Authorization header for enhanced compatibility
+        clientConfig.defaultHeaders = {
+            'Authorization': `Bearer ${config.apiKey}`,
+        };
+        anthropicClient = new Anthropic(clientConfig);
+    }
+    return anthropicClient;
+}
+export function resetAnthropicClient() {
+    anthropicClient = null;
+}
+/**
+ * Generate a user_id in the format: user_<hash>_account__session_<uuid>
+ * This matches Anthropic's expected format for tracking and caching
+ * The hash is based on sessionId only to keep it consistent within the same session
+ */
+function generateUserId(sessionId) {
+    // Generate a 64-character hash (consistent for the same session)
+    const hash = createHash('sha256')
+        .update(`anthropic_user_${sessionId}`)
+        .digest('hex');
+    return `user_${hash}_account__session_${sessionId}`;
+}
+/**
+ * Convert OpenAI-style tools to Anthropic tool format
+ * Adds cache_control to the last tool for prompt caching
+ */
+function convertToolsToAnthropic(tools) {
+    if (!tools || tools.length === 0) {
+        return undefined;
+    }
+    const convertedTools = tools
+        .filter(tool => tool.type === 'function' && 'function' in tool)
+        .map(tool => {
+        if (tool.type === 'function' && 'function' in tool) {
+            return {
+                name: tool.function.name,
+                description: tool.function.description || '',
+                input_schema: tool.function.parameters
+            };
+        }
+        throw new Error('Invalid tool format');
+    });
+    // Add cache_control to the last tool for prompt caching
+    if (convertedTools.length > 0) {
+        const lastTool = convertedTools[convertedTools.length - 1];
+        lastTool.cache_control = { type: 'ephemeral' };
+    }
+    return convertedTools;
+}
+/**
+ * Convert our ChatMessage format to Anthropic's message format
+ * Adds cache_control to system prompt and last user message for prompt caching
+ * Logic:
+ * 1. If custom system prompt exists: use custom as system, prepend default as first user message
+ * 2. If no custom system prompt: use default as system
+ */
+function convertToAnthropicMessages(messages) {
+    const customSystemPrompt = getCustomSystemPrompt();
+    let systemContent;
+    const anthropicMessages = [];
+    for (const msg of messages) {
+        // Extract system message
+        if (msg.role === 'system') {
+            systemContent = msg.content;
+            continue;
+        }
+        // Handle tool result messages
+        if (msg.role === 'tool' && msg.tool_call_id) {
+            // Anthropic expects tool results as user messages with tool_result content
+            anthropicMessages.push({
+                role: 'user',
+                content: [{
+                        type: 'tool_result',
+                        tool_use_id: msg.tool_call_id,
+                        content: msg.content
+                    }]
+            });
+            continue;
+        }
+        // Handle user messages with images
+        if (msg.role === 'user' && msg.images && msg.images.length > 0) {
+            const content = [];
+            // Add text content
+            if (msg.content) {
+                content.push({
+                    type: 'text',
+                    text: msg.content
+                });
+            }
+            // Add images
+            for (const image of msg.images) {
+                // Extract base64 data and mime type
+                const base64Match = image.data.match(/^data:([^;]+);base64,(.+)$/);
+                if (base64Match) {
+                    content.push({
+                        type: 'image',
+                        source: {
+                            type: 'base64',
+                            media_type: base64Match[1] || image.mimeType,
+                            data: base64Match[2] || ''
+                        }
+                    });
+                }
+            }
+            anthropicMessages.push({
+                role: 'user',
+                content
+            });
+            continue;
+        }
+        // Handle assistant messages with tool calls
+        if (msg.role === 'assistant' && msg.tool_calls && msg.tool_calls.length > 0) {
+            const content = [];
+            // Add text content if present
+            if (msg.content) {
+                content.push({
+                    type: 'text',
+                    text: msg.content
+                });
+            }
+            // Add tool uses
+            for (const toolCall of msg.tool_calls) {
+                content.push({
+                    type: 'tool_use',
+                    id: toolCall.id,
+                    name: toolCall.function.name,
+                    input: JSON.parse(toolCall.function.arguments)
+                });
+            }
+            anthropicMessages.push({
+                role: 'assistant',
+                content
+            });
+            continue;
+        }
+        // Handle regular text messages
+        if (msg.role === 'user' || msg.role === 'assistant') {
+            anthropicMessages.push({
+                role: msg.role,
+                content: msg.content
+            });
+        }
+    }
+    // If a custom system prompt is configured
+    if (customSystemPrompt) {
+        // Use the custom prompt as system; the default system prompt becomes the first user message
+        systemContent = customSystemPrompt;
+        anthropicMessages.unshift({
+            role: 'user',
+            content: SYSTEM_PROMPT
+        });
+    }
+    else if (!systemContent) {
+        // No custom system prompt: use the default system prompt as system
+        systemContent = SYSTEM_PROMPT;
+    }
+    // Add cache_control to last user message for prompt caching
+    if (anthropicMessages.length > 0) {
+        const lastMessageIndex = anthropicMessages.length - 1;
+        const lastMessage = anthropicMessages[lastMessageIndex];
+        if (lastMessage && lastMessage.role === 'user') {
+            // Convert content to array format if it's a string
+            if (typeof lastMessage.content === 'string') {
+                lastMessage.content = [{
+                        type: 'text',
+                        text: lastMessage.content,
+                        cache_control: { type: 'ephemeral' }
+                    }];
+            }
+            else if (Array.isArray(lastMessage.content)) {
+                // Add cache_control to last content block
+                const lastContentIndex = lastMessage.content.length - 1;
+                if (lastContentIndex >= 0) {
+                    const lastContent = lastMessage.content[lastContentIndex];
+                    lastContent.cache_control = { type: 'ephemeral' };
+                }
+            }
+        }
+    }
+    // Format system prompt with cache_control (only if we have a system prompt)
+    const system = systemContent ? [{
+            type: 'text',
+            text: systemContent,
+            cache_control: { type: 'ephemeral' }
+        }] : undefined;
+    return { system, messages: anthropicMessages };
+}
+/**
+ * Create streaming chat completion using Anthropic API
+ */
+export async function* createStreamingAnthropicCompletion(options, abortSignal) {
+    const client = getAnthropicClient();
+    try {
+        const { system, messages } = convertToAnthropicMessages(options.messages);
+        // Generate user_id with session tracking if sessionId is provided
+        const sessionId = options.sessionId || randomUUID();
+        const userId = generateUserId(sessionId);
+        // Prepare request body for logging
+        const requestBody = {
+            model: options.model,
+            max_tokens: options.max_tokens || 4096,
+            temperature: options.temperature ?? 0.7,
+            system,
+            messages,
+            tools: convertToolsToAnthropic(options.tools),
+            metadata: {
+                user_id: userId
+            },
+            stream: true
+        };
+        // Create streaming request
+        const stream = await client.messages.create(requestBody);
+        let contentBuffer = '';
+        let toolCallsBuffer = new Map();
+        let hasToolCalls = false;
+        let usageData;
+        let currentToolUseId = null; // Track current tool use block ID
+        for await (const event of stream) {
+            if (abortSignal?.aborted) {
+                return;
+            }
+            // Handle different event types
+            if (event.type === 'content_block_start') {
+                const block = event.content_block;
+                // Handle tool use blocks
+                if (block.type === 'tool_use') {
+                    hasToolCalls = true;
+                    currentToolUseId = block.id; // Store current tool use ID
+                    toolCallsBuffer.set(block.id, {
+                        id: block.id,
+                        type: 'function',
+                        function: {
+                            name: block.name,
+                            arguments: ''
+                        }
+                    });
+                    // Yield delta for token counting
+                    yield {
+                        type: 'tool_call_delta',
+                        delta: block.name
+                    };
+                }
+            }
+            else if (event.type === 'content_block_delta') {
+                const delta = event.delta;
+                // Handle text content
+                if (delta.type === 'text_delta') {
+                    const text = delta.text;
+                    contentBuffer += text;
+                    yield {
+                        type: 'content',
+                        content: text
+                    };
+                }
+                // Handle tool input deltas
+                if (delta.type === 'input_json_delta') {
+                    const jsonDelta = delta.partial_json;
+                    // Use currentToolUseId instead of event.index
+                    if (currentToolUseId) {
+                        const toolCall = toolCallsBuffer.get(currentToolUseId);
+                        if (toolCall) {
+                            toolCall.function.arguments += jsonDelta;
+                            // Yield delta for token counting
+                            yield {
+                                type: 'tool_call_delta',
+                                delta: jsonDelta
+                            };
+                        }
+                    }
+                }
+            }
+            else if (event.type === 'content_block_stop') {
+                // Reset current tool use ID when block ends
+                currentToolUseId = null;
+            }
+            else if (event.type === 'message_start') {
+                // Capture initial usage data (including cache metrics)
+                if (event.message.usage) {
+                    usageData = {
+                        prompt_tokens: event.message.usage.input_tokens || 0,
+                        completion_tokens: event.message.usage.output_tokens || 0,
+                        total_tokens: (event.message.usage.input_tokens || 0) + (event.message.usage.output_tokens || 0),
+                        cache_creation_input_tokens: event.message.usage.cache_creation_input_tokens,
+                        cache_read_input_tokens: event.message.usage.cache_read_input_tokens
+                    };
+                }
+            }
+            else if (event.type === 'message_delta') {
+                // Update usage data with final token counts (including cache metrics)
+                if (event.usage) {
+                    if (!usageData) {
+                        usageData = {
+                            prompt_tokens: 0,
+                            completion_tokens: 0,
+                            total_tokens: 0
+                        };
+                    }
+                    usageData.completion_tokens = event.usage.output_tokens || 0;
+                    usageData.total_tokens = usageData.prompt_tokens + usageData.completion_tokens;
+                    // Update cache metrics if present
+                    if (event.usage.cache_creation_input_tokens !== undefined) {
+                        usageData.cache_creation_input_tokens = event.usage.cache_creation_input_tokens;
+                    }
+                    if (event.usage.cache_read_input_tokens !== undefined) {
+                        usageData.cache_read_input_tokens = event.usage.cache_read_input_tokens;
+                    }
+                }
+            }
+        }
+        // Yield tool calls if any (only after stream completes)
+        if (hasToolCalls && toolCallsBuffer.size > 0) {
+            // Validate that all tool call arguments are complete valid JSON
+            const toolCalls = Array.from(toolCallsBuffer.values());
+            for (const toolCall of toolCalls) {
+                try {
+                    // Validate JSON completeness
+                    JSON.parse(toolCall.function.arguments);
+                }
+                catch (e) {
+                    throw new Error(`Incomplete tool call JSON for ${toolCall.function.name}: ${toolCall.function.arguments}`);
+                }
+            }
+            yield {
+                type: 'tool_calls',
+                tool_calls: toolCalls
+            };
+        }
+        // Yield usage information if available
+        if (usageData) {
+            yield {
+                type: 'usage',
+                usage: usageData
+            };
+        }
+        // Signal completion
+        yield {
+            type: 'done'
+        };
+    }
+    catch (error) {
+        if (abortSignal?.aborted) {
+            return;
+        }
+        if (error instanceof Error) {
+            throw new Error(`Anthropic streaming completion failed: ${error.message}`);
+        }
+        throw new Error('Anthropic streaming completion failed: Unknown error');
+    }
+}
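Taken together, the converters set three ephemeral cache breakpoints: on the system prompt, on the last tool, and on the last content block of the last user message. A hand-written sketch of the resulting request shape; all values are illustrative, not captured from the package:

// Illustrative request body as produced by the converters above.
const exampleRequest = {
    model: 'claude-sonnet-4-20250514', // assumed model id
    max_tokens: 4096,
    temperature: 0.7,
    system: [{ type: 'text', text: '<system prompt>', cache_control: { type: 'ephemeral' } }],
    tools: [
        // ...earlier tools unmarked; only the last one carries the breakpoint:
        { name: 'read_file', description: '<description>', input_schema: {}, cache_control: { type: 'ephemeral' } },
    ],
    messages: [
        { role: 'user', content: [{ type: 'text', text: 'Hi', cache_control: { type: 'ephemeral' } }] },
    ],
    metadata: { user_id: 'user_<sha256-hex>_account__session_<uuid>' },
    stream: true,
};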
package/dist/api/chat.d.ts
CHANGED
@@ -69,6 +69,9 @@ export interface UsageInfo {
     prompt_tokens: number;
     completion_tokens: number;
     total_tokens: number;
+    cache_creation_input_tokens?: number;
+    cache_read_input_tokens?: number;
+    cached_tokens?: number;
 }
 export interface StreamChunk {
     type: 'content' | 'tool_calls' | 'tool_call_delta' | 'reasoning_delta' | 'done' | 'usage';
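The three optional fields come from different providers: the Anthropic path fills cache_creation_input_tokens and cache_read_input_tokens, while the OpenAI-compatible path fills cached_tokens. A small normalizing helper, a sketch of our own rather than anything the package ships:

import type { UsageInfo } from 'snow-ai/dist/api/chat.js'; // import path assumed

// Hypothetical helper: report cache reads uniformly across providers.
function cacheReadTokens(usage: UsageInfo): number {
    return usage.cache_read_input_tokens ?? usage.cached_tokens ?? 0;
}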
package/dist/api/chat.js
CHANGED
@@ -1,12 +1,16 @@
 import OpenAI from 'openai';
-import { getOpenAiConfig } from '../utils/apiConfig.js';
+import { getOpenAiConfig, getCustomSystemPrompt } from '../utils/apiConfig.js';
 import { executeMCPTool } from '../utils/mcpToolsManager.js';
 import { SYSTEM_PROMPT } from './systemPrompt.js';
 /**
  * Convert our ChatMessage format to OpenAI's ChatCompletionMessageParam format
  * Automatically prepends system prompt if not present
+ * Logic:
+ * 1. If custom system prompt exists: use custom as system, prepend default as first user message
+ * 2. If no custom system prompt: use default as system
  */
 function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
+    const customSystemPrompt = getCustomSystemPrompt();
     let result = messages.map(msg => {
         // If the message contains images, use the content-array format
         if (msg.role === 'user' && msg.images && msg.images.length > 0) {
@@ -51,15 +55,37 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
         }
         return baseMessage;
     });
-    //
-    if (includeSystemPrompt
-
-
-
-
-
-
-
+    // If a system prompt is required
+    if (includeSystemPrompt) {
+        // Skip if the first message is already a system message
+        if (result.length > 0 && result[0]?.role === 'system') {
+            return result;
+        }
+        // If a custom system prompt is configured
+        if (customSystemPrompt) {
+            // Custom prompt becomes the system message; the default prompt becomes the first user message
+            result = [
+                {
+                    role: 'system',
+                    content: customSystemPrompt
+                },
+                {
+                    role: 'user',
+                    content: SYSTEM_PROMPT
+                },
+                ...result
+            ];
+        }
+        else {
+            // No custom system prompt: the default prompt becomes the system message
+            result = [
+                {
+                    role: 'system',
+                    content: SYSTEM_PROMPT
+                },
+                ...result
+            ];
+        }
     }
     return result;
 }
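The branch above changes message ordering, so it is worth spelling out. A sketch of the two shapes convertToOpenAIMessages now returns (the function itself is module-private; contents abbreviated):

// Hypothetical illustration of both return shapes.
const withCustomPrompt = [
    { role: 'system', content: '<customSystemPrompt>' },
    { role: 'user', content: '<SYSTEM_PROMPT>' },
    // ...mapped conversation messages
];
const withoutCustomPrompt = [
    { role: 'system', content: '<SYSTEM_PROMPT>' },
    // ...mapped conversation messages
];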
@@ -249,7 +275,9 @@ export async function* createStreamingChatCompletion(options, abortSignal) {
             usageData = {
                 prompt_tokens: usageValue.prompt_tokens || 0,
                 completion_tokens: usageValue.completion_tokens || 0,
-                total_tokens: usageValue.total_tokens || 0
+                total_tokens: usageValue.total_tokens || 0,
+                // OpenAI Chat API: cached_tokens in prompt_tokens_details
+                cached_tokens: usageValue.prompt_tokens_details?.cached_tokens
             };
         }
         // Skip content processing if no choices (but usage is already captured above)
package/dist/api/gemini.d.ts
ADDED
@@ -0,0 +1,35 @@
+import type { ChatMessage } from './chat.js';
+import type { ChatCompletionTool } from 'openai/resources/chat/completions';
+export interface GeminiOptions {
+    model: string;
+    messages: ChatMessage[];
+    temperature?: number;
+    tools?: ChatCompletionTool[];
+}
+export interface UsageInfo {
+    prompt_tokens: number;
+    completion_tokens: number;
+    total_tokens: number;
+    cache_creation_input_tokens?: number;
+    cache_read_input_tokens?: number;
+    cached_tokens?: number;
+}
+export interface GeminiStreamChunk {
+    type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
+    content?: string;
+    tool_calls?: Array<{
+        id: string;
+        type: 'function';
+        function: {
+            name: string;
+            arguments: string;
+        };
+    }>;
+    delta?: string;
+    usage?: UsageInfo;
+}
+export declare function resetGeminiClient(): void;
+/**
+ * Create streaming chat completion using Gemini API
+ */
+export declare function createStreamingGeminiCompletion(options: GeminiOptions, abortSignal?: AbortSignal): AsyncGenerator<GeminiStreamChunk, void, unknown>;
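Since GeminiStreamChunk is shape-compatible with AnthropicStreamChunk, callers can dispatch on provider and consume either generator with the same loop. A sketch under that assumption; the dispatcher and import paths are ours, not part of the package:

import type { ChatMessage } from 'snow-ai/dist/api/chat.js'; // import path assumed
import { createStreamingAnthropicCompletion } from 'snow-ai/dist/api/anthropic.js';
import { createStreamingGeminiCompletion } from 'snow-ai/dist/api/gemini.js';

// Hypothetical provider dispatch: both generators yield the same chunk shape.
async function streamText(provider: 'anthropic' | 'gemini', model: string, messages: ChatMessage[]) {
    const stream = provider === 'anthropic'
        ? createStreamingAnthropicCompletion({ model, messages })
        : createStreamingGeminiCompletion({ model, messages });
    let text = '';
    for await (const chunk of stream) {
        if (chunk.type === 'content' && chunk.content) text += chunk.content;
    }
    return text;
}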