snow-ai 0.2.9 → 0.2.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,34 @@
1
+ import type { ChatMessage } from './chat.js';
2
+ import type { ChatCompletionTool } from 'openai/resources/chat/completions';
3
+ export interface AnthropicOptions {
4
+ model: string;
5
+ messages: ChatMessage[];
6
+ temperature?: number;
7
+ max_tokens?: number;
8
+ tools?: ChatCompletionTool[];
9
+ sessionId?: string;
10
+ }
11
+ export interface UsageInfo {
12
+ prompt_tokens: number;
13
+ completion_tokens: number;
14
+ total_tokens: number;
15
+ }
16
+ export interface AnthropicStreamChunk {
17
+ type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
18
+ content?: string;
19
+ tool_calls?: Array<{
20
+ id: string;
21
+ type: 'function';
22
+ function: {
23
+ name: string;
24
+ arguments: string;
25
+ };
26
+ }>;
27
+ delta?: string;
28
+ usage?: UsageInfo;
29
+ }
30
+ export declare function resetAnthropicClient(): void;
31
+ /**
32
+ * Create streaming chat completion using Anthropic API
33
+ */
34
+ export declare function createStreamingAnthropicCompletion(options: AnthropicOptions, abortSignal?: AbortSignal): AsyncGenerator<AnthropicStreamChunk, void, unknown>;
@@ -0,0 +1,337 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
2
+ import { createHash, randomUUID } from 'crypto';
3
+ import { getOpenAiConfig } from '../utils/apiConfig.js';
4
+ import { SYSTEM_PROMPT } from './systemPrompt.js';
5
// Lazily-built singleton Anthropic SDK client.
let anthropicClient = null;
/**
 * Return the shared Anthropic client, constructing it on first use.
 * Reads the (shared OpenAI-style) config; throws if no API key is configured.
 */
function getAnthropicClient() {
    if (anthropicClient) {
        return anthropicClient;
    }
    const config = getOpenAiConfig();
    if (!config.apiKey) {
        throw new Error('Anthropic API configuration is incomplete. Please configure API key first.');
    }
    const clientConfig = {
        apiKey: config.apiKey,
    };
    // Honor a custom baseUrl (proxy servers), but ignore the stock OpenAI endpoint.
    if (config.baseUrl && config.baseUrl !== 'https://api.openai.com/v1') {
        clientConfig.baseURL = config.baseUrl;
    }
    // Opt into Anthropic Beta behavior via a default query parameter when enabled.
    if (config.anthropicBeta) {
        clientConfig.defaultQuery = { beta: 'true' };
    }
    // Some proxies expect a Bearer token; send it alongside the SDK's own auth header.
    clientConfig.defaultHeaders = {
        'Authorization': `Bearer ${config.apiKey}`,
    };
    anthropicClient = new Anthropic(clientConfig);
    return anthropicClient;
}
/** Drop the cached client; the next getAnthropicClient() call rebuilds it. */
export function resetAnthropicClient() {
    anthropicClient = null;
}
34
/**
 * Generate a user_id in the format: user_<hash>_account__session_<uuid>
 * This matches Anthropic's expected format for tracking and caching.
 * The hash is derived from the sessionId alone, so it stays stable for the
 * lifetime of a session.
 */
function generateUserId(sessionId) {
    // sha256 hex digest -> 64 characters, deterministic per session.
    const sessionHash = createHash('sha256')
        .update(`anthropic_user_${sessionId}`)
        .digest('hex');
    return `user_${sessionHash}_account__session_${sessionId}`;
}
46
/**
 * Convert OpenAI-style function tools into Anthropic's tool schema.
 * Returns undefined when there is nothing to convert.
 */
function convertToolsToAnthropic(tools) {
    if (!tools || tools.length === 0) {
        return undefined;
    }
    const functionTools = tools.filter(tool => tool.type === 'function' && 'function' in tool);
    return functionTools.map(tool => {
        if (!(tool.type === 'function' && 'function' in tool)) {
            // Unreachable after the filter above; kept as a defensive guard.
            throw new Error('Invalid tool format');
        }
        return {
            name: tool.function.name,
            description: tool.function.description || '',
            input_schema: tool.function.parameters
        };
    });
}
66
/**
 * Convert our ChatMessage format to Anthropic's message format.
 * Adds cache_control to the system prompt and to the last user message so
 * Anthropic prompt caching can take effect.
 * System-prompt logic:
 * 1. If a custom system prompt is configured: it becomes `system`, and the
 *    default SYSTEM_PROMPT is prepended as the first user message.
 * 2. Otherwise: the default SYSTEM_PROMPT becomes `system`.
 */
function convertToAnthropicMessages(messages) {
    const config = getOpenAiConfig();
    const customSystemPrompt = config.systemPrompt;
    let systemContent;
    const anthropicMessages = [];
    for (const msg of messages) {
        // Lift any system message out of the conversation; it becomes `system`.
        if (msg.role === 'system') {
            systemContent = msg.content;
            continue;
        }
        // Tool results: Anthropic models these as user messages with tool_result blocks.
        if (msg.role === 'tool' && msg.tool_call_id) {
            anthropicMessages.push({
                role: 'user',
                content: [{
                    type: 'tool_result',
                    tool_use_id: msg.tool_call_id,
                    content: msg.content
                }]
            });
            continue;
        }
        // User messages carrying images become mixed text + image content blocks.
        if (msg.role === 'user' && msg.images && msg.images.length > 0) {
            const blocks = [];
            if (msg.content) {
                blocks.push({
                    type: 'text',
                    text: msg.content
                });
            }
            for (const image of msg.images) {
                // Expect a data URL; entries that don't match are silently skipped.
                const base64Match = image.data.match(/^data:([^;]+);base64,(.+)$/);
                if (base64Match) {
                    blocks.push({
                        type: 'image',
                        source: {
                            type: 'base64',
                            media_type: base64Match[1] || image.mimeType,
                            data: base64Match[2] || ''
                        }
                    });
                }
            }
            anthropicMessages.push({
                role: 'user',
                content: blocks
            });
            continue;
        }
        // Assistant messages with tool calls become text + tool_use blocks.
        if (msg.role === 'assistant' && msg.tool_calls && msg.tool_calls.length > 0) {
            const blocks = [];
            if (msg.content) {
                blocks.push({
                    type: 'text',
                    text: msg.content
                });
            }
            for (const toolCall of msg.tool_calls) {
                blocks.push({
                    type: 'tool_use',
                    id: toolCall.id,
                    name: toolCall.function.name,
                    // NOTE(review): throws on malformed arguments JSON — presumably
                    // upstream guarantees well-formed tool-call arguments; confirm.
                    input: JSON.parse(toolCall.function.arguments)
                });
            }
            anthropicMessages.push({
                role: 'assistant',
                content: blocks
            });
            continue;
        }
        // Plain text user/assistant messages pass through unchanged.
        if (msg.role === 'user' || msg.role === 'assistant') {
            anthropicMessages.push({
                role: msg.role,
                content: msg.content
            });
        }
    }
    if (customSystemPrompt) {
        // Custom prompt takes the `system` slot (overriding any system message
        // found above); the default prompt is injected as the first user message.
        systemContent = customSystemPrompt;
        anthropicMessages.unshift({
            role: 'user',
            content: SYSTEM_PROMPT
        });
    }
    else if (!systemContent) {
        // No custom prompt and none in the history: fall back to the default.
        systemContent = SYSTEM_PROMPT;
    }
    // Mark the last user message as cacheable for Anthropic prompt caching.
    if (anthropicMessages.length > 0) {
        const lastMessage = anthropicMessages[anthropicMessages.length - 1];
        if (lastMessage && lastMessage.role === 'user') {
            if (typeof lastMessage.content === 'string') {
                // Promote plain strings to block form so cache_control can attach.
                lastMessage.content = [{
                    type: 'text',
                    text: lastMessage.content,
                    cache_control: { type: 'ephemeral' }
                }];
            }
            else if (Array.isArray(lastMessage.content) && lastMessage.content.length > 0) {
                // Attach cache_control to the final content block.
                lastMessage.content[lastMessage.content.length - 1].cache_control = { type: 'ephemeral' };
            }
        }
    }
    // The system prompt itself is also marked cacheable (when present).
    const system = systemContent ? [{
        type: 'text',
        text: systemContent,
        cache_control: { type: 'ephemeral' }
    }] : undefined;
    return { system, messages: anthropicMessages };
}
205
/**
 * Create streaming chat completion using Anthropic API.
 *
 * Yields AnthropicStreamChunk objects:
 *  - { type: 'content', content }        incremental assistant text
 *  - { type: 'tool_call_delta', delta }  raw tool-name/argument fragments (for token counting)
 *  - { type: 'tool_calls', tool_calls }  completed tool calls in OpenAI shape
 *  - { type: 'usage', usage }            token usage, when the API reports it
 *  - { type: 'done' }                    end of stream
 *
 * @param options     AnthropicOptions (model, messages, tools, sampling, sessionId)
 * @param abortSignal optional signal; once aborted the generator returns quietly
 * @throws Error wrapping any underlying API failure (unless aborted)
 */
export async function* createStreamingAnthropicCompletion(options, abortSignal) {
    const client = getAnthropicClient();
    try {
        const { system, messages } = convertToAnthropicMessages(options.messages);
        // Tie the request to a session so Anthropic-side tracking/caching stays stable.
        const sessionId = options.sessionId || randomUUID();
        const userId = generateUserId(sessionId);
        const requestBody = {
            model: options.model,
            max_tokens: options.max_tokens || 4096,
            temperature: options.temperature ?? 0.7,
            system,
            messages,
            tools: convertToolsToAnthropic(options.tools),
            metadata: {
                user_id: userId
            },
            stream: true
        };
        // Create streaming request
        const stream = await client.messages.create(requestBody);
        // Tool calls are accumulated per content-block index: input_json_delta
        // events carry only `index` (not the tool-use id), so the index is the
        // one key shared by both start and delta events.
        const toolCallsBuffer = new Map();
        let hasToolCalls = false;
        let usageData;
        for await (const event of stream) {
            if (abortSignal?.aborted) {
                return;
            }
            if (event.type === 'content_block_start') {
                const block = event.content_block;
                if (block.type === 'tool_use') {
                    hasToolCalls = true;
                    // BUGFIX: key by event.index. This was previously keyed by
                    // block.id, so the index-based lookup in input_json_delta
                    // below never matched and tool arguments stayed empty.
                    toolCallsBuffer.set(event.index.toString(), {
                        id: block.id,
                        type: 'function',
                        function: {
                            name: block.name,
                            arguments: ''
                        }
                    });
                    // Surface the tool name as a delta for token counting.
                    yield {
                        type: 'tool_call_delta',
                        delta: block.name
                    };
                }
            }
            else if (event.type === 'content_block_delta') {
                const delta = event.delta;
                // Incremental assistant text.
                if (delta.type === 'text_delta') {
                    yield {
                        type: 'content',
                        content: delta.text
                    };
                }
                // Incremental JSON for the tool call being built at this index.
                if (delta.type === 'input_json_delta') {
                    const jsonDelta = delta.partial_json;
                    const toolCall = toolCallsBuffer.get(event.index.toString());
                    if (toolCall) {
                        toolCall.function.arguments += jsonDelta;
                        // Yield delta for token counting.
                        yield {
                            type: 'tool_call_delta',
                            delta: jsonDelta
                        };
                    }
                }
            }
            else if (event.type === 'message_start') {
                // Initial usage snapshot (input token count is final here).
                if (event.message.usage) {
                    usageData = {
                        prompt_tokens: event.message.usage.input_tokens || 0,
                        completion_tokens: event.message.usage.output_tokens || 0,
                        total_tokens: (event.message.usage.input_tokens || 0) + (event.message.usage.output_tokens || 0)
                    };
                }
            }
            else if (event.type === 'message_delta') {
                // Final output-token count arrives here; fold it into the totals.
                if (event.usage) {
                    if (!usageData) {
                        usageData = {
                            prompt_tokens: 0,
                            completion_tokens: 0,
                            total_tokens: 0
                        };
                    }
                    usageData.completion_tokens = event.usage.output_tokens || 0;
                    usageData.total_tokens = usageData.prompt_tokens + usageData.completion_tokens;
                }
            }
        }
        // Emit the assembled tool calls, if any.
        if (hasToolCalls && toolCallsBuffer.size > 0) {
            yield {
                type: 'tool_calls',
                tool_calls: Array.from(toolCallsBuffer.values())
            };
        }
        // Emit usage information, if the API reported any.
        if (usageData) {
            yield {
                type: 'usage',
                usage: usageData
            };
        }
        // Signal completion.
        yield {
            type: 'done'
        };
    }
    catch (error) {
        // An abort racing the stream surfaces as an error; exit quietly.
        if (abortSignal?.aborted) {
            return;
        }
        if (error instanceof Error) {
            throw new Error(`Anthropic streaming completion failed: ${error.message}`);
        }
        throw new Error('Anthropic streaming completion failed: Unknown error');
    }
}
package/dist/api/chat.js CHANGED
@@ -5,8 +5,13 @@ import { SYSTEM_PROMPT } from './systemPrompt.js';
5
5
  /**
6
6
  * Convert our ChatMessage format to OpenAI's ChatCompletionMessageParam format
7
7
  * Automatically prepends system prompt if not present
8
+ * Logic:
9
+ * 1. If custom system prompt exists: use custom as system, prepend default as first user message
10
+ * 2. If no custom system prompt: use default as system
8
11
  */
9
12
  function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
13
+ const config = getOpenAiConfig();
14
+ const customSystemPrompt = config.systemPrompt;
10
15
  let result = messages.map(msg => {
11
16
  // 如果消息包含图片,使用 content 数组格式
12
17
  if (msg.role === 'user' && msg.images && msg.images.length > 0) {
@@ -51,15 +56,37 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
51
56
  }
52
57
  return baseMessage;
53
58
  });
54
- // 如果需要系统提示词且第一条消息不是 system 消息,则添加
55
- if (includeSystemPrompt && (result.length === 0 || result[0]?.role !== 'system')) {
56
- result = [
57
- {
58
- role: 'system',
59
- content: SYSTEM_PROMPT
60
- },
61
- ...result
62
- ];
59
+ // 如果需要系统提示词
60
+ if (includeSystemPrompt) {
61
+ // 如果第一条消息已经是 system 消息,跳过
62
+ if (result.length > 0 && result[0]?.role === 'system') {
63
+ return result;
64
+ }
65
+ // 如果配置了自定义系统提示词
66
+ if (customSystemPrompt) {
67
+ // 自定义系统提示词作为 system 消息,默认系统提示词作为第一条 user 消息
68
+ result = [
69
+ {
70
+ role: 'system',
71
+ content: customSystemPrompt
72
+ },
73
+ {
74
+ role: 'user',
75
+ content: SYSTEM_PROMPT
76
+ },
77
+ ...result
78
+ ];
79
+ }
80
+ else {
81
+ // 没有自定义系统提示词,默认系统提示词作为 system 消息
82
+ result = [
83
+ {
84
+ role: 'system',
85
+ content: SYSTEM_PROMPT
86
+ },
87
+ ...result
88
+ ];
89
+ }
63
90
  }
64
91
  return result;
65
92
  }
@@ -0,0 +1,32 @@
1
+ import type { ChatMessage } from './chat.js';
2
+ import type { ChatCompletionTool } from 'openai/resources/chat/completions';
3
+ export interface GeminiOptions {
4
+ model: string;
5
+ messages: ChatMessage[];
6
+ temperature?: number;
7
+ tools?: ChatCompletionTool[];
8
+ }
9
+ export interface UsageInfo {
10
+ prompt_tokens: number;
11
+ completion_tokens: number;
12
+ total_tokens: number;
13
+ }
14
+ export interface GeminiStreamChunk {
15
+ type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
16
+ content?: string;
17
+ tool_calls?: Array<{
18
+ id: string;
19
+ type: 'function';
20
+ function: {
21
+ name: string;
22
+ arguments: string;
23
+ };
24
+ }>;
25
+ delta?: string;
26
+ usage?: UsageInfo;
27
+ }
28
+ export declare function resetGeminiClient(): void;
29
+ /**
30
+ * Create streaming chat completion using Gemini API
31
+ */
32
+ export declare function createStreamingGeminiCompletion(options: GeminiOptions, abortSignal?: AbortSignal): AsyncGenerator<GeminiStreamChunk, void, unknown>;