snow-ai 0.3.7 → 0.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/dist/agents/compactAgent.js +7 -3
  2. package/dist/agents/summaryAgent.d.ts +57 -0
  3. package/dist/agents/summaryAgent.js +259 -0
  4. package/dist/api/anthropic.d.ts +1 -0
  5. package/dist/api/anthropic.js +20 -13
  6. package/dist/api/chat.d.ts +1 -0
  7. package/dist/api/chat.js +23 -12
  8. package/dist/api/gemini.d.ts +1 -0
  9. package/dist/api/gemini.js +14 -8
  10. package/dist/api/responses.d.ts +1 -0
  11. package/dist/api/responses.js +23 -15
  12. package/dist/app.js +15 -2
  13. package/dist/hooks/useCommandHandler.js +58 -0
  14. package/dist/hooks/useCommandPanel.d.ts +2 -1
  15. package/dist/hooks/useCommandPanel.js +6 -1
  16. package/dist/hooks/useConversation.js +44 -24
  17. package/dist/hooks/useSnapshotState.d.ts +2 -0
  18. package/dist/mcp/filesystem.d.ts +131 -46
  19. package/dist/mcp/filesystem.js +188 -35
  20. package/dist/mcp/types/filesystem.types.d.ts +91 -0
  21. package/dist/mcp/utils/filesystem/batch-operations.utils.d.ts +39 -0
  22. package/dist/mcp/utils/filesystem/batch-operations.utils.js +182 -0
  23. package/dist/ui/components/ChatInput.d.ts +2 -1
  24. package/dist/ui/components/ChatInput.js +3 -3
  25. package/dist/ui/components/CommandPanel.d.ts +2 -1
  26. package/dist/ui/components/CommandPanel.js +18 -3
  27. package/dist/ui/components/MarkdownRenderer.js +10 -1
  28. package/dist/ui/components/MessageList.js +1 -1
  29. package/dist/ui/components/PendingMessages.js +1 -1
  30. package/dist/ui/components/PendingToolCalls.d.ts +11 -0
  31. package/dist/ui/components/PendingToolCalls.js +35 -0
  32. package/dist/ui/components/ToolResultPreview.d.ts +1 -1
  33. package/dist/ui/components/ToolResultPreview.js +116 -152
  34. package/dist/ui/pages/ChatScreen.d.ts +1 -0
  35. package/dist/ui/pages/ChatScreen.js +99 -60
  36. package/dist/utils/chatExporter.d.ts +9 -0
  37. package/dist/utils/chatExporter.js +126 -0
  38. package/dist/utils/commandExecutor.d.ts +1 -1
  39. package/dist/utils/commands/export.d.ts +2 -0
  40. package/dist/utils/commands/export.js +12 -0
  41. package/dist/utils/commands/init.js +3 -3
  42. package/dist/utils/fileDialog.d.ts +9 -0
  43. package/dist/utils/fileDialog.js +74 -0
  44. package/dist/utils/fileUtils.js +3 -3
  45. package/dist/utils/incrementalSnapshot.d.ts +7 -0
  46. package/dist/utils/incrementalSnapshot.js +35 -0
  47. package/dist/utils/messageFormatter.js +89 -6
  48. package/dist/utils/sessionConverter.js +11 -0
  49. package/dist/utils/sessionManager.d.ts +5 -0
  50. package/dist/utils/sessionManager.js +45 -0
  51. package/dist/utils/toolDisplayConfig.d.ts +16 -0
  52. package/dist/utils/toolDisplayConfig.js +42 -0
  53. package/package.json +1 -1
@@ -101,19 +101,22 @@ export class CompactAgent {
101
101
  model: this.modelName,
102
102
  messages,
103
103
  max_tokens: 4096,
104
+ includeBuiltinSystemPrompt: false, // 不需要内置系统提示词
104
105
  }, abortSignal);
105
106
  break;
106
107
  case 'gemini':
107
108
  streamGenerator = createStreamingGeminiCompletion({
108
109
  model: this.modelName,
109
- messages
110
+ messages,
111
+ includeBuiltinSystemPrompt: false, // 不需要内置系统提示词
110
112
  }, abortSignal);
111
113
  break;
112
114
  case 'responses':
113
115
  streamGenerator = createStreamingResponse({
114
116
  model: this.modelName,
115
117
  messages,
116
- stream: true
118
+ stream: true,
119
+ includeBuiltinSystemPrompt: false, // 不需要内置系统提示词
117
120
  }, abortSignal);
118
121
  break;
119
122
  case 'chat':
@@ -121,7 +124,8 @@ export class CompactAgent {
121
124
  streamGenerator = createStreamingChatCompletion({
122
125
  model: this.modelName,
123
126
  messages,
124
- stream: true
127
+ stream: true,
128
+ includeBuiltinSystemPrompt: false, // 不需要内置系统提示词
125
129
  }, abortSignal);
126
130
  break;
127
131
  }
@@ -0,0 +1,57 @@
/**
 * Summary Agent Service
 *
 * Generates concise summaries for conversations after the first user-assistant exchange.
 * This service operates in the background without blocking the main conversation flow.
 *
 * Features:
 * - Uses basicModel for efficient, low-cost summarization
 * - Follows the same API routing as main flow (chat, responses, gemini, anthropic)
 * - Generates title (max 50 chars) and summary (max 150 chars)
 * - Only runs once after the first complete conversation exchange
 * - Silent execution with error handling to prevent main flow disruption
 */
export declare class SummaryAgent {
    private modelName;
    private requestMethod;
    private initialized;
    /**
     * Initialize the summary agent with current configuration.
     * Prefers basicModel and falls back to advancedModel when unset.
     * @returns true if initialized successfully, false otherwise
     */
    private initialize;
    /**
     * Check if summary agent is available.
     * Lazily initializes on first call; returns false when no model is configured.
     */
    isAvailable(): Promise<boolean>;
    /**
     * Call the model with streaming API and assemble complete response.
     * Uses the same routing logic as main flow for consistency.
     *
     * @param messages - Chat messages
     * @param abortSignal - Optional abort signal to cancel the request
     */
    private callModel;
    /**
     * Generate title and summary for a conversation.
     *
     * @param userMessage - User's first message content
     * @param assistantMessage - Assistant's first response content
     * @param abortSignal - Optional abort signal to cancel generation
     * @returns Object containing title and summary, or null if generation fails
     */
    generateSummary(userMessage: string, assistantMessage: string, abortSignal?: AbortSignal): Promise<{
        title: string;
        summary: string;
    } | null>;
    /**
     * Generate fallback summary when AI generation fails.
     * Simply truncates the user message for title and summary.
     */
    private generateFallbackSummary;
    /**
     * Truncate string to specified length, adding ellipsis if truncated.
     */
    private truncateString;
}
export declare const summaryAgent: SummaryAgent;
@@ -0,0 +1,259 @@
1
+ import { getOpenAiConfig } from '../utils/apiConfig.js';
2
+ import { logger } from '../utils/logger.js';
3
+ import { createStreamingChatCompletion } from '../api/chat.js';
4
+ import { createStreamingResponse } from '../api/responses.js';
5
+ import { createStreamingGeminiCompletion } from '../api/gemini.js';
6
+ import { createStreamingAnthropicCompletion } from '../api/anthropic.js';
7
/**
 * Summary Agent Service
 *
 * Generates concise summaries for conversations after the first user-assistant exchange.
 * This service operates in the background without blocking the main conversation flow.
 *
 * Features:
 * - Uses basicModel for efficient, low-cost summarization
 * - Follows the same API routing as main flow (chat, responses, gemini, anthropic)
 * - Generates title (max 50 chars) and summary (max 150 chars)
 * - Only runs once after the first complete conversation exchange
 * - Silent execution with error handling to prevent main flow disruption
 */
export class SummaryAgent {
    constructor() {
        // Model used for summarization (basicModel, falling back to advancedModel).
        Object.defineProperty(this, "modelName", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: ''
        });
        // API routing method: 'chat' | 'responses' | 'gemini' | 'anthropic'.
        Object.defineProperty(this, "requestMethod", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: 'chat'
        });
        // Set to true after a successful initialize(); checked by isAvailable().
        Object.defineProperty(this, "initialized", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: false
        });
    }
    /**
     * Initialize the summary agent with current configuration.
     * Prefers basicModel; falls back to advancedModel when basicModel is unset.
     * @returns true if initialized successfully, false otherwise
     */
    async initialize() {
        try {
            const config = getOpenAiConfig();
            // Check if basic model is configured
            if (!config.basicModel) {
                logger.warn('Summary agent: Basic model not configured, using advanced model as fallback');
                // Fallback to advanced model if basic model is not configured
                if (!config.advancedModel) {
                    logger.warn('Summary agent: No model configured');
                    return false;
                }
                this.modelName = config.advancedModel;
            }
            else {
                this.modelName = config.basicModel;
            }
            this.requestMethod = config.requestMethod;
            this.initialized = true;
            return true;
        }
        catch (error) {
            logger.warn('Summary agent: Failed to initialize:', error);
            return false;
        }
    }
    /**
     * Check if summary agent is available.
     * Lazily initializes on first call; a failed initialization is retried
     * on the next call because `initialized` stays false.
     */
    async isAvailable() {
        if (!this.initialized) {
            return await this.initialize();
        }
        return true;
    }
    /**
     * Call the model with streaming API and assemble complete response.
     * Uses the same routing logic as main flow for consistency.
     *
     * @param messages - Chat messages
     * @param abortSignal - Optional abort signal to cancel the request
     * @returns The concatenated text content of the streamed response
     */
    async callModel(messages, abortSignal) {
        let streamGenerator;
        // Route to appropriate streaming API based on request method
        switch (this.requestMethod) {
            case 'anthropic':
                streamGenerator = createStreamingAnthropicCompletion({
                    model: this.modelName,
                    messages,
                    max_tokens: 500, // Limited tokens for summary generation
                    includeBuiltinSystemPrompt: false, // builtin system prompt not needed for summarization
                }, abortSignal);
                break;
            case 'gemini':
                streamGenerator = createStreamingGeminiCompletion({
                    model: this.modelName,
                    messages,
                    includeBuiltinSystemPrompt: false, // builtin system prompt not needed for summarization
                }, abortSignal);
                break;
            case 'responses':
                streamGenerator = createStreamingResponse({
                    model: this.modelName,
                    messages,
                    stream: true,
                    includeBuiltinSystemPrompt: false, // builtin system prompt not needed for summarization
                }, abortSignal);
                break;
            case 'chat':
            default:
                streamGenerator = createStreamingChatCompletion({
                    model: this.modelName,
                    messages,
                    stream: true,
                    includeBuiltinSystemPrompt: false, // builtin system prompt not needed for summarization
                }, abortSignal);
                break;
        }
        // Assemble complete content from streaming response
        let completeContent = '';
        try {
            for await (const chunk of streamGenerator) {
                // Check abort signal
                if (abortSignal?.aborted) {
                    throw new Error('Request aborted');
                }
                // Handle different chunk formats based on request method
                if (this.requestMethod === 'chat') {
                    // Chat API uses standard OpenAI format
                    if (chunk.choices && chunk.choices[0]?.delta?.content) {
                        completeContent += chunk.choices[0].delta.content;
                    }
                }
                else {
                    // Responses, Gemini, and Anthropic APIs use unified format
                    if (chunk.type === 'content' && chunk.content) {
                        completeContent += chunk.content;
                    }
                }
            }
        }
        catch (streamError) {
            logger.error('Summary agent: Streaming error:', streamError);
            throw streamError;
        }
        return completeContent;
    }
    /**
     * Generate title and summary for a conversation.
     *
     * @param userMessage - User's first message content
     * @param assistantMessage - Assistant's first response content
     * @param abortSignal - Optional abort signal to cancel generation
     * @returns Object containing title and summary; falls back to a truncated
     *          user message when generation or parsing fails
     */
    async generateSummary(userMessage, assistantMessage, abortSignal) {
        const available = await this.isAvailable();
        if (!available) {
            logger.warn('Summary agent: Not available, using fallback summary');
            return this.generateFallbackSummary(userMessage, assistantMessage);
        }
        try {
            const summaryPrompt = `You are a conversation summarization assistant. Based on the first exchange between the user and AI assistant below, generate a concise title and summary.

IMPORTANT: Generate the title and summary in the SAME LANGUAGE as the user's message. If the user writes in Chinese, respond in Chinese. If in English, respond in English.

User message:
${userMessage}

AI assistant reply:
${assistantMessage}

Requirements:
1. Generate a short title (max 50 characters) that captures the conversation topic
2. Generate a summary (max 150 characters) that briefly describes the core content
3. Title should be concise and clear, avoid complete sentences
4. Summary should contain key information while staying brief
5. Use the SAME LANGUAGE as the user's message

Output in the following JSON format (JSON only, no other content):
{
"title": "Conversation title",
"summary": "Conversation summary"
}`;
            const messages = [
                {
                    role: 'user',
                    content: summaryPrompt,
                },
            ];
            const response = await this.callModel(messages, abortSignal);
            if (!response || response.trim().length === 0) {
                logger.warn('Summary agent: Empty response, using fallback');
                return this.generateFallbackSummary(userMessage, assistantMessage);
            }
            // Parse JSON response
            try {
                // Extract JSON from markdown code blocks if present
                let jsonStr = response.trim();
                const jsonMatch = jsonStr.match(/```(?:json)?\s*\n?([\s\S]*?)\n?```/);
                if (jsonMatch) {
                    jsonStr = jsonMatch[1].trim();
                }
                const parsed = JSON.parse(jsonStr);
                // Require non-empty STRING fields: a numeric or object title from a
                // malformed model response must not reach truncateString().
                if (typeof parsed.title !== 'string' || typeof parsed.summary !== 'string' ||
                    !parsed.title || !parsed.summary) {
                    logger.warn('Summary agent: Invalid JSON structure, using fallback');
                    return this.generateFallbackSummary(userMessage, assistantMessage);
                }
                // Ensure title and summary are within length limits
                const title = this.truncateString(parsed.title, 50);
                const summary = this.truncateString(parsed.summary, 150);
                logger.info('Summary agent: Successfully generated summary', {
                    title,
                    summary,
                });
                return { title, summary };
            }
            catch (parseError) {
                logger.warn('Summary agent: Failed to parse JSON response, using fallback', parseError);
                return this.generateFallbackSummary(userMessage, assistantMessage);
            }
        }
        catch (error) {
            logger.error('Summary agent: Failed to generate summary', error);
            return this.generateFallbackSummary(userMessage, assistantMessage);
        }
    }
    /**
     * Generate fallback summary when AI generation fails.
     * Simply truncates the user message for title and summary.
     */
    generateFallbackSummary(userMessage, _assistantMessage) {
        // Clean newlines and extra spaces
        const cleanedUser = userMessage
            .replace(/[\r\n]+/g, ' ')
            .replace(/\s+/g, ' ')
            .trim();
        // Use first 50 chars as title
        const title = this.truncateString(cleanedUser, 50);
        // Use first 150 chars as summary
        const summary = this.truncateString(cleanedUser, 150);
        return { title, summary };
    }
    /**
     * Truncate string to specified length, adding ellipsis if truncated.
     * For limits of 3 or fewer characters there is no room for the ellipsis,
     * so the string is hard-truncated (avoids str.slice(0, negative), which
     * would otherwise drop characters from the END of the string).
     */
    truncateString(str, maxLength) {
        if (str.length <= maxLength) {
            return str;
        }
        if (maxLength <= 3) {
            return str.slice(0, maxLength);
        }
        return str.slice(0, maxLength - 3) + '...';
    }
}
// Export singleton instance
export const summaryAgent = new SummaryAgent();
@@ -6,6 +6,7 @@ export interface AnthropicOptions {
6
6
  max_tokens?: number;
7
7
  tools?: ChatCompletionTool[];
8
8
  sessionId?: string;
9
+ includeBuiltinSystemPrompt?: boolean;
9
10
  }
10
11
  export interface AnthropicStreamChunk {
11
12
  type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
@@ -68,8 +68,10 @@ function convertToolsToAnthropic(tools) {
68
68
  /**
69
69
  * Convert our ChatMessage format to Anthropic's message format
70
70
  * Adds cache_control to system prompt and last user message for prompt caching
71
+ * @param messages - The messages to convert
72
+ * @param includeBuiltinSystemPrompt - Whether to include builtin system prompt (default true)
71
73
  */
72
- function convertToAnthropicMessages(messages) {
74
+ function convertToAnthropicMessages(messages, includeBuiltinSystemPrompt = true) {
73
75
  const customSystemPrompt = getCustomSystemPrompt();
74
76
  let systemContent;
75
77
  const anthropicMessages = [];
@@ -149,20 +151,25 @@ function convertToAnthropicMessages(messages) {
149
151
  });
150
152
  }
151
153
  }
154
+ // 如果配置了自定义系统提示词(最高优先级,始终添加)
152
155
  if (customSystemPrompt) {
153
156
  systemContent = customSystemPrompt;
154
- anthropicMessages.unshift({
155
- role: 'user',
156
- content: [
157
- {
158
- type: 'text',
159
- text: getSystemPrompt(),
160
- cache_control: { type: 'ephemeral' },
161
- },
162
- ],
163
- });
157
+ if (includeBuiltinSystemPrompt) {
158
+ // 将默认系统提示词作为第一条用户消息
159
+ anthropicMessages.unshift({
160
+ role: 'user',
161
+ content: [
162
+ {
163
+ type: 'text',
164
+ text: getSystemPrompt(),
165
+ cache_control: { type: 'ephemeral' },
166
+ },
167
+ ],
168
+ });
169
+ }
164
170
  }
165
- else if (!systemContent) {
171
+ else if (!systemContent && includeBuiltinSystemPrompt) {
172
+ // 没有自定义系统提示词,但需要添加默认系统提示词
166
173
  systemContent = getSystemPrompt();
167
174
  }
168
175
  let lastUserMessageIndex = -1;
@@ -253,7 +260,7 @@ async function* parseSSEStream(reader) {
253
260
  export async function* createStreamingAnthropicCompletion(options, abortSignal, onRetry) {
254
261
  yield* withRetryGenerator(async function* () {
255
262
  const config = getAnthropicConfig();
256
- const { system, messages } = convertToAnthropicMessages(options.messages);
263
+ const { system, messages } = convertToAnthropicMessages(options.messages, options.includeBuiltinSystemPrompt !== false);
257
264
  const sessionId = options.sessionId || randomUUID();
258
265
  const userId = generateUserId(sessionId);
259
266
  const requestBody = {
@@ -13,6 +13,7 @@ export interface ChatCompletionOptions {
13
13
  name: string;
14
14
  };
15
15
  };
16
+ includeBuiltinSystemPrompt?: boolean;
16
17
  }
17
18
  export interface ChatCompletionChunk {
18
19
  id: string;
package/dist/api/chat.js CHANGED
@@ -9,8 +9,10 @@ import { saveUsageToFile } from '../utils/usageLogger.js';
9
9
  * Logic:
10
10
  * 1. If custom system prompt exists: use custom as system, prepend default as first user message
11
11
  * 2. If no custom system prompt: use default as system
12
+ * @param messages - The messages to convert
13
+ * @param includeBuiltinSystemPrompt - Whether to include builtin system prompt (default true)
12
14
  */
13
- function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
15
+ function convertToOpenAIMessages(messages, includeBuiltinSystemPrompt = true) {
14
16
  const customSystemPrompt = getCustomSystemPrompt();
15
17
  let result = messages.map(msg => {
16
18
  // 如果消息包含图片,使用 content 数组格式
@@ -56,14 +58,13 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
56
58
  }
57
59
  return baseMessage;
58
60
  });
59
- // 如果需要系统提示词
60
- if (includeSystemPrompt) {
61
- // 如果第一条消息已经是 system 消息,跳过
62
- if (result.length > 0 && result[0]?.role === 'system') {
63
- return result;
64
- }
65
- // 如果配置了自定义系统提示词
66
- if (customSystemPrompt) {
61
+ // 如果第一条消息已经是 system 消息,跳过
62
+ if (result.length > 0 && result[0]?.role === 'system') {
63
+ return result;
64
+ }
65
+ // 如果配置了自定义系统提示词(最高优先级,始终添加)
66
+ if (customSystemPrompt) {
67
+ if (includeBuiltinSystemPrompt) {
67
68
  // 自定义系统提示词作为 system 消息,默认系统提示词作为第一条 user 消息
68
69
  result = [
69
70
  {
@@ -78,16 +79,26 @@ function convertToOpenAIMessages(messages, includeSystemPrompt = true) {
78
79
  ];
79
80
  }
80
81
  else {
81
- // 没有自定义系统提示词,默认系统提示词作为 system 消息
82
+ // 只添加自定义系统提示词
82
83
  result = [
83
84
  {
84
85
  role: 'system',
85
- content: getSystemPrompt(),
86
+ content: customSystemPrompt,
86
87
  },
87
88
  ...result,
88
89
  ];
89
90
  }
90
91
  }
92
+ else if (includeBuiltinSystemPrompt) {
93
+ // 没有自定义系统提示词,但需要添加默认系统提示词
94
+ result = [
95
+ {
96
+ role: 'system',
97
+ content: getSystemPrompt(),
98
+ },
99
+ ...result,
100
+ ];
101
+ }
91
102
  return result;
92
103
  }
93
104
  let openaiConfig = null;
@@ -161,7 +172,7 @@ export async function* createStreamingChatCompletion(options, abortSignal, onRet
161
172
  yield* withRetryGenerator(async function* () {
162
173
  const requestBody = {
163
174
  model: options.model,
164
- messages: convertToOpenAIMessages(options.messages),
175
+ messages: convertToOpenAIMessages(options.messages, options.includeBuiltinSystemPrompt !== false),
165
176
  stream: true,
166
177
  stream_options: { include_usage: true },
167
178
  temperature: options.temperature || 0.7,
@@ -4,6 +4,7 @@ export interface GeminiOptions {
4
4
  messages: ChatMessage[];
5
5
  temperature?: number;
6
6
  tools?: ChatCompletionTool[];
7
+ includeBuiltinSystemPrompt?: boolean;
7
8
  }
8
9
  export interface GeminiStreamChunk {
9
10
  type: 'content' | 'tool_calls' | 'tool_call_delta' | 'done' | 'usage';
@@ -53,8 +53,10 @@ function convertToolsToGemini(tools) {
53
53
  }
54
54
  /**
55
55
  * Convert our ChatMessage format to Gemini's format
56
+ * @param messages - The messages to convert
57
+ * @param includeBuiltinSystemPrompt - Whether to include builtin system prompt (default true)
56
58
  */
57
- function convertToGeminiMessages(messages) {
59
+ function convertToGeminiMessages(messages, includeBuiltinSystemPrompt = true) {
58
60
  const customSystemPrompt = getCustomSystemPrompt();
59
61
  let systemInstruction;
60
62
  const contents = [];
@@ -197,15 +199,19 @@ function convertToGeminiMessages(messages) {
197
199
  contents.push({ role, parts });
198
200
  }
199
201
  // Handle system instruction
202
+ // 如果配置了自定义系统提示词(最高优先级,始终添加)
200
203
  if (customSystemPrompt) {
201
204
  systemInstruction = customSystemPrompt;
202
- // Prepend default system prompt as first user message
203
- contents.unshift({
204
- role: 'user',
205
- parts: [{ text: getSystemPrompt() }],
206
- });
205
+ if (includeBuiltinSystemPrompt) {
206
+ // Prepend default system prompt as first user message
207
+ contents.unshift({
208
+ role: 'user',
209
+ parts: [{ text: getSystemPrompt() }],
210
+ });
211
+ }
207
212
  }
208
- else if (!systemInstruction) {
213
+ else if (!systemInstruction && includeBuiltinSystemPrompt) {
214
+ // 没有自定义系统提示词,但需要添加默认系统提示词
209
215
  systemInstruction = getSystemPrompt();
210
216
  }
211
217
  return { systemInstruction, contents };
@@ -217,7 +223,7 @@ export async function* createStreamingGeminiCompletion(options, abortSignal, onR
217
223
  const config = getGeminiConfig();
218
224
  // 使用重试包装生成器
219
225
  yield* withRetryGenerator(async function* () {
220
- const { systemInstruction, contents } = convertToGeminiMessages(options.messages);
226
+ const { systemInstruction, contents } = convertToGeminiMessages(options.messages, options.includeBuiltinSystemPrompt !== false);
221
227
  // Build request payload
222
228
  const requestBody = {
223
229
  contents,
@@ -14,6 +14,7 @@ export interface ResponseOptions {
14
14
  prompt_cache_key?: string;
15
15
  store?: boolean;
16
16
  include?: string[];
17
+ includeBuiltinSystemPrompt?: boolean;
17
18
  }
18
19
  export interface ResponseStreamChunk {
19
20
  type: 'content' | 'tool_calls' | 'tool_call_delta' | 'reasoning_delta' | 'reasoning_started' | 'reasoning_data' | 'done' | 'usage';
@@ -82,7 +82,7 @@ function getOpenAIConfig() {
82
82
  export function resetOpenAIClient() {
83
83
  openaiConfig = null;
84
84
  }
85
- function convertToResponseInput(messages) {
85
+ function convertToResponseInput(messages, includeBuiltinSystemPrompt = true) {
86
86
  const customSystemPrompt = getCustomSystemPrompt();
87
87
  const result = [];
88
88
  for (const msg of messages) {
@@ -160,24 +160,32 @@ function convertToResponseInput(messages) {
160
160
  }
161
161
  // 确定系统提示词:参考 anthropic.ts 的逻辑
162
162
  let systemInstructions;
163
+ // 如果配置了自定义系统提示词(最高优先级,始终添加)
163
164
  if (customSystemPrompt) {
164
- // 有自定义系统提示词:自定义作为 instructions,默认作为第一条用户消息
165
+ // 有自定义系统提示词:自定义作为 instructions
165
166
  systemInstructions = customSystemPrompt;
166
- result.unshift({
167
- type: 'message',
168
- role: 'user',
169
- content: [
170
- {
171
- type: 'input_text',
172
- text: getSystemPrompt(),
173
- },
174
- ],
175
- });
167
+ if (includeBuiltinSystemPrompt) {
168
+ // 默认系统提示词作为第一条用户消息
169
+ result.unshift({
170
+ type: 'message',
171
+ role: 'user',
172
+ content: [
173
+ {
174
+ type: 'input_text',
175
+ text: getSystemPrompt(),
176
+ },
177
+ ],
178
+ });
179
+ }
176
180
  }
177
- else {
178
- // 没有自定义系统提示词:默认作为 instructions
181
+ else if (includeBuiltinSystemPrompt) {
182
+ // 没有自定义系统提示词,但需要添加默认系统提示词
179
183
  systemInstructions = getSystemPrompt();
180
184
  }
185
+ else {
186
+ // 既没有自定义系统提示词,也不需要添加默认系统提示词
187
+ systemInstructions = 'You are a helpful assistant.';
188
+ }
181
189
  return { input: result, systemInstructions };
182
190
  }
183
191
  /**
@@ -228,7 +236,7 @@ async function* parseSSEStream(reader) {
228
236
  export async function* createStreamingResponse(options, abortSignal, onRetry) {
229
237
  const config = getOpenAIConfig();
230
238
  // 提取系统提示词和转换后的消息
231
- const { input: requestInput, systemInstructions } = convertToResponseInput(options.messages);
239
+ const { input: requestInput, systemInstructions } = convertToResponseInput(options.messages, options.includeBuiltinSystemPrompt !== false);
232
240
  // 使用重试包装生成器
233
241
  yield* withRetryGenerator(async function* () {
234
242
  const requestPayload = {