tabby-ai-assistant 1.0.12 → 1.0.15
This diff represents the content of publicly available package versions as released to their respective public registries, and is provided for informational purposes only.
- package/.editorconfig +18 -0
- package/README.md +113 -55
- package/dist/index.js +1 -1
- package/package.json +6 -4
- package/src/components/chat/ai-sidebar.component.scss +220 -9
- package/src/components/chat/ai-sidebar.component.ts +364 -29
- package/src/components/chat/chat-input.component.ts +36 -4
- package/src/components/chat/chat-interface.component.ts +225 -5
- package/src/components/chat/chat-message.component.ts +6 -1
- package/src/components/settings/context-settings.component.ts +91 -91
- package/src/components/terminal/ai-toolbar-button.component.ts +4 -2
- package/src/components/terminal/command-suggestion.component.ts +148 -6
- package/src/index.ts +0 -6
- package/src/providers/tabby/ai-toolbar-button.provider.ts +7 -3
- package/src/services/chat/ai-sidebar.service.ts +414 -410
- package/src/services/chat/chat-session.service.ts +36 -12
- package/src/services/context/compaction.ts +110 -134
- package/src/services/context/manager.ts +27 -7
- package/src/services/context/memory.ts +17 -33
- package/src/services/context/summary.service.ts +136 -0
- package/src/services/core/ai-assistant.service.ts +1060 -37
- package/src/services/core/ai-provider-manager.service.ts +154 -25
- package/src/services/core/checkpoint.service.ts +218 -18
- package/src/services/core/config-provider.service.ts +4 -12
- package/src/services/core/toast.service.ts +106 -106
- package/src/services/providers/anthropic-provider.service.ts +126 -202
- package/src/services/providers/base-provider.service.ts +315 -21
- package/src/services/providers/glm-provider.service.ts +151 -233
- package/src/services/providers/minimax-provider.service.ts +55 -238
- package/src/services/providers/ollama-provider.service.ts +117 -188
- package/src/services/providers/openai-compatible.service.ts +165 -177
- package/src/services/providers/openai-provider.service.ts +170 -177
- package/src/services/providers/vllm-provider.service.ts +116 -188
- package/src/services/terminal/terminal-context.service.ts +265 -5
- package/src/services/terminal/terminal-manager.service.ts +748 -748
- package/src/services/terminal/terminal-tools.service.ts +612 -441
- package/src/types/ai.types.ts +156 -3
- package/src/types/provider.types.ts +206 -75
- package/src/utils/cost.utils.ts +249 -0
- package/src/utils/validation.utils.ts +306 -2
- package/dist/index.js.LICENSE.txt +0 -18
- package/src/index.ts.backup +0 -165
- package/src/services/chat/chat-history.service.ts.backup +0 -239
- package/src/services/terminal/command-analyzer.service.ts +0 -43
- package/src/services/terminal/context-menu.service.ts +0 -45
- package/src/services/terminal/hotkey.service.ts +0 -53
- package/webpack.config.js.backup +0 -57
package/src/services/providers/glm-provider.service.ts

@@ -1,9 +1,9 @@
 import { Injectable } from '@angular/core';
-import { Observable, from } from 'rxjs';
+import { Observable, Observer, from } from 'rxjs';
 import axios, { AxiosInstance } from 'axios';
 import { BaseAiProvider } from './base-provider.service';
-import { ProviderCapability,
-import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole } from '../../types/ai.types';
+import { ProviderCapability, ValidationResult } from '../../types/provider.types';
+import { ChatRequest, ChatResponse, CommandRequest, CommandResponse, ExplainRequest, ExplainResponse, AnalysisRequest, AnalysisResponse, MessageRole, StreamEvent } from '../../types/ai.types';
 import { LoggerService } from '../core/logger.service';
 
 /**
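The rewritten imports bring ValidationResult and StreamEvent into scope. StreamEvent is defined in package/src/types/ai.types.ts (+156 -3 in this release) and its definition is not part of this excerpt; the following is a hypothetical reconstruction, inferred purely from the events chatStream emits below:

// Hypothetical sketch of the StreamEvent union, inferred from usage in
// chatStream; the real definition in ai.types.ts may differ.
interface ToolCall {
  id: string;
  name: string;
  input: Record<string, unknown>;
}

interface AssistantMessage {
  id: string;
  role: string;        // MessageRole enum in the real code
  content: string;
  timestamp: Date;
}

type StreamEvent =
  | { type: 'text_delta'; textDelta: string }
  | { type: 'tool_use_start'; toolCall: ToolCall }
  | { type: 'tool_use_end'; toolCall: ToolCall }
  | { type: 'message_end'; message: AssistantMessage }
  | { type: 'error'; error: string };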
@@ -137,11 +137,144 @@ export class GlmProviderService extends BaseAiProvider {
   }
 
   /**
-   * Streaming chat -
+   * Streaming chat - supports tool-call events
    */
-  chatStream(request: ChatRequest): Observable<
-
-
+  chatStream(request: ChatRequest): Observable<StreamEvent> {
+    return new Observable<StreamEvent>((subscriber: Observer<StreamEvent>) => {
+      if (!this.client) {
+        const error = new Error('GLM client not initialized');
+        subscriber.next({ type: 'error', error: error.message });
+        subscriber.error(error);
+        return;
+      }
+
+      let currentToolId = '';
+      let currentToolName = '';
+      let currentToolInput = '';
+      let fullContent = '';
+
+      const abortController = new AbortController();
+
+      const runStream = async () => {
+        try {
+          const response = await this.client!.post('/v1/messages', {
+            model: this.config?.model || 'glm-4.6',
+            max_tokens: request.maxTokens || 1000,
+            system: request.systemPrompt || this.getDefaultSystemPrompt(),
+            messages: this.transformMessages(request.messages),
+            temperature: request.temperature || 0.95,
+            stream: true
+          }, {
+            responseType: 'stream'
+          });
+
+          const stream = response.data;
+          const decoder = new TextDecoder();
+          let buffer = '';
+
+          for await (const chunk of stream) {
+            if (abortController.signal.aborted) break;
+
+            buffer += decoder.decode(chunk, { stream: true });
+            const lines = buffer.split('\n');
+            buffer = lines.pop() || '';
+
+            for (const line of lines) {
+              if (line.startsWith('data:')) {
+                const data = line.slice(5).trim();
+                if (data === '[DONE]') continue;
+
+                try {
+                  const parsed = JSON.parse(data);
+                  const eventType = parsed.type;
+                  const eventData = parsed;
+
+                  this.logger.debug('Stream event', { type: eventType });
+
+                  // Handle text deltas
+                  if (eventType === 'content_block_delta' && eventData.delta?.type === 'text_delta') {
+                    const textDelta = eventData.delta.text;
+                    fullContent += textDelta;
+                    subscriber.next({
+                      type: 'text_delta',
+                      textDelta
+                    });
+                  }
+                  // Handle tool-call start
+                  else if (eventType === 'content_block_start' && eventData.content_block?.type === 'tool_use') {
+                    currentToolId = eventData.content_block.id || `tool_${Date.now()}`;
+                    currentToolName = eventData.content_block.name;
+                    currentToolInput = '';
+                    subscriber.next({
+                      type: 'tool_use_start',
+                      toolCall: {
+                        id: currentToolId,
+                        name: currentToolName,
+                        input: {}
+                      }
+                    });
+                    this.logger.debug('Stream event', { type: 'tool_use_start', name: currentToolName });
+                  }
+                  // Handle tool-call argument deltas
+                  else if (eventType === 'content_block_delta' && eventData.delta?.type === 'input_json_delta') {
+                    currentToolInput += eventData.delta.partial_json || '';
+                  }
+                  // Handle tool-call end
+                  else if (eventType === 'content_block_stop') {
+                    if (currentToolId && currentToolName) {
+                      let parsedInput = {};
+                      try {
+                        parsedInput = JSON.parse(currentToolInput || '{}');
+                      } catch (e) {
+                        // Fall back to the raw input
+                      }
+                      subscriber.next({
+                        type: 'tool_use_end',
+                        toolCall: {
+                          id: currentToolId,
+                          name: currentToolName,
+                          input: parsedInput
+                        }
+                      });
+                      this.logger.debug('Stream event', { type: 'tool_use_end', name: currentToolName });
+                      currentToolId = '';
+                      currentToolName = '';
+                      currentToolInput = '';
+                    }
+                  }
+                } catch (e) {
+                  // Ignore parse errors for malformed frames
+                }
+              }
+            }
+          }
+
+          subscriber.next({
+            type: 'message_end',
+            message: {
+              id: this.generateId(),
+              role: MessageRole.ASSISTANT,
+              content: fullContent,
+              timestamp: new Date()
+            }
+          });
+          this.logger.debug('Stream event', { type: 'message_end', contentLength: fullContent.length });
+          subscriber.complete();
+
+        } catch (error) {
+          if ((error as any).name !== 'AbortError') {
+            const errorMessage = `GLM stream failed: ${error instanceof Error ? error.message : String(error)}`;
+            this.logger.error('Stream error', error);
+            subscriber.next({ type: 'error', error: errorMessage });
+            subscriber.error(new Error(errorMessage));
+          }
+        }
+      };
+
+      runStream();
+
+      return () => abortController.abort();
+    });
   }
 
   /**
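The Observable returned by chatStream is cold: the request starts on subscribe, and the teardown function aborts the HTTP stream. A minimal consumption sketch, assuming a configured provider instance and a prepared ChatRequest (the declare stubs stand in for them; the logging is illustrative, not code from this package):

import { Observable, Subscription } from 'rxjs';

// Stand-ins for values assumed to be in scope.
declare const provider: { chatStream(req: unknown): Observable<any> };
declare const request: unknown;

const subscription: Subscription = provider.chatStream(request).subscribe({
  next: (event) => {
    switch (event.type) {
      case 'text_delta':
        process.stdout.write(event.textDelta);    // render incremental text
        break;
      case 'tool_use_end':
        console.log('tool call:', event.toolCall.name, event.toolCall.input);
        break;
      case 'message_end':
        console.log('\ndone, chars:', event.message.content.length);
        break;
      case 'error':
        console.error('provider error:', event.error);
        break;
    }
  },
  error: (err) => console.error('stream failed:', err)
});

// Unsubscribing runs the teardown returned from the Observable constructor,
// i.e. abortController.abort(), which breaks the for-await loop mid-stream.
subscription.unsubscribe();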
@@ -213,39 +346,19 @@ export class GlmProviderService extends BaseAiProvider {
     return this.parseAnalysisResponse(response.message.content);
   }
 
-
-
-
-
-    try {
-      if (!this.client) {
-        return HealthStatus.UNHEALTHY;
-      }
-
-      // A simple test request
-      const response = await this.client.post('/v1/messages', {
-        model: this.config?.model || 'glm-4.6',
-        max_tokens: 1,
-        messages: [
-          {
-            role: 'user',
-            content: [{ type: 'text', text: 'Hi' }]
-          }
-        ]
-      });
-
-      if (response.status === 200) {
-        this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
-        return HealthStatus.HEALTHY;
-      }
+  protected async sendTestRequest(request: ChatRequest): Promise<ChatResponse> {
+    if (!this.client) {
+      throw new Error('GLM client not initialized');
+    }
 
-
+    const response = await this.client.post('/v1/messages', {
+      model: this.config?.model || 'glm-4.6',
+      max_tokens: request.maxTokens || 1,
+      messages: this.transformMessages(request.messages),
+      temperature: request.temperature || 0
+    });
 
-
-      this.logger.error('GLM health check failed', error);
-      this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
-      return HealthStatus.UNHEALTHY;
-    }
+    return this.transformChatResponse(response.data);
   }
 
   /**
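The bespoke health check (the hard-coded one-token 'Hi' probe and lastHealthCheck bookkeeping) is gone; the provider now implements only sendTestRequest. That logic presumably moved into BaseAiProvider (+315 -21 in this release, not shown in this excerpt); a hypothetical sketch of such a template-method driver:

// Assumed shape of the shared health check in base-provider.service.ts;
// this is a guess at where the removed logic went, not code from the diff.
import { HealthStatus } from '../../types/provider.types';   // assumed export
import { ChatRequest, ChatResponse, MessageRole } from '../../types/ai.types';

export abstract class BaseAiProviderSketch {
  protected lastHealthCheck?: { status: HealthStatus; timestamp: Date };

  protected abstract sendTestRequest(request: ChatRequest): Promise<ChatResponse>;

  async healthCheck(): Promise<HealthStatus> {
    try {
      // A one-token probe keeps the check cheap, mirroring the old max_tokens: 1.
      await this.sendTestRequest({
        messages: [{ role: MessageRole.USER, content: 'Hi' }],
        maxTokens: 1
      } as ChatRequest);
      this.lastHealthCheck = { status: HealthStatus.HEALTHY, timestamp: new Date() };
      return HealthStatus.HEALTHY;
    } catch {
      this.lastHealthCheck = { status: HealthStatus.UNHEALTHY, timestamp: new Date() };
      return HealthStatus.UNHEALTHY;
    }
  }
}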
@@ -273,14 +386,6 @@ export class GlmProviderService extends BaseAiProvider {
     return result;
   }
 
-  /**
-   * Get the default base URL
-   * GLM provides an Anthropic-compatible API endpoint
-   */
-  protected getDefaultBaseURL(): string {
-    return 'https://open.bigmodel.cn/api/anthropic';
-  }
-
   /**
    * Transform the message format (Anthropic-compatible)
    */
@@ -312,191 +417,4 @@ export class GlmProviderService extends BaseAiProvider {
       } : undefined
     };
   }
-
-  /**
-   * Build the command-generation prompt
-   */
-  private buildCommandPrompt(request: CommandRequest): string {
-    let prompt = `Convert the following natural-language description into an accurate terminal command:\n\n"${request.naturalLanguage}"\n\n`;
-
-    if (request.context) {
-      prompt += `Current environment:\n`;
-      if (request.context.currentDirectory) {
-        prompt += `- Current directory: ${request.context.currentDirectory}\n`;
-      }
-      if (request.context.operatingSystem) {
-        prompt += `- Operating system: ${request.context.operatingSystem}\n`;
-      }
-      if (request.context.shell) {
-        prompt += `- Shell: ${request.context.shell}\n`;
-      }
-    }
-
-    prompt += `\nReturn JSON directly in this format:\n`;
-    prompt += `{\n`;
-    prompt += `  "command": "the exact command",\n`;
-    prompt += `  "explanation": "what the command does",\n`;
-    prompt += `  "confidence": 0.95\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  /**
-   * Build the command-explanation prompt
-   */
-  private buildExplainPrompt(request: ExplainRequest): string {
-    let prompt = `Explain the following terminal command in detail:\n\n\`${request.command}\`\n\n`;
-
-    if (request.context?.currentDirectory) {
-      prompt += `Current directory: ${request.context.currentDirectory}\n`;
-    }
-    if (request.context?.operatingSystem) {
-      prompt += `Operating system: ${request.context.operatingSystem}\n`;
-    }
-
-    prompt += `\nReturn JSON in this format:\n`;
-    prompt += `{\n`;
-    prompt += `  "explanation": "overall explanation",\n`;
-    prompt += `  "breakdown": [\n`;
-    prompt += `    {"part": "command part", "description": "what it does"}\n`;
-    prompt += `  ],\n`;
-    prompt += `  "examples": ["usage example"]\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  /**
-   * Build the result-analysis prompt
-   */
-  private buildAnalysisPrompt(request: AnalysisRequest): string {
-    let prompt = `Analyze the result of the following command execution:\n\n`;
-    prompt += `Command: ${request.command}\n`;
-    prompt += `Exit code: ${request.exitCode}\n`;
-    prompt += `Output:\n${request.output}\n\n`;
-
-    if (request.context?.workingDirectory) {
-      prompt += `Working directory: ${request.context.workingDirectory}\n`;
-    }
-
-    prompt += `\nReturn JSON in this format:\n`;
-    prompt += `{\n`;
-    prompt += `  "summary": "result summary",\n`;
-    prompt += `  "insights": ["insight 1", "insight 2"],\n`;
-    prompt += `  "success": true/false,\n`;
-    prompt += `  "issues": [\n`;
-    prompt += `    {"severity": "warning|error|info", "message": "issue description", "suggestion": "suggested fix"}\n`;
-    prompt += `  ]\n`;
-    prompt += `}\n`;
-
-    return prompt;
-  }
-
-  /**
-   * Parse the command response
-   */
-  private parseCommandResponse(content: string): CommandResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          command: parsed.command || '',
-          explanation: parsed.explanation || '',
-          confidence: parsed.confidence || 0.5
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse command response as JSON', error);
-    }
-
-    // Fallback parsing
-    const lines = content.split('\n').map(l => l.trim()).filter(l => l);
-    return {
-      command: lines[0] || '',
-      explanation: lines.slice(1).join(' ') || 'AI-generated command',
-      confidence: 0.5
-    };
-  }
-
-  /**
-   * Parse the explanation response
-   */
-  private parseExplainResponse(content: string): ExplainResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          explanation: parsed.explanation || '',
-          breakdown: parsed.breakdown || [],
-          examples: parsed.examples || []
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse explain response as JSON', error);
-    }
-
-    return {
-      explanation: content,
-      breakdown: []
-    };
-  }
-
-  /**
-   * Parse the analysis response
-   */
-  private parseAnalysisResponse(content: string): AnalysisResponse {
-    try {
-      const match = content.match(/\{[\s\S]*\}/);
-      if (match) {
-        const parsed = JSON.parse(match[0]);
-        return {
-          summary: parsed.summary || '',
-          insights: parsed.insights || [],
-          success: parsed.success !== false,
-          issues: parsed.issues || []
-        };
-      }
-    } catch (error) {
-      this.logger.warn('Failed to parse analysis response as JSON', error);
-    }
-
-    return {
-      summary: content,
-      insights: [],
-      success: true
-    };
-  }
-
-  private getDefaultSystemPrompt(): string {
-    return `You are a professional terminal command assistant running inside the Tabby terminal.
-
-## Core capabilities
-You can operate the terminal directly through the following tools:
-- write_to_terminal: write a command to the terminal and execute it
-- read_terminal_output: read the terminal output
-- get_terminal_list: list all terminals
-- get_terminal_cwd: get the current working directory
-- focus_terminal: switch to the terminal at the given index (requires a terminal_index parameter)
-- get_terminal_selection: get the text currently selected in the terminal
-
-## Important rules
-1. When the user asks to run a command (e.g. "show the current directory", "list the files"), you must use the write_to_terminal tool to execute it
-2. **When the user asks to switch terminals (e.g. "switch to terminal 0", "open terminal 4"), you must use the focus_terminal tool**
-3. Do not merely describe what you "are going to do"; call the tool and do it
-4. After executing a command, use read_terminal_output to read the result and report it back to the user
-5. If you are unsure about the current directory or terminal state, call get_terminal_cwd or get_terminal_list first
-6. **Never pretend an action was performed; you must actually call the tool**
-
-## Examples
-User: "Show the files in the current directory"
-Correct: call the write_to_terminal tool with { "command": "dir", "execute": true }
-Wrong: only reply with the text "I will run the dir command"
-
-User: "Switch to terminal 4"
-Correct: call the focus_terminal tool with { "terminal_index": 4 }
-Wrong: only reply with the text "Switched to terminal 4" (without calling the tool)`;
-  }
 }