@shareai-lab/kode 1.0.71 → 1.0.73
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +142 -1
- package/README.zh-CN.md +47 -1
- package/package.json +5 -1
- package/src/ProjectOnboarding.tsx +47 -29
- package/src/Tool.ts +33 -4
- package/src/commands/agents.tsx +3401 -0
- package/src/commands/help.tsx +2 -2
- package/src/commands/resume.tsx +2 -1
- package/src/commands/terminalSetup.ts +4 -4
- package/src/commands.ts +3 -0
- package/src/components/ApproveApiKey.tsx +1 -1
- package/src/components/Config.tsx +10 -6
- package/src/components/ConsoleOAuthFlow.tsx +5 -4
- package/src/components/CustomSelect/select-option.tsx +28 -2
- package/src/components/CustomSelect/select.tsx +14 -5
- package/src/components/CustomSelect/theme.ts +45 -0
- package/src/components/Help.tsx +4 -4
- package/src/components/InvalidConfigDialog.tsx +1 -1
- package/src/components/LogSelector.tsx +1 -1
- package/src/components/MCPServerApprovalDialog.tsx +1 -1
- package/src/components/Message.tsx +2 -0
- package/src/components/ModelListManager.tsx +10 -6
- package/src/components/ModelSelector.tsx +201 -23
- package/src/components/ModelStatusDisplay.tsx +7 -5
- package/src/components/PromptInput.tsx +117 -87
- package/src/components/SentryErrorBoundary.ts +3 -3
- package/src/components/StickerRequestForm.tsx +16 -0
- package/src/components/StructuredDiff.tsx +36 -29
- package/src/components/TextInput.tsx +13 -0
- package/src/components/TodoItem.tsx +11 -0
- package/src/components/TrustDialog.tsx +1 -1
- package/src/components/messages/AssistantLocalCommandOutputMessage.tsx +5 -1
- package/src/components/messages/AssistantToolUseMessage.tsx +14 -4
- package/src/components/messages/TaskProgressMessage.tsx +32 -0
- package/src/components/messages/TaskToolMessage.tsx +58 -0
- package/src/components/permissions/FallbackPermissionRequest.tsx +2 -4
- package/src/components/permissions/FileEditPermissionRequest/FileEditPermissionRequest.tsx +1 -1
- package/src/components/permissions/FileEditPermissionRequest/FileEditToolDiff.tsx +5 -3
- package/src/components/permissions/FileWritePermissionRequest/FileWritePermissionRequest.tsx +1 -1
- package/src/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.tsx +5 -3
- package/src/components/permissions/FilesystemPermissionRequest/FilesystemPermissionRequest.tsx +2 -4
- package/src/components/permissions/PermissionRequest.tsx +3 -5
- package/src/constants/macros.ts +2 -0
- package/src/constants/modelCapabilities.ts +179 -0
- package/src/constants/models.ts +90 -0
- package/src/constants/product.ts +1 -1
- package/src/context.ts +7 -7
- package/src/entrypoints/cli.tsx +23 -3
- package/src/entrypoints/mcp.ts +10 -10
- package/src/hooks/useCanUseTool.ts +1 -1
- package/src/hooks/useTextInput.ts +5 -2
- package/src/hooks/useUnifiedCompletion.ts +1404 -0
- package/src/messages.ts +1 -0
- package/src/query.ts +3 -0
- package/src/screens/ConfigureNpmPrefix.tsx +1 -1
- package/src/screens/Doctor.tsx +1 -1
- package/src/screens/REPL.tsx +15 -9
- package/src/services/adapters/base.ts +38 -0
- package/src/services/adapters/chatCompletions.ts +90 -0
- package/src/services/adapters/responsesAPI.ts +170 -0
- package/src/services/claude.ts +198 -62
- package/src/services/customCommands.ts +43 -22
- package/src/services/gpt5ConnectionTest.ts +340 -0
- package/src/services/mcpClient.ts +1 -1
- package/src/services/mentionProcessor.ts +273 -0
- package/src/services/modelAdapterFactory.ts +69 -0
- package/src/services/openai.ts +521 -12
- package/src/services/responseStateManager.ts +90 -0
- package/src/services/systemReminder.ts +113 -12
- package/src/test/testAdapters.ts +96 -0
- package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx +120 -56
- package/src/tools/BashTool/BashTool.tsx +4 -31
- package/src/tools/BashTool/BashToolResultMessage.tsx +1 -1
- package/src/tools/BashTool/OutputLine.tsx +1 -0
- package/src/tools/FileEditTool/FileEditTool.tsx +4 -5
- package/src/tools/FileReadTool/FileReadTool.tsx +43 -10
- package/src/tools/MCPTool/MCPTool.tsx +2 -1
- package/src/tools/MultiEditTool/MultiEditTool.tsx +2 -2
- package/src/tools/NotebookReadTool/NotebookReadTool.tsx +15 -23
- package/src/tools/StickerRequestTool/StickerRequestTool.tsx +1 -1
- package/src/tools/TaskTool/TaskTool.tsx +170 -86
- package/src/tools/TaskTool/prompt.ts +61 -25
- package/src/tools/ThinkTool/ThinkTool.tsx +1 -3
- package/src/tools/TodoWriteTool/TodoWriteTool.tsx +11 -10
- package/src/tools/lsTool/lsTool.tsx +5 -2
- package/src/tools.ts +16 -16
- package/src/types/conversation.ts +51 -0
- package/src/types/logs.ts +58 -0
- package/src/types/modelCapabilities.ts +64 -0
- package/src/types/notebook.ts +87 -0
- package/src/utils/advancedFuzzyMatcher.ts +290 -0
- package/src/utils/agentLoader.ts +284 -0
- package/src/utils/ask.tsx +1 -0
- package/src/utils/commands.ts +1 -1
- package/src/utils/commonUnixCommands.ts +161 -0
- package/src/utils/config.ts +173 -2
- package/src/utils/conversationRecovery.ts +1 -0
- package/src/utils/debugLogger.ts +13 -13
- package/src/utils/exampleCommands.ts +1 -0
- package/src/utils/fuzzyMatcher.ts +328 -0
- package/src/utils/messages.tsx +6 -5
- package/src/utils/responseState.ts +23 -0
- package/src/utils/secureFile.ts +559 -0
- package/src/utils/terminal.ts +1 -0
- package/src/utils/theme.ts +11 -0
- package/src/hooks/useSlashCommandTypeahead.ts +0 -137

package/src/services/systemReminder.ts

@@ -82,28 +82,31 @@ class SystemReminderService {
       () => this.dispatchTodoEvent(agentId),
       () => this.dispatchSecurityEvent(),
       () => this.dispatchPerformanceEvent(),
+      () => this.getMentionReminders(), // Add mention reminders
     ]

     for (const generator of reminderGenerators) {
-      if (reminders.length >=
-
-      const
-      if (
-        reminders
-
+      if (reminders.length >= 5) break // Slightly increase limit to accommodate mentions
+
+      const result = generator()
+      if (result) {
+        // Handle both single reminders and arrays
+        const remindersToAdd = Array.isArray(result) ? result : [result]
+        reminders.push(...remindersToAdd)
+        this.sessionState.reminderCount += remindersToAdd.length
       }
     }

     // Log aggregated metrics instead of individual events for performance
     if (reminders.length > 0) {
       logEvent('system_reminder_batch', {
-        count: reminders.length,
+        count: reminders.length.toString(),
         types: reminders.map(r => r.type).join(','),
         priorities: reminders.map(r => r.priority).join(','),
         categories: reminders.map(r => r.category).join(','),
-        sessionCount: this.sessionState.reminderCount,
+        sessionCount: this.sessionState.reminderCount.toString(),
         agentId: agentId || 'default',
-        timestamp: currentTime,
+        timestamp: currentTime.toString(),
       })
     }


@@ -224,6 +227,43 @@ class SystemReminderService {
     return null
   }

+  /**
+   * Retrieve cached mention reminders
+   * Returns recent mentions (within 5 seconds) that haven't expired
+   */
+  private getMentionReminders(): ReminderMessage[] {
+    const currentTime = Date.now()
+    const MENTION_FRESHNESS_WINDOW = 5000 // 5 seconds
+    const reminders: ReminderMessage[] = []
+    const expiredKeys: string[] = []
+
+    // Single pass through cache for both collection and cleanup identification
+    for (const [key, reminder] of this.reminderCache.entries()) {
+      if (this.isMentionReminder(reminder)) {
+        const age = currentTime - reminder.timestamp
+        if (age <= MENTION_FRESHNESS_WINDOW) {
+          reminders.push(reminder)
+        } else {
+          expiredKeys.push(key)
+        }
+      }
+    }
+
+    // Clean up expired mention reminders in separate pass for performance
+    expiredKeys.forEach(key => this.reminderCache.delete(key))
+
+    return reminders
+  }
+
+  /**
+   * Type guard for mention reminders - centralized type checking
+   * Eliminates hardcoded type strings scattered throughout the code
+   */
+  private isMentionReminder(reminder: ReminderMessage): boolean {
+    const mentionTypes = ['agent_mention', 'file_mention', 'ask_model_mention']
+    return mentionTypes.includes(reminder.type)
+  }
+
   /**
    * Generate reminders for external file changes
    * Called when todo files are modified externally

@@ -302,9 +342,9 @@ class SystemReminderService {
       // Log session startup
       logEvent('system_reminder_session_startup', {
         agentId: context.agentId || 'default',
-        contextKeys: Object.keys(context.context || {}),
-        messageCount: context.messages || 0,
-        timestamp: context.timestamp,
+        contextKeys: Object.keys(context.context || {}).join(','),
+        messageCount: (context.messages || 0).toString(),
+        timestamp: context.timestamp.toString(),
       })
     })


@@ -343,6 +383,40 @@ class SystemReminderService {
     this.addEventListener('file:edited', context => {
       // File edit handling
     })
+
+    // Unified mention event handlers - eliminates code duplication
+    this.addEventListener('agent:mentioned', context => {
+      this.createMentionReminder({
+        type: 'agent_mention',
+        key: `agent_mention_${context.agentType}_${context.timestamp}`,
+        category: 'task',
+        priority: 'high',
+        content: `The user mentioned @${context.originalMention}. You MUST use the Task tool with subagent_type="${context.agentType}" to delegate this task to the specified agent. Provide a detailed, self-contained task description that fully captures the user's intent for the ${context.agentType} agent to execute.`,
+        timestamp: context.timestamp
+      })
+    })
+
+    this.addEventListener('file:mentioned', context => {
+      this.createMentionReminder({
+        type: 'file_mention',
+        key: `file_mention_${context.filePath}_${context.timestamp}`,
+        category: 'general',
+        priority: 'high',
+        content: `The user mentioned @${context.originalMention}. You MUST read the entire content of the file at path: ${context.filePath} using the Read tool to understand the full context before proceeding with the user's request.`,
+        timestamp: context.timestamp
+      })
+    })
+
+    this.addEventListener('ask-model:mentioned', context => {
+      this.createMentionReminder({
+        type: 'ask_model_mention',
+        key: `ask_model_mention_${context.modelName}_${context.timestamp}`,
+        category: 'task',
+        priority: 'high',
+        content: `The user mentioned @${context.modelName}. You MUST use the AskExpertModelTool to consult this specific model for expert opinions and analysis. Provide the user's question or context clearly to get the most relevant response from ${context.modelName}.`,
+        timestamp: context.timestamp
+      })
+    })
   }

   public addEventListener(

@@ -366,6 +440,33 @@ class SystemReminderService {
     })
   }

+  /**
+   * Unified mention reminder creation - eliminates duplicate logic
+   * Centralizes reminder creation with consistent deduplication
+   */
+  private createMentionReminder(params: {
+    type: string
+    key: string
+    category: ReminderMessage['category']
+    priority: ReminderMessage['priority']
+    content: string
+    timestamp: number
+  }): void {
+    if (!this.sessionState.remindersSent.has(params.key)) {
+      this.sessionState.remindersSent.add(params.key)
+
+      const reminder = this.createReminderMessage(
+        params.type,
+        params.category,
+        params.priority,
+        params.content,
+        params.timestamp
+      )
+
+      this.reminderCache.set(params.key, reminder)
+    }
+  }
+
   public resetSession(): void {
     this.sessionState = {
       lastTodoUpdate: 0,
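
Taken together, the systemReminder.ts changes give mentions a simple lifecycle: a mention event creates at most one reminder per key, the reminder is cached, and only reminders younger than five seconds are surfaced while stale ones are evicted. A minimal standalone sketch of that flow, using a reduced ReminderMessage shape rather than the real class (which also tracks categories, priorities, and session counters):

```ts
// Simplified sketch of the mention-reminder lifecycle; not the shipped class.
type ReminderMessage = { type: string; content: string; timestamp: number }

const MENTION_FRESHNESS_WINDOW = 5000 // 5 seconds, as in the diff
const MENTION_TYPES = ['agent_mention', 'file_mention', 'ask_model_mention']

const remindersSent = new Set<string>()                 // dedup: one reminder per key
const reminderCache = new Map<string, ReminderMessage>()

function createMentionReminder(key: string, reminder: ReminderMessage): void {
  if (remindersSent.has(key)) return                    // already emitted for this mention
  remindersSent.add(key)
  reminderCache.set(key, reminder)
}

function getMentionReminders(now = Date.now()): ReminderMessage[] {
  const fresh: ReminderMessage[] = []
  for (const [key, reminder] of reminderCache) {
    if (!MENTION_TYPES.includes(reminder.type)) continue
    if (now - reminder.timestamp <= MENTION_FRESHNESS_WINDOW) {
      fresh.push(reminder)                              // still fresh: surface it
    } else {
      reminderCache.delete(key)                         // expired: evict from cache
    }
  }
  return fresh
}
```

Because each key embeds the mention timestamp (for example `file_mention_<path>_<timestamp>`), repeating the same @mention later produces a new key and therefore a new reminder, while duplicate events for the same mention are dropped.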

package/src/test/testAdapters.ts

@@ -0,0 +1,96 @@
+import { ModelAdapterFactory } from '../services/modelAdapterFactory'
+import { getModelCapabilities } from '../constants/modelCapabilities'
+import { ModelProfile } from '../utils/config'
+
+// Test different models' adapter selection
+const testModels: ModelProfile[] = [
+  {
+    name: 'GPT-5 Test',
+    modelName: 'gpt-5',
+    provider: 'openai',
+    apiKey: 'test-key',
+    maxTokens: 8192,
+    contextLength: 128000,
+    reasoningEffort: 'medium',
+    isActive: true,
+    createdAt: Date.now()
+  },
+  {
+    name: 'GPT-4o Test',
+    modelName: 'gpt-4o',
+    provider: 'openai',
+    apiKey: 'test-key',
+    maxTokens: 4096,
+    contextLength: 128000,
+    isActive: true,
+    createdAt: Date.now()
+  },
+  {
+    name: 'Claude Test',
+    modelName: 'claude-3-5-sonnet-20241022',
+    provider: 'anthropic',
+    apiKey: 'test-key',
+    maxTokens: 4096,
+    contextLength: 200000,
+    isActive: true,
+    createdAt: Date.now()
+  },
+  {
+    name: 'O1 Test',
+    modelName: 'o1',
+    provider: 'openai',
+    apiKey: 'test-key',
+    maxTokens: 4096,
+    contextLength: 128000,
+    isActive: true,
+    createdAt: Date.now()
+  },
+  {
+    name: 'GLM-5 Test',
+    modelName: 'glm-5',
+    provider: 'custom',
+    apiKey: 'test-key',
+    maxTokens: 8192,
+    contextLength: 128000,
+    baseURL: 'https://api.glm.ai/v1',
+    isActive: true,
+    createdAt: Date.now()
+  }
+]
+
+console.log('🧪 Testing Model Adapter System\n')
+console.log('=' .repeat(60))
+
+testModels.forEach(model => {
+  console.log(`\n📊 Testing: ${model.name} (${model.modelName})`)
+  console.log('-'.repeat(40))
+
+  // Get capabilities
+  const capabilities = getModelCapabilities(model.modelName)
+  console.log(`  ✓ API Architecture: ${capabilities.apiArchitecture.primary}`)
+  console.log(`  ✓ Fallback: ${capabilities.apiArchitecture.fallback || 'none'}`)
+  console.log(`  ✓ Max Tokens Field: ${capabilities.parameters.maxTokensField}`)
+  console.log(`  ✓ Tool Calling Mode: ${capabilities.toolCalling.mode}`)
+  console.log(`  ✓ Supports Freeform: ${capabilities.toolCalling.supportsFreeform}`)
+  console.log(`  ✓ Supports Streaming: ${capabilities.streaming.supported}`)
+
+  // Test adapter creation
+  const adapter = ModelAdapterFactory.createAdapter(model)
+  console.log(`  ✓ Adapter Type: ${adapter.constructor.name}`)
+
+  // Test shouldUseResponsesAPI
+  const shouldUseResponses = ModelAdapterFactory.shouldUseResponsesAPI(model)
+  console.log(`  ✓ Should Use Responses API: ${shouldUseResponses}`)
+
+  // Test with custom endpoint
+  if (model.baseURL) {
+    const customModel = { ...model, baseURL: 'https://custom.api.com/v1' }
+    const customShouldUseResponses = ModelAdapterFactory.shouldUseResponsesAPI(customModel)
+    console.log(`  ✓ With Custom Endpoint: ${customShouldUseResponses ? 'Responses API' : 'Chat Completions'}`)
+  }
+})
+
+console.log('\n' + '='.repeat(60))
+console.log('✅ Adapter System Test Complete!')
+console.log('\nTo enable the new system, set USE_NEW_ADAPTERS=true')
+console.log('To use legacy system, set USE_NEW_ADAPTERS=false')
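
The test script only exercises the public surface of ModelAdapterFactory; the selection logic itself lives in src/services/modelAdapterFactory.ts and is not shown in this diff. Judging from the capability fields the script prints, the choice presumably reduces to branching on capabilities.apiArchitecture, roughly as in this sketch, where the 'responses_api' value and the return labels are assumptions for illustration rather than confirmed identifiers:

```ts
// Hypothetical sketch of adapter selection; the capability value checked and
// the returned labels are assumed, not taken from the factory's source.
import { getModelCapabilities } from '../constants/modelCapabilities'
import type { ModelProfile } from '../utils/config'

function pickAdapterKind(model: ModelProfile): 'responses' | 'chatCompletions' {
  const capabilities = getModelCapabilities(model.modelName)
  // Models whose primary architecture is the Responses API (gpt-5, o1, ...)
  // would get the Responses adapter; everything else falls back to the
  // Chat Completions adapter, mirroring what shouldUseResponsesAPI reports.
  return capabilities.apiArchitecture.primary === 'responses_api'
    ? 'responses'
    : 'chatCompletions'
}
```

The USE_NEW_ADAPTERS flag printed at the end of the script indicates the new adapter path can be toggled on or off against the legacy request path.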

package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx

@@ -22,7 +22,9 @@ import { debug as debugLogger } from '../../utils/debugLogger'
 import { applyMarkdown } from '../../utils/markdown'

 export const inputSchema = z.strictObject({
-  question: z.string().describe(
+  question: z.string().describe(
+    'COMPLETE SELF-CONTAINED QUESTION: Must include full background context, relevant details, and a clear independent question. The expert model will receive ONLY this content with no access to previous conversation or external context. Structure as: 1) Background/Context 2) Specific situation/problem 3) Clear question. Ensure the expert can fully understand and respond without needing additional information.'
+  ),
   expert_model: z
     .string()
     .describe(

@@ -45,29 +47,40 @@ export type Out = {
 export const AskExpertModelTool = {
   name: 'AskExpertModel',
   async description() {
-    return
+    return "Consult external AI models for expert opinions and analysis"
   },
   async prompt() {
-    return `
-
-
-
-
-
-    -
-
-
-
-
-    -
-
-
-
-
-
-
-
-
+    return `Ask a question to a specific external AI model for expert analysis.
+
+This tool allows you to consult different AI models for their unique perspectives and expertise.
+
+CRITICAL REQUIREMENT FOR QUESTION PARAMETER:
+The question MUST be completely self-contained and include:
+1. FULL BACKGROUND CONTEXT - All relevant information the expert needs
+2. SPECIFIC SITUATION - Clear description of the current scenario/problem
+3. INDEPENDENT QUESTION - What exactly you want the expert to analyze/answer
+
+The expert model receives ONLY your question content with NO access to:
+- Previous conversation history (unless using existing session)
+- Current codebase or file context
+- User's current task or project details
+
+IMPORTANT: This tool is for asking questions to models, not for task execution.
+- Use when you need a specific model's opinion or analysis
+- Use when you want to compare different models' responses
+- Use the @ask-[model] format when available
+
+The expert_model parameter accepts:
+- OpenAI: gpt-4, gpt-5, o1-preview
+- Anthropic: claude-3-5-sonnet, claude-3-opus
+- Others: kimi, gemini-pro, mixtral
+
+Example of well-structured question:
+"Background: I'm working on a React TypeScript application with performance issues. The app renders a large list of 10,000 items using a simple map() function, causing UI freezing.
+
+Current situation: Users report 3-5 second delays when scrolling through the list. The component re-renders the entire list on every state change.
+
+Question: What are the most effective React optimization techniques for handling large lists, and how should I prioritize implementing virtualization vs memoization vs other approaches?"`
   },
   isReadOnly() {
     return true
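
Following that guidance, a call to the tool would carry something like the input below. The three fields are the same ones destructured in the validation hunk that follows; the question text is an invented example, and 'new' is the chat_session_id value the render logic treats as a fresh session:

```ts
// Example AskExpertModel input that satisfies the schema and prompt guidance.
const exampleInput = {
  question:
    'Background: Node CLI tool written in TypeScript with an Ink terminal UI. ' +
    'Current situation: streamed responses flicker when the terminal resizes. ' +
    'Question: what rendering strategies reduce flicker for streaming text in Ink?',
  expert_model: 'gpt-5',   // one of the documented options, e.g. gpt-4, gpt-5, o1-preview
  chat_session_id: 'new',  // 'new' starts a fresh session; otherwise reuse a prior session id
}
```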

@@ -89,11 +102,12 @@ IMPORTANT: Always use the precise model name the user requested. The tool will h
     question,
     expert_model,
     chat_session_id,
-  }): Promise<ValidationResult> {
+  }, context?: any): Promise<ValidationResult> {
     if (!question.trim()) {
       return { result: false, message: 'Question cannot be empty' }
     }

+
     if (!expert_model.trim()) {
       return { result: false, message: 'Expert model must be specified' }
     }

@@ -106,6 +120,35 @@ IMPORTANT: Always use the precise model name the user requested. The tool will h
       }
     }

+    // Check if trying to consult the same model we're currently running
+    try {
+      const modelManager = getModelManager()
+
+      // Get current model based on context
+      let currentModel: string
+      if (context?.agentId && context?.options?.model) {
+        // In subagent context (Task tool)
+        currentModel = context.options.model
+      } else {
+        // In main agent context or after model switch
+        currentModel = modelManager.getModelName('main') || ''
+      }
+
+      // Normalize model names for comparison
+      const normalizedExpert = expert_model.toLowerCase().replace(/[^a-z0-9]/g, '')
+      const normalizedCurrent = currentModel.toLowerCase().replace(/[^a-z0-9]/g, '')
+
+      if (normalizedExpert === normalizedCurrent) {
+        return {
+          result: false,
+          message: `You are already running as ${currentModel}. Consulting the same model would be redundant. Please choose a different model or handle the task directly.`
+        }
+      }
+    } catch (e) {
+      // If we can't determine current model, allow the request
+      debugLogger('AskExpertModel', 'Could not determine current model:', e)
+    }
+
     // Validate that the model exists and is available
     try {
       const modelManager = getModelManager()
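
The self-consultation guard compares names only after lowercasing and stripping every non-alphanumeric character, so cosmetic differences between the configured model name and the requested expert do not defeat the check. A quick illustration using the same regex as the diff:

```ts
// Same normalization as in validateInput; only letters and digits survive.
const normalize = (name: string) => name.toLowerCase().replace(/[^a-z0-9]/g, '')

normalize('GPT-5')                       // 'gpt5'
normalize('gpt_5')                       // 'gpt5'  -> treated as the same model
normalize('claude-3-5-sonnet-20241022')  // 'claude35sonnet20241022'

// If normalize(expert_model) === normalize(currentModel), the request is
// rejected with the "already running as ..." validation message.
```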

@@ -142,64 +185,72 @@ IMPORTANT: Always use the precise model name the user requested. The tool will h
   ) {
     if (!question || !expert_model) return null
     const isNewSession = chat_session_id === 'new'
-    const sessionDisplay = isNewSession ? 'new session' : chat_session_id
+    const sessionDisplay = isNewSession ? 'new session' : `session ${chat_session_id.substring(0, 5)}...`
+    const theme = getTheme()

     if (verbose) {
-      const theme = getTheme()
       return (
         <Box flexDirection="column">
-          <Text bold color=
-          <
-
-            borderColor="green"
-            paddingX={1}
-            paddingY={0}
-            marginTop={1}
-          >
+          <Text bold color="yellow">{expert_model}</Text>
+          <Text color={theme.secondaryText}>{sessionDisplay}</Text>
+          <Box marginTop={1}>
             <Text color={theme.text}>
-              {
+              {question.length > 300 ? question.substring(0, 300) + '...' : question}
             </Text>
           </Box>
         </Box>
       )
     }
-    return
+    return (
+      <Box flexDirection="column">
+        <Text bold color="yellow">{expert_model} </Text>
+        <Text color={theme.secondaryText} dimColor>({sessionDisplay})</Text>
+      </Box>
+    )
   },

-  renderToolResultMessage(content
+  renderToolResultMessage(content) {
+    const verbose = true // Show more content
     const theme = getTheme()

     if (typeof content === 'object' && content && 'expertAnswer' in content) {
       const expertResult = content as Out
-      const isError = expertResult.expertAnswer.startsWith('
+      const isError = expertResult.expertAnswer.startsWith('Error') || expertResult.expertAnswer.includes('failed')
       const isInterrupted = expertResult.chatSessionId === 'interrupted'

       if (isInterrupted) {
         return (
           <Box flexDirection="row">
-            <Text
-            <Text color={theme.error}>[Expert consultation interrupted]</Text>
+            <Text color={theme.secondaryText}>Consultation interrupted</Text>
           </Box>
         )
       }

       const answerText = verbose
         ? expertResult.expertAnswer.trim()
-        : expertResult.expertAnswer.length >
-          ? expertResult.expertAnswer.substring(0,
+        : expertResult.expertAnswer.length > 500
+          ? expertResult.expertAnswer.substring(0, 500) + '...'
           : expertResult.expertAnswer.trim()

+      if (isError) {
+        return (
+          <Box flexDirection="column">
+            <Text color="red">{answerText}</Text>
+          </Box>
+        )
+      }
+
       return (
         <Box flexDirection="column">
-          <
-
-
-
-
-
-          >
-          <Text color={
-
+          <Text bold color={theme.text}>Response from {expertResult.expertModelName}:</Text>
+          <Box marginTop={1}>
+            <Text color={theme.text}>
+              {applyMarkdown(answerText)}
+            </Text>
+          </Box>
+          <Box marginTop={1}>
+            <Text color={theme.secondaryText} dimColor>
+              Session: {expertResult.chatSessionId.substring(0, 8)}
             </Text>
           </Box>
         </Box>

@@ -208,8 +259,7 @@ IMPORTANT: Always use the precise model name the user requested. The tool will h

     return (
       <Box flexDirection="row">
-        <Text
-        <Text color={theme.secondaryText}>Expert consultation completed</Text>
+        <Text color={theme.secondaryText}>Consultation completed</Text>
       </Box>
     )
   },

@@ -314,6 +364,14 @@ ${output.expertAnswer}`
       return yield* this.handleInterrupt()
     }

+    // Yield progress message to show we're connecting
+    yield {
+      type: 'progress',
+      content: createAssistantMessage(
+        `Connecting to ${expertModel}... (timeout: 5 minutes)`
+      ),
+    }
+
     // Call model with comprehensive error handling and timeout
     let response
     try {

@@ -332,7 +390,7 @@ ${output.expertAnswer}`
       })

       // Create a timeout promise to prevent hanging
-      const timeoutMs =
+      const timeoutMs = 300000 // 300 seconds (5 minutes) timeout for external models
       const timeoutPromise = new Promise((_, reject) => {
         setTimeout(() => {
           reject(new Error(`Expert model query timed out after ${timeoutMs/1000}s`))
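
Only the timeout promise itself appears in this hunk; the diff does not show how it is consumed, but presumably it is raced against the actual model call so a hung request rejects after five minutes. A sketch of that pattern, with queryExpertModel standing in as a placeholder for the real call:

```ts
// Sketch of the timeout pattern implied by the diff. `queryExpertModel`
// is a hypothetical placeholder, not an export of this codebase.
declare function queryExpertModel(question: string, model: string): Promise<unknown>

async function askWithTimeout(question: string, expertModel: string) {
  const timeoutMs = 300000 // 300 seconds (5 minutes), matching the new constant

  const timeoutPromise = new Promise<never>((_, reject) => {
    setTimeout(() => {
      reject(new Error(`Expert model query timed out after ${timeoutMs / 1000}s`))
    }, timeoutMs)
  })

  // Whichever settles first wins: the model response or the timeout rejection.
  return Promise.race([queryExpertModel(question, expertModel), timeoutPromise])
}
```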

@@ -369,19 +427,25 @@ ${output.expertAnswer}`

       if (error.message?.includes('timed out')) {
         throw new Error(
-          `Expert model '${expertModel}' timed out
+          `Expert model '${expertModel}' timed out after 5 minutes.\n\n` +
+            `Suggestions:\n` +
+            `  - The model might be experiencing high load\n` +
+            `  - Try a different model or retry later\n` +
+            `  - Consider breaking down your question into smaller parts`,
         )
       }

       if (error.message?.includes('rate limit')) {
         throw new Error(
-
+          `Rate limit exceeded for ${expertModel}.\n\n` +
+            `Please wait a moment and try again, or use a different model.`,
         )
       }

       if (error.message?.includes('invalid api key')) {
         throw new Error(
-
+          `Invalid API key for ${expertModel}.\n\n` +
+            `Please check your model configuration with /model command.`,
         )
       }


package/src/tools/BashTool/BashTool.tsx

@@ -38,35 +38,8 @@ export type Out = {

 export const BashTool = {
   name: 'Bash',
-  async description(
-
-      const result = await queryQuick({
-        systemPrompt: [
-          `You are a command description generator. Write a clear, concise description of what this command does in 5-10 words. Examples:
-
-Input: ls
-Output: Lists files in current directory
-
-Input: git status
-Output: Shows working tree status
-
-Input: npm install
-Output: Installs package dependencies
-
-Input: mkdir foo
-Output: Creates directory 'foo'`,
-        ],
-        userPrompt: `Describe this command: ${command}`,
-      })
-      const description =
-        result.message.content[0]?.type === 'text'
-          ? result.message.content[0].text
-          : null
-      return description || 'Executes a bash command'
-    } catch (error) {
-      logError(error)
-      return 'Executes a bash command'
-    }
+  async description() {
+    return 'Executes shell commands on your computer'
   },
   async prompt() {
     const config = getGlobalConfig()

@@ -149,8 +122,8 @@ export const BashTool = {
     return <FallbackToolUseRejectedMessage />
   },

-  renderToolResultMessage(content
-    return <BashToolResultMessage content={content} verbose={
+  renderToolResultMessage(content) {
+    return <BashToolResultMessage content={content} verbose={false} />
   },
   renderResultForAssistant({ interrupted, stdout, stderr }) {
     let errorMessage = stderr.trim()

package/src/tools/BashTool/BashToolResultMessage.tsx

@@ -9,7 +9,7 @@ type Props = {
   verbose: boolean
 }

-function BashToolResultMessage({ content, verbose }: Props): JSX.Element {
+function BashToolResultMessage({ content, verbose }: Props): React.JSX.Element {
   const { stdout, stdoutLines, stderr, stderrLines } = content

   return (

package/src/tools/FileEditTool/FileEditTool.tsx

@@ -47,10 +47,8 @@ export const FileEditTool = {
     return DESCRIPTION
   },
   inputSchema,
-  userFacingName(
-
-    if (new_string === '') return 'Delete'
-    return 'Update'
+  userFacingName() {
+    return 'Edit'
   },
   async isEnabled() {
     return true

@@ -67,7 +65,8 @@ export const FileEditTool = {
   renderToolUseMessage(input, { verbose }) {
     return `file_path: ${verbose ? input.file_path : relative(getCwd(), input.file_path)}`
   },
-  renderToolResultMessage({ filePath, structuredPatch }
+  renderToolResultMessage({ filePath, structuredPatch }) {
+    const verbose = false // Set default value for verbose
     return (
       <FileEditToolUpdatedMessage
         filePath={filePath}