@shareai-lab/kode 1.0.71 → 1.0.75

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. package/README.md +160 -1
  2. package/README.zh-CN.md +65 -1
  3. package/cli.js +5 -10
  4. package/package.json +6 -2
  5. package/src/ProjectOnboarding.tsx +47 -29
  6. package/src/Tool.ts +33 -4
  7. package/src/commands/agents.tsx +3401 -0
  8. package/src/commands/help.tsx +2 -2
  9. package/src/commands/resume.tsx +2 -1
  10. package/src/commands/terminalSetup.ts +4 -4
  11. package/src/commands.ts +3 -0
  12. package/src/components/ApproveApiKey.tsx +1 -1
  13. package/src/components/Config.tsx +10 -6
  14. package/src/components/ConsoleOAuthFlow.tsx +5 -4
  15. package/src/components/CustomSelect/select-option.tsx +28 -2
  16. package/src/components/CustomSelect/select.tsx +14 -5
  17. package/src/components/CustomSelect/theme.ts +45 -0
  18. package/src/components/Help.tsx +4 -4
  19. package/src/components/InvalidConfigDialog.tsx +1 -1
  20. package/src/components/LogSelector.tsx +1 -1
  21. package/src/components/MCPServerApprovalDialog.tsx +1 -1
  22. package/src/components/Message.tsx +2 -0
  23. package/src/components/ModelListManager.tsx +10 -6
  24. package/src/components/ModelSelector.tsx +201 -23
  25. package/src/components/ModelStatusDisplay.tsx +7 -5
  26. package/src/components/PromptInput.tsx +146 -96
  27. package/src/components/SentryErrorBoundary.ts +9 -3
  28. package/src/components/StickerRequestForm.tsx +16 -0
  29. package/src/components/StructuredDiff.tsx +36 -29
  30. package/src/components/TextInput.tsx +13 -0
  31. package/src/components/TodoItem.tsx +47 -0
  32. package/src/components/TrustDialog.tsx +1 -1
  33. package/src/components/messages/AssistantLocalCommandOutputMessage.tsx +5 -1
  34. package/src/components/messages/AssistantToolUseMessage.tsx +14 -4
  35. package/src/components/messages/TaskProgressMessage.tsx +32 -0
  36. package/src/components/messages/TaskToolMessage.tsx +58 -0
  37. package/src/components/permissions/FallbackPermissionRequest.tsx +2 -4
  38. package/src/components/permissions/FileEditPermissionRequest/FileEditPermissionRequest.tsx +1 -1
  39. package/src/components/permissions/FileEditPermissionRequest/FileEditToolDiff.tsx +5 -3
  40. package/src/components/permissions/FileWritePermissionRequest/FileWritePermissionRequest.tsx +1 -1
  41. package/src/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.tsx +5 -3
  42. package/src/components/permissions/FilesystemPermissionRequest/FilesystemPermissionRequest.tsx +2 -4
  43. package/src/components/permissions/PermissionRequest.tsx +3 -5
  44. package/src/constants/macros.ts +2 -0
  45. package/src/constants/modelCapabilities.ts +179 -0
  46. package/src/constants/models.ts +90 -0
  47. package/src/constants/product.ts +1 -1
  48. package/src/context.ts +7 -7
  49. package/src/entrypoints/cli.tsx +23 -3
  50. package/src/entrypoints/mcp.ts +10 -10
  51. package/src/hooks/useCanUseTool.ts +1 -1
  52. package/src/hooks/useTextInput.ts +5 -2
  53. package/src/hooks/useUnifiedCompletion.ts +1405 -0
  54. package/src/messages.ts +1 -0
  55. package/src/query.ts +3 -0
  56. package/src/screens/ConfigureNpmPrefix.tsx +1 -1
  57. package/src/screens/Doctor.tsx +1 -1
  58. package/src/screens/REPL.tsx +11 -12
  59. package/src/services/adapters/base.ts +38 -0
  60. package/src/services/adapters/chatCompletions.ts +90 -0
  61. package/src/services/adapters/responsesAPI.ts +170 -0
  62. package/src/services/claude.ts +198 -62
  63. package/src/services/customCommands.ts +43 -22
  64. package/src/services/gpt5ConnectionTest.ts +340 -0
  65. package/src/services/mcpClient.ts +1 -1
  66. package/src/services/mentionProcessor.ts +273 -0
  67. package/src/services/modelAdapterFactory.ts +69 -0
  68. package/src/services/openai.ts +534 -14
  69. package/src/services/responseStateManager.ts +90 -0
  70. package/src/services/systemReminder.ts +113 -12
  71. package/src/test/testAdapters.ts +96 -0
  72. package/src/tools/AskExpertModelTool/AskExpertModelTool.tsx +120 -56
  73. package/src/tools/BashTool/BashTool.tsx +4 -31
  74. package/src/tools/BashTool/BashToolResultMessage.tsx +1 -1
  75. package/src/tools/BashTool/OutputLine.tsx +1 -0
  76. package/src/tools/FileEditTool/FileEditTool.tsx +4 -5
  77. package/src/tools/FileReadTool/FileReadTool.tsx +43 -10
  78. package/src/tools/MCPTool/MCPTool.tsx +2 -1
  79. package/src/tools/MultiEditTool/MultiEditTool.tsx +2 -2
  80. package/src/tools/NotebookReadTool/NotebookReadTool.tsx +15 -23
  81. package/src/tools/StickerRequestTool/StickerRequestTool.tsx +1 -1
  82. package/src/tools/TaskTool/TaskTool.tsx +170 -86
  83. package/src/tools/TaskTool/prompt.ts +61 -25
  84. package/src/tools/ThinkTool/ThinkTool.tsx +1 -3
  85. package/src/tools/TodoWriteTool/TodoWriteTool.tsx +65 -41
  86. package/src/tools/lsTool/lsTool.tsx +5 -2
  87. package/src/tools.ts +16 -16
  88. package/src/types/conversation.ts +51 -0
  89. package/src/types/logs.ts +58 -0
  90. package/src/types/modelCapabilities.ts +64 -0
  91. package/src/types/notebook.ts +87 -0
  92. package/src/utils/advancedFuzzyMatcher.ts +290 -0
  93. package/src/utils/agentLoader.ts +284 -0
  94. package/src/utils/ask.tsx +1 -0
  95. package/src/utils/commands.ts +1 -1
  96. package/src/utils/commonUnixCommands.ts +161 -0
  97. package/src/utils/config.ts +173 -2
  98. package/src/utils/conversationRecovery.ts +1 -0
  99. package/src/utils/debugLogger.ts +13 -13
  100. package/src/utils/exampleCommands.ts +1 -0
  101. package/src/utils/fuzzyMatcher.ts +328 -0
  102. package/src/utils/messages.tsx +6 -5
  103. package/src/utils/model.ts +120 -42
  104. package/src/utils/responseState.ts +23 -0
  105. package/src/utils/secureFile.ts +559 -0
  106. package/src/utils/terminal.ts +1 -0
  107. package/src/utils/theme.ts +11 -0
  108. package/src/hooks/useSlashCommandTypeahead.ts +0 -137
@@ -38,35 +38,8 @@ export type Out = {
 
 export const BashTool = {
   name: 'Bash',
-  async description({ command }) {
-    try {
-      const result = await queryQuick({
-        systemPrompt: [
-          `You are a command description generator. Write a clear, concise description of what this command does in 5-10 words. Examples:
-
-Input: ls
-Output: Lists files in current directory
-
-Input: git status
-Output: Shows working tree status
-
-Input: npm install
-Output: Installs package dependencies
-
-Input: mkdir foo
-Output: Creates directory 'foo'`,
-        ],
-        userPrompt: `Describe this command: ${command}`,
-      })
-      const description =
-        result.message.content[0]?.type === 'text'
-          ? result.message.content[0].text
-          : null
-      return description || 'Executes a bash command'
-    } catch (error) {
-      logError(error)
-      return 'Executes a bash command'
-    }
+  async description() {
+    return 'Executes shell commands on your computer'
   },
   async prompt() {
     const config = getGlobalConfig()
@@ -149,8 +122,8 @@ export const BashTool = {
     return <FallbackToolUseRejectedMessage />
   },
 
-  renderToolResultMessage(content, { verbose }) {
-    return <BashToolResultMessage content={content} verbose={verbose} />
+  renderToolResultMessage(content) {
+    return <BashToolResultMessage content={content} verbose={false} />
   },
   renderResultForAssistant({ interrupted, stdout, stderr }) {
     let errorMessage = stderr.trim()
@@ -9,7 +9,7 @@ type Props = {
   verbose: boolean
 }
 
-function BashToolResultMessage({ content, verbose }: Props): JSX.Element {
+function BashToolResultMessage({ content, verbose }: Props): React.JSX.Element {
   const { stdout, stdoutLines, stderr, stderrLines } = content
 
   return (
@@ -30,6 +30,7 @@ export function OutputLine({
   lines: number
   verbose: boolean
   isError?: boolean
+  key?: React.Key
 }) {
   return (
     <Box justifyContent="space-between" width="100%">
@@ -47,10 +47,8 @@ export const FileEditTool = {
     return DESCRIPTION
   },
   inputSchema,
-  userFacingName({ old_string, new_string }) {
-    if (old_string === '') return 'Create'
-    if (new_string === '') return 'Delete'
-    return 'Update'
+  userFacingName() {
+    return 'Edit'
   },
   async isEnabled() {
     return true
@@ -67,7 +65,8 @@ export const FileEditTool = {
   renderToolUseMessage(input, { verbose }) {
     return `file_path: ${verbose ? input.file_path : relative(getCwd(), input.file_path)}`
   },
-  renderToolResultMessage({ filePath, structuredPatch }, { verbose }) {
+  renderToolResultMessage({ filePath, structuredPatch }) {
+    const verbose = false // Set default value for verbose
     return (
       <FileEditToolUpdatedMessage
         filePath={filePath}
@@ -1,8 +1,8 @@
 import { ImageBlockParam } from '@anthropic-ai/sdk/resources/index.mjs'
-import { existsSync, readFileSync, statSync } from 'fs'
+import { statSync } from 'node:fs'
 import { Box, Text } from 'ink'
-import * as path from 'path'
-import { extname, relative } from 'path'
+import * as path from 'node:path'
+import { extname, relative } from 'node:path'
 import * as React from 'react'
 import { z } from 'zod'
 import { FallbackToolUseRejectedMessage } from '../../components/FallbackToolUseRejectedMessage'
@@ -24,6 +24,7 @@ import {
 } from '../../services/fileFreshness'
 import { DESCRIPTION, PROMPT } from './prompt'
 import { hasReadPermission } from '../../utils/permissions/filesystem'
+import { secureFileService } from '../../utils/secureFile'
 
 const MAX_LINES_TO_RENDER = 5
 const MAX_OUTPUT_SIZE = 0.25 * 1024 * 1024 // 0.25MB in bytes
@@ -93,7 +94,8 @@ export const FileReadTool = {
       .map(([key, value]) => `${key}: ${JSON.stringify(value)}`)
       .join(', ')
   },
-  renderToolResultMessage(output, { verbose }) {
+  renderToolResultMessage(output) {
+    const verbose = false // Set default value for verbose
     // TODO: Render recursively
     switch (output.type) {
       case 'image':
@@ -143,7 +145,9 @@ export const FileReadTool = {
   async validateInput({ file_path, offset, limit }) {
     const fullFilePath = normalizeFilePath(file_path)
 
-    if (!existsSync(fullFilePath)) {
+    // Use secure file service to check if file exists and get file info
+    const fileCheck = secureFileService.safeGetFileInfo(fullFilePath)
+    if (!fileCheck.success) {
       // Try to find a similar file with a different extension
       const similarFilename = findSimilarFile(fullFilePath)
       let message = 'File does not exist.'
@@ -159,8 +163,7 @@ export const FileReadTool = {
       }
     }
 
-    // Get file stats to check size
-    const stats = statSync(fullFilePath)
+    const stats = fileCheck.stats!
     const fileSize = stats.size
     const ext = path.extname(fullFilePath).toLowerCase()
 
@@ -315,7 +318,18 @@ async function readImage(
     const sharp = (
       (await import('sharp')) as unknown as { default: typeof import('sharp') }
     ).default
-    const image = sharp(readFileSync(filePath))
+
+    // Use secure file service to read the file
+    const fileReadResult = secureFileService.safeReadFile(filePath, {
+      encoding: 'buffer' as BufferEncoding,
+      maxFileSize: MAX_IMAGE_SIZE
+    })
+
+    if (!fileReadResult.success) {
+      throw new Error(`Failed to read image file: ${fileReadResult.error}`)
+    }
+
+    const image = sharp(fileReadResult.content as Buffer)
     const metadata = await image.metadata()
 
     if (!metadata.width || !metadata.height) {
@@ -335,7 +349,17 @@ async function readImage(
       width <= MAX_WIDTH &&
       height <= MAX_HEIGHT
     ) {
-      return createImageResponse(readFileSync(filePath), ext)
+      // Use secure file service to read the file
+      const fileReadResult = secureFileService.safeReadFile(filePath, {
+        encoding: 'buffer' as BufferEncoding,
+        maxFileSize: MAX_IMAGE_SIZE
+      })
+
+      if (!fileReadResult.success) {
+        throw new Error(`Failed to read image file: ${fileReadResult.error}`)
+      }
+
+      return createImageResponse(fileReadResult.content as Buffer, ext)
     }
 
     if (width > MAX_WIDTH) {
@@ -366,6 +390,15 @@ async function readImage(
   } catch (e) {
     logError(e)
     // If any error occurs during processing, return original image
-    return createImageResponse(readFileSync(filePath), ext)
+    const fileReadResult = secureFileService.safeReadFile(filePath, {
+      encoding: 'buffer' as BufferEncoding,
+      maxFileSize: MAX_IMAGE_SIZE
+    })
+
+    if (!fileReadResult.success) {
+      throw new Error(`Failed to read image file: ${fileReadResult.error}`)
+    }
+
+    return createImageResponse(fileReadResult.content as Buffer, ext)
   }
 }
@@ -52,7 +52,8 @@ export const MCPTool = {
   renderToolUseRejectedMessage() {
     return <FallbackToolUseRejectedMessage />
   },
-  renderToolResultMessage(output, { verbose }) {
+  renderToolResultMessage(output) {
+    const verbose = false // Set default value for verbose
     if (Array.isArray(output)) {
       return (
         <Box flexDirection="column">
@@ -58,8 +58,8 @@ export const MultiEditTool = {
     return PROMPT
   },
   inputSchema,
-  userFacingName({ edits }) {
-    return `Multi-Edit (${edits.length} changes)`
+  userFacingName() {
+    return 'Multi-Edit'
   },
   async isEnabled() {
     return true
@@ -18,7 +18,7 @@ import {
   NotebookCellSourceOutput,
   NotebookCellOutput,
   NotebookCellType,
-} from '../../types/notebook.js'
+} from '../../types/notebook'
 import { formatOutput } from '../BashTool/utils'
 import { getCwd } from '../../utils/state'
 import { findSimilarFile } from '../../utils/file'
@@ -36,26 +36,6 @@ const inputSchema = z.strictObject({
 type In = typeof inputSchema
 type Out = NotebookCellSource[]
 
-function renderResultForAssistant(data: NotebookCellSource[]) {
-  const allResults = data.flatMap(getToolResultFromCell)
-
-  // Merge adjacent text blocks
-  return allResults.reduce<(TextBlockParam | ImageBlockParam)[]>(
-    (acc, curr) => {
-      if (acc.length === 0) return [curr]
-
-      const prev = acc[acc.length - 1]
-      if (prev && prev.type === 'text' && curr.type === 'text') {
-        // Merge the text blocks
-        prev.text += '\n' + curr.text
-        return acc
-      }
-
-      return [...acc, curr]
-    },
-    [],
-  )
-}
 
 export const NotebookReadTool = {
   name: 'ReadNotebook',
@@ -141,11 +121,23 @@ export const NotebookReadTool = {
 
     yield {
       type: 'result',
-      resultForAssistant: renderResultForAssistant(cells),
+      resultForAssistant: this.renderResultForAssistant(cells),
      data: cells,
    }
  },
-  renderResultForAssistant,
+  renderResultForAssistant(data: NotebookCellSource[]) {
+    // Convert the complex structure to a string representation for the assistant
+    return data.map((cell, index) => {
+      let content = `Cell ${index + 1} (${cell.cellType}):\n${cell.source}`
+      if (cell.outputs && cell.outputs.length > 0) {
+        const outputText = cell.outputs.map(output => output.text).filter(Boolean).join('\n')
+        if (outputText) {
+          content += `\nOutput:\n${outputText}`
+        }
+      }
+      return content
+    }).join('\n\n')
+  },
 } satisfies Tool<In, Out>
 
 function processOutputText(text: string | string[] | undefined): string {
@@ -6,7 +6,7 @@ import { DESCRIPTION, PROMPT } from './prompt'
 import {
   StickerRequestForm,
   FormData,
-} from '../../components/StickerRequestForm.js'
+} from '../../components/StickerRequestForm'
 import { checkGate, logEvent } from '../../services/statsig'
 import { getTheme } from '../../utils/theme'
 
@@ -2,7 +2,7 @@ import { TextBlock } from '@anthropic-ai/sdk/resources/index.mjs'
 import chalk from 'chalk'
 import { last, memoize } from 'lodash-es'
 import { EOL } from 'os'
-import * as React from 'react'
+import React, { useState, useEffect } from 'react'
 import { Box, Text } from 'ink'
 import { z } from 'zod'
 import { Tool, ValidationResult } from '../../Tool'
@@ -32,6 +32,7 @@ import { generateAgentId } from '../../utils/agentStorage'
 import { debug as debugLogger } from '../../utils/debugLogger'
 import { getTaskTools, getPrompt } from './prompt'
 import { TOOL_NAME } from './constants'
+import { getActiveAgents, getAgentByType, getAvailableAgentTypes } from '../../utils/agentLoader'
 
 const inputSchema = z.object({
   description: z
@@ -44,46 +45,28 @@ const inputSchema = z.object({
     .describe(
       'Optional: Specific model name to use for this task. If not provided, uses the default task model pointer.',
     ),
+  subagent_type: z
+    .string()
+    .optional()
+    .describe(
+      'The type of specialized agent to use for this task',
+    ),
 })
 
 export const TaskTool = {
   async prompt({ safeMode }) {
+    // Match original Claude Code - prompt returns full agent descriptions
     return await getPrompt(safeMode)
   },
   name: TOOL_NAME,
   async description() {
-    const modelManager = getModelManager()
-    const availableModels = modelManager.getAllAvailableModelNames()
-    const currentTaskModel =
-      modelManager.getModelName('task') || '<Not configured>'
-
-    if (availableModels.length === 0) {
-      return `Launch a new agent to handle complex, multi-step tasks autonomously.
-
-⚠️ No models configured. Use /model to configure models first.
-
-Usage: Provide detailed task description for autonomous execution. The agent will return results in a single response.`
-    }
-
-    return `Launch a new agent to handle complex, multi-step tasks autonomously.
-
-Available models: ${availableModels.join(', ')}
-
-When to specify a model_name:
-- Specify model_name for tasks requiring specific model capabilities
-- If not provided, uses current task default: '${currentTaskModel}'
-- Use reasoning models for complex analysis
-- Use quick models for simple operations
-
-The model_name parameter accepts actual model names (like 'claude-3-5-sonnet-20241022', 'gpt-4', etc.)
-
-Usage: Provide detailed task description for autonomous execution. The agent will return results in a single response.`
+    // Match original Claude Code exactly - simple description
+    return "Launch a new task"
   },
   inputSchema,
 
-  // 🔧 ULTRA FIX: Complete revert to original AgentTool pattern
   async *call(
-    { description, prompt, model_name },
+    { description, prompt, model_name, subagent_type },
    {
      abortController,
      options: { safeMode = false, forkNumber, messageLogName, verbose },
@@ -91,14 +74,72 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
    },
  ) {
    const startTime = Date.now()
-    const messages: MessageType[] = [createUserMessage(prompt)]
-    const tools = await getTaskTools(safeMode)
+
+    // Default to general-purpose if no subagent_type specified
+    const agentType = subagent_type || 'general-purpose'
+
+    // Apply subagent configuration
+    let effectivePrompt = prompt
+    let effectiveModel = model_name || 'task'
+    let toolFilter = null
+    let temperature = undefined
+
+    // Load agent configuration dynamically
+    if (agentType) {
+      const agentConfig = await getAgentByType(agentType)
+
+      if (!agentConfig) {
+        // If agent type not found, return helpful message instead of throwing
+        const availableTypes = await getAvailableAgentTypes()
+        const helpMessage = `Agent type '${agentType}' not found.\n\nAvailable agents:\n${availableTypes.map(t => ` • ${t}`).join('\n')}\n\nUse /agents command to manage agent configurations.`
+
+        yield {
+          type: 'result',
+          data: { error: helpMessage },
+          resultForAssistant: helpMessage,
+        }
+        return
+      }
+
+      // Apply system prompt if configured
+      if (agentConfig.systemPrompt) {
+        effectivePrompt = `${agentConfig.systemPrompt}\n\n${prompt}`
+      }
+
+      // Apply model if not overridden by model_name parameter
+      if (!model_name && agentConfig.model_name) {
+        // Support inherit: keep pointer-based default
+        if (agentConfig.model_name !== 'inherit') {
+          effectiveModel = agentConfig.model_name as string
+        }
+      }
+
+      // Store tool filter for later application
+      toolFilter = agentConfig.tools
+
+      // Note: temperature is not currently in our agent configs
+      // but could be added in the future
+    }
+
+    const messages: MessageType[] = [createUserMessage(effectivePrompt)]
+    let tools = await getTaskTools(safeMode)
+
+    // Apply tool filtering if specified by subagent config
+    if (toolFilter) {
+      // Back-compat: ['*'] means all tools
+      const isAllArray = Array.isArray(toolFilter) && toolFilter.length === 1 && toolFilter[0] === '*'
+      if (toolFilter === '*' || isAllArray) {
+        // no-op, keep all tools
+      } else if (Array.isArray(toolFilter)) {
+        tools = tools.filter(tool => toolFilter.includes(tool.name))
+      }
+    }
 
    // We yield an initial message immediately so the UI
    // doesn't move around when messages start streaming back.
    yield {
      type: 'progress',
-      content: createAssistantMessage(chalk.dim('Initializing…')),
+      content: createAssistantMessage(chalk.dim(`[${agentType}] ${description}`)),
      normalizedMessages: normalizeMessages(messages),
      tools,
    }
@@ -109,8 +150,8 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
      getMaxThinkingTokens(messages),
    ])
 
-    // Simple model resolution - match original AgentTool pattern
-    const modelToUse = model_name || 'task'
+    // Model already resolved in effectiveModel variable above
+    const modelToUse = effectiveModel
 
    // Inject model context to prevent self-referential expert consultations
    taskPrompt.push(`\nIMPORTANT: You are currently running as ${modelToUse}. You do not need to consult ${modelToUse} via AskExpertModel since you ARE ${modelToUse}. Complete tasks directly using your capabilities.`)
@@ -125,6 +166,23 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
    const taskId = generateAgentId()
 
    // 🔧 ULTRA SIMPLIFIED: Exact original AgentTool pattern
+    // Build query options, adding temperature if specified
+    const queryOptions = {
+      safeMode,
+      forkNumber,
+      messageLogName,
+      tools,
+      commands: [],
+      verbose,
+      maxThinkingTokens,
+      model: modelToUse,
+    }
+
+    // Add temperature if specified by subagent config
+    if (temperature !== undefined) {
+      queryOptions['temperature'] = temperature
+    }
+
    for await (const message of query(
      messages,
      taskPrompt,
@@ -132,16 +190,7 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
      hasPermissionsToUseTool,
      {
        abortController,
-        options: {
-          safeMode,
-          forkNumber,
-          messageLogName,
-          tools,
-          commands: [],
-          verbose,
-          maxThinkingTokens,
-          model: modelToUse,
-        },
+        options: queryOptions,
        messageId: getLastAssistantMessageId(messages),
        agentId: taskId,
        readFileTimestamps,
@@ -159,22 +208,55 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
        }
 
        const normalizedMessages = normalizeMessages(messages)
+
+        // Process tool uses and text content for better visibility
        for (const content of message.message.content) {
-          if (content.type !== 'tool_use') {
-            continue
-          }
-
-          toolUseCount++
-          yield {
-            type: 'progress',
-            content: normalizedMessages.find(
+          if (content.type === 'text' && content.text && content.text !== INTERRUPT_MESSAGE) {
+            // Show agent's reasoning/responses
+            const preview = content.text.length > 200 ? content.text.substring(0, 200) + '...' : content.text
+            yield {
+              type: 'progress',
+              content: createAssistantMessage(`[${agentType}] ${preview}`),
+              normalizedMessages,
+              tools,
+            }
+          } else if (content.type === 'tool_use') {
+            toolUseCount++
+
+            // Show which tool is being used with agent context
+            const toolMessage = normalizedMessages.find(
              _ =>
                _.type === 'assistant' &&
                _.message.content[0]?.type === 'tool_use' &&
                _.message.content[0].id === content.id,
-            ) as AssistantMessage,
-            normalizedMessages,
-            tools,
+            ) as AssistantMessage
+
+            if (toolMessage) {
+              // Clone and modify the message to show agent context
+              const modifiedMessage = {
+                ...toolMessage,
+                message: {
+                  ...toolMessage.message,
+                  content: toolMessage.message.content.map(c => {
+                    if (c.type === 'tool_use' && c.id === content.id) {
+                      // Add agent context to tool name display
+                      return {
+                        ...c,
+                        name: c.name // Keep original name, UI will handle display
+                      }
+                    }
+                    return c
+                  })
+                }
+              }
+
+              yield {
+                type: 'progress',
+                content: modifiedMessage,
+                normalizedMessages,
+                tools,
+              }
+            }
          }
        }
      }
@@ -210,7 +292,7 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
    ]
    yield {
      type: 'progress',
-      content: createAssistantMessage(`Done (${result.join(' · ')})`),
+      content: createAssistantMessage(`[${agentType}] Completed (${result.join(' · ')})`),
      normalizedMessages,
      tools,
    }
@@ -265,41 +347,59 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
      }
    }
 
+    // Validate subagent_type if provided
+    if (input.subagent_type) {
+      const availableTypes = await getAvailableAgentTypes()
+      if (!availableTypes.includes(input.subagent_type)) {
+        return {
+          result: false,
+          message: `Agent type '${input.subagent_type}' does not exist. Available types: ${availableTypes.join(', ')}`,
+          meta: {
+            subagent_type: input.subagent_type,
+            availableTypes,
+          },
+        }
+      }
+    }
+
    return { result: true }
  },
  async isEnabled() {
    return true
  },
-  userFacingName() {
-    return 'Task'
+  userFacingName(input?: any) {
+    // Return agent name with proper prefix
+    const agentType = input?.subagent_type || 'general-purpose'
+    return `agent-${agentType}`
  },
  needsPermissions() {
    return false
  },
-  renderResultForAssistant(data) {
-    return data
+  renderResultForAssistant(data: TextBlock[]) {
+    return data.map(block => block.type === 'text' ? block.text : '').join('\n')
  },
-  renderToolUseMessage({ description, prompt, model_name }, { verbose }) {
+  renderToolUseMessage({ description, prompt, model_name, subagent_type }, { verbose }) {
    if (!description || !prompt) return null
 
    const modelManager = getModelManager()
    const defaultTaskModel = modelManager.getModelName('task')
    const actualModel = model_name || defaultTaskModel
+    const agentType = subagent_type || 'general-purpose'
    const promptPreview =
      prompt.length > 80 ? prompt.substring(0, 80) + '...' : prompt
 
+    const theme = getTheme()
+
    if (verbose) {
-      const theme = getTheme()
      return (
        <Box flexDirection="column">
-          <Text bold color={theme.text}>
-            🚀 Task ({actualModel}): {description}
+          <Text>
+            [{agentType}] {actualModel}: {description}
          </Text>
          <Box
-            marginTop={1}
            paddingLeft={2}
            borderLeftStyle="single"
-            borderLeftColor={theme.border}
+            borderLeftColor={theme.secondaryBorder}
          >
            <Text color={theme.secondaryText}>{promptPreview}</Text>
          </Box>
@@ -307,12 +407,13 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
      )
    }
 
-    return `Task (${actualModel}): ${description}`
+    // Simple display: agent type, model and description
+    return `[${agentType}] ${actualModel}: ${description}`
  },
  renderToolUseRejectedMessage() {
    return <FallbackToolUseRejectedMessage />
  },
-  renderToolResultMessage(content, { verbose }) {
+  renderToolResultMessage(content) {
    const theme = getTheme()
 
    if (Array.isArray(content)) {
@@ -351,23 +452,6 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
          )}
        </Box>
      </Box>
-      {verbose && textBlocks.length > 0 && (
-        <Box
-          marginTop={1}
-          paddingLeft={4}
-          borderLeftStyle="single"
-          borderLeftColor={theme.border}
-        >
-          <Text color={theme.secondaryText}>
-            {textBlocks
-              .slice(0, 2)
-              .map(block => block.text)
-              .join('\n')
-              .substring(0, 200)}
-            {totalLength > 200 ? '...' : ''}
-          </Text>
-        </Box>
-      )}
    </Box>
  )
 }
@@ -379,4 +463,4 @@ Usage: Provide detailed task description for autonomous execution. The agent wil
      </Box>
    )
  },
-} satisfies Tool<typeof inputSchema, TextBlock[]>
+} satisfies Tool<typeof inputSchema, TextBlock[]>
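
For readers tracking the new subagent plumbing in the TaskTool hunks above, the updated inputSchema implies Task invocations of roughly the following shape. This is an illustrative sketch inferred from the fields visible in this diff (description, prompt, optional model_name, optional subagent_type defaulting to 'general-purpose'), not code shipped in the package; the concrete values are hypothetical.

// Hypothetical TaskTool input matching the updated inputSchema
const exampleTaskInput = {
  description: 'Audit config loading',                       // short task label shown in progress messages
  prompt: 'Review src/utils/config.ts and report any unsafe merges.',
  model_name: 'claude-3-5-sonnet-20241022',                  // optional; otherwise the 'task' model pointer is used
  subagent_type: 'general-purpose',                          // optional; validated against getAvailableAgentTypes()
}

Per the validateInput change above, an unknown subagent_type is rejected with the list of available agent types, and the /agents command manages those configurations.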