kimi-cli 0.44-py3-none-any.whl → 0.46-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kimi-cli might be problematic.

kimi_cli/CHANGELOG.md CHANGED
@@ -9,6 +9,29 @@ Internal builds may append content to the Unreleased section.
  Only write entries that are worth mentioning to users.
  -->

+ ## [0.46] - 2025-11-03
+
+ ### Added
+
+ - Introduce Wire over stdio for local IPC (experimental, subject to change)
+ - Support Anthropic provider type
+
+ ### Fixed
+
+ - Fix binary packed by PyInstaller not working due to wrong entrypoint
+
+ ## [0.45] - 2025-10-31
+
+ ### Added
+
+ - Allow `KIMI_MODEL_CAPABILITIES` environment variable to override model capabilities
+ - Add `--no-markdown` option to disable markdown rendering
+ - Support `openai_responses` LLM provider type
+
+ ### Fixed
+
+ - Fix crash when continuing a session
+
  ## [0.44] - 2025-10-30

  ### Changed
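The `KIMI_MODEL_CAPABILITIES` entry above corresponds to the `augment_provider_with_env_vars` change in `kimi_cli/llm.py` further down in this diff: the variable is split on commas, each entry is trimmed and lowercased, and only values present in the `LLMModelCapability` literal (checked via `typing.get_args`) are kept; unknown names are silently dropped. A self-contained sketch of that filtering step, using a made-up capability literal since the real `LLMModelCapability` values are not shown in this diff:

```python
from typing import Literal, cast, get_args

# Hypothetical stand-in; the actual LLMModelCapability values are not visible in this diff.
Capability = Literal["thinking", "image_input"]


def parse_capabilities(raw: str) -> set[Capability]:
    """Comma-separated, case-insensitive; unknown names are dropped."""
    cleaned = (item.strip().lower() for item in raw.split(",") if item.strip())
    return {cast(Capability, item) for item in cleaned if item in get_args(Capability)}


print(parse_capabilities("Thinking, image_input, bogus"))
# {'thinking', 'image_input'} (set order may vary; 'bogus' is discarded)
```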
kimi_cli/app.py CHANGED
@@ -127,7 +127,7 @@ class KimiCLI:
  finally:
  os.chdir(original_cwd)

- async def run_shell_mode(self, command: str | None = None) -> bool:
+ async def run_shell_mode(self, command: str | None = None, markdown: bool = True) -> bool:
  from kimi_cli.ui.shell import ShellApp, WelcomeInfoItem

  welcome_info = [
@@ -167,7 +167,7 @@ class KimiCLI:
  )
  )
  with self._app_env():
- app = ShellApp(self._soul, welcome_info=welcome_info)
+ app = ShellApp(self._soul, welcome_info=welcome_info, markdown=markdown)
  return await app.run(command)

  async def run_print_mode(
@@ -193,3 +193,10 @@ class KimiCLI:
  with self._app_env():
  app = ACPServer(self._soul)
  return await app.run()
+
+ async def run_wire_server(self) -> bool:
+ from kimi_cli.ui.wire import WireServer
+
+ with self._app_env():
+ server = WireServer(self._soul)
+ return await server.run()
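`kimi_cli/ui/wire.py` itself is not included in this diff, so the Wire message format and the `WireServer` internals are not visible here. Purely as a hypothetical illustration of the general shape of a stdio-based local IPC loop (assuming newline-delimited JSON framing, which is an assumption rather than the documented protocol), a minimal sketch:

```python
import asyncio
import json
import sys


async def serve_stdio() -> None:
    """Hypothetical request/response loop over stdin/stdout (Unix-oriented sketch)."""
    loop = asyncio.get_running_loop()
    reader = asyncio.StreamReader()
    # Wrap stdin in an asyncio stream so requests can be read without blocking.
    await loop.connect_read_pipe(lambda: asyncio.StreamReaderProtocol(reader), sys.stdin)

    while line := await reader.readline():  # EOF ends the loop
        try:
            request = json.loads(line)
        except json.JSONDecodeError:
            continue  # skip malformed frames
        reply = {"id": request.get("id"), "ok": True}  # placeholder payload
        sys.stdout.write(json.dumps(reply) + "\n")
        sys.stdout.flush()


if __name__ == "__main__":
    asyncio.run(serve_stdio())
```

The real server presumably streams agent events rather than echoing acknowledgements; the sketch only shows the transport shape, consistent with the "experimental, subject to change" note in the changelog.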
kimi_cli/cli.py CHANGED
@@ -16,7 +16,7 @@ class Reload(Exception):
  pass


- UIMode = Literal["shell", "print", "acp"]
+ UIMode = Literal["shell", "print", "acp", "wire"]
  InputFormat = Literal["text", "stream-json"]
  OutputFormat = Literal["text", "stream-json"]

@@ -137,6 +137,12 @@ OutputFormat = Literal["text", "stream-json"]
  default=False,
  help="Automatically approve all actions. Default: no.",
  )
+ @click.option(
+ "--markdown/--no-markdown",
+ is_flag=True,
+ default=True,
+ help="Enable/disable markdown rendering in shell UI. Default: yes.",
+ )
  def kimi(
  verbose: bool,
  debug: bool,
@@ -151,6 +157,7 @@ def kimi(
  mcp_config_file: list[Path],
  mcp_config: list[str],
  yolo: bool,
+ markdown: bool,
  ):
  """Kimi, your next CLI agent."""
  from kimi_cli.app import KimiCLI
@@ -163,9 +170,12 @@ def kimi(

  echo: Callable[..., None] = click.echo if verbose else _noop_echo

+ if debug:
+ logger.enable("kosong")
  logger.add(
  get_share_dir() / "logs" / "kimi.log",
- level="DEBUG" if debug else "INFO",
+ # FIXME: configure level for different modules
+ level="TRACE" if debug else "INFO",
  rotation="06:00",
  retention="10 days",
  )
@@ -220,7 +230,7 @@ def kimi(
  )
  match ui:
  case "shell":
- return await instance.run_shell_mode(command)
+ return await instance.run_shell_mode(command, markdown=markdown)
  case "print":
  return await instance.run_print_mode(
  input_format or "text",
@@ -231,6 +241,10 @@ def kimi(
  if command is not None:
  logger.warning("ACP server ignores command argument")
  return await instance.run_acp_server()
+ case "wire":
+ if command is not None:
+ logger.warning("Wire server ignores command argument")
+ return await instance.run_wire_server()

  while True:
  try:
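The `--markdown/--no-markdown` option above uses Click's paired boolean flag syntax: a single declaration yields both the enabling and the disabling flag, with `default=True` making markdown rendering opt-out. A standalone sketch of the same pattern (the `demo` command here is illustrative, not part of kimi-cli):

```python
import click


@click.command()
@click.option(
    "--markdown/--no-markdown",
    default=True,
    help="Enable/disable markdown rendering. Default: yes.",
)
def demo(markdown: bool) -> None:
    # Click maps --markdown to True and --no-markdown to False.
    click.echo(f"markdown rendering: {'on' if markdown else 'off'}")


if __name__ == "__main__":
    demo()
```

The slash syntax alone is enough for Click to treat the option as a boolean pair.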
kimi_cli/config.py CHANGED
@@ -12,7 +12,7 @@ from kimi_cli.utils.logging import logger
  class LLMProvider(BaseModel):
  """LLM provider configuration."""

- type: Literal["kimi", "openai_legacy", "_chaos"]
+ type: Literal["kimi", "openai_legacy", "openai_responses", "anthropic", "_chaos"]
  """Provider type"""
  base_url: str
  """API base URL"""
kimi_cli/llm.py CHANGED
@@ -1,5 +1,5 @@
  import os
- from typing import NamedTuple
+ from typing import NamedTuple, cast, get_args

  from kosong.base.chat_provider import ChatProvider
  from pydantic import SecretStr
@@ -41,11 +41,19 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> di
  applied["KIMI_API_KEY"] = "******"
  if model_name := os.getenv("KIMI_MODEL_NAME"):
  model.model = model_name
- applied["KIMI_MODEL_NAME"] = model.model
+ applied["KIMI_MODEL_NAME"] = model_name
  if max_context_size := os.getenv("KIMI_MODEL_MAX_CONTEXT_SIZE"):
  model.max_context_size = int(max_context_size)
- applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = str(model.max_context_size)
- case "openai_legacy":
+ applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = max_context_size
+ if capabilities := os.getenv("KIMI_MODEL_CAPABILITIES"):
+ caps_lower = (cap.strip().lower() for cap in capabilities.split(",") if cap.strip())
+ model.capabilities = set(
+ cast(LLMModelCapability, cap)
+ for cap in caps_lower
+ if cap in get_args(LLMModelCapability)
+ )
+ applied["KIMI_MODEL_CAPABILITIES"] = capabilities
+ case "openai_legacy" | "openai_responses":
  if base_url := os.getenv("OPENAI_BASE_URL"):
  provider.base_url = base_url
  if api_key := os.getenv("OPENAI_API_KEY"):
@@ -88,6 +96,29 @@ def create_llm(
  api_key=provider.api_key.get_secret_value(),
  stream=stream,
  )
+ case "openai_responses":
+ from kosong.chat_provider.openai_responses import OpenAIResponses
+
+ chat_provider = OpenAIResponses(
+ model=model.model,
+ base_url=provider.base_url,
+ api_key=provider.api_key.get_secret_value(),
+ stream=stream,
+ )
+ case "anthropic":
+ from kosong.chat_provider.anthropic import Anthropic
+
+ chat_provider = Anthropic(
+ model=model.model,
+ base_url=provider.base_url,
+ api_key=provider.api_key.get_secret_value(),
+ stream=stream,
+ default_max_tokens=50000,
+ ).with_generation_kwargs(
+ # TODO: support configurable values
+ thinking={"type": "enabled", "budget_tokens": 1024},
+ beta_features=["interleaved-thinking-2025-05-14"],
+ )
  case "_chaos":
  from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig

kimi_cli/soul/toolset.py CHANGED
@@ -2,7 +2,8 @@ from contextvars import ContextVar
  from typing import override

  from kosong.base.message import ToolCall
- from kosong.tooling import HandleResult, SimpleToolset
+ from kosong.tooling import HandleResult
+ from kosong.tooling.simple import SimpleToolset

  current_tool_call = ContextVar[ToolCall | None]("current_tool_call", default=None)

@@ -1,7 +1,8 @@
  import json
  from pathlib import Path
+ from typing import cast

- import streamingjson
+ import streamingjson # pyright: ignore[reportMissingTypeStubs]
  from kosong.utils.typing import JsonType

  from kimi_cli.utils.string import shorten_middle
@@ -29,15 +30,15 @@ def extract_subtitle(lexer: streamingjson.Lexer, tool_name: str) -> str | None:
  case "SetTodoList":
  if not isinstance(curr_args, dict) or not curr_args.get("todos"):
  return None
- if not isinstance(curr_args["todos"], list):
+
+ from kimi_cli.tools.todo import Params
+
+ try:
+ todo_params = Params.model_validate(curr_args)
+ for todo in todo_params.todos:
+ subtitle += f"• {todo.title} [{todo.status}]\n"
+ except Exception:
  return None
- for todo in curr_args["todos"]:
- if not isinstance(todo, dict) or not todo.get("title"):
- continue
- subtitle += f"• {todo['title']}"
- if todo.get("status"):
- subtitle += f" [{todo['status']}]"
- subtitle += "\n"
  return "\n" + subtitle.strip()
  case "Bash":
  if not isinstance(curr_args, dict) or not curr_args.get("command"):
@@ -72,7 +73,9 @@ def extract_subtitle(lexer: streamingjson.Lexer, tool_name: str) -> str | None:
  return None
  subtitle = str(curr_args["url"])
  case _:
- subtitle = "".join(lexer.json_content)
+ # lexer.json_content is list[str] based on streamingjson source code
+ content: list[str] = cast(list[str], lexer.json_content) # pyright: ignore[reportUnknownMemberType]
+ subtitle = "".join(content)
  if tool_name not in ["SetTodoList"]:
  subtitle = shorten_middle(subtitle, width=50)
  return subtitle
@@ -1,6 +1,7 @@
  import asyncio
+ from collections.abc import Callable
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  from kosong.tooling import CallableTool2, ToolReturnType
  from pydantic import BaseModel, Field
@@ -29,7 +30,7 @@ class Bash(CallableTool2[Params]):
  description: str = load_desc(Path(__file__).parent / "bash.md", {})
  params: type[Params] = Params

- def __init__(self, approval: Approval, **kwargs):
+ def __init__(self, approval: Approval, **kwargs: Any):
  super().__init__(**kwargs)
  self._approval = approval

@@ -71,8 +72,13 @@ class Bash(CallableTool2[Params]):
  )


- async def _stream_subprocess(command: str, stdout_cb, stderr_cb, timeout: int) -> int:
- async def _read_stream(stream, cb):
+ async def _stream_subprocess(
+ command: str,
+ stdout_cb: Callable[[bytes], None],
+ stderr_cb: Callable[[bytes], None],
+ timeout: int,
+ ) -> int:
+ async def _read_stream(stream: asyncio.StreamReader, cb: Callable[[bytes], None]):
  while True:
  line = await stream.readline()
  if line:
@@ -85,6 +91,9 @@ async def _stream_subprocess(command: str, stdout_cb, stderr_cb, timeout: int) -
  command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
  )

+ assert process.stdout is not None, "stdout is None"
+ assert process.stderr is not None, "stderr is None"
+
  try:
  await asyncio.wait_for(
  asyncio.gather(
@@ -1,5 +1,5 @@
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  from kosong.tooling import CallableTool2, ToolError, ToolReturnType

@@ -8,12 +8,12 @@ from kimi_cli.soul.denwarenji import DenwaRenji, DenwaRenjiError, DMail
  NAME = "SendDMail"


- class SendDMail(CallableTool2):
+ class SendDMail(CallableTool2[DMail]):
  name: str = NAME
  description: str = (Path(__file__).parent / "dmail.md").read_text(encoding="utf-8")
  params: type[DMail] = DMail

- def __init__(self, denwa_renji: DenwaRenji, **kwargs):
+ def __init__(self, denwa_renji: DenwaRenji, **kwargs: Any) -> None:
  super().__init__(**kwargs)
  self._denwa_renji = denwa_renji

@@ -2,7 +2,7 @@

  import asyncio
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  import aiofiles.os
  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
@@ -38,7 +38,7 @@ class Glob(CallableTool2[Params]):
  )
  params: type[Params] = Params

- def __init__(self, builtin_args: BuiltinSystemPromptArgs, **kwargs):
+ def __init__(self, builtin_args: BuiltinSystemPromptArgs, **kwargs: Any) -> None:
  super().__init__(**kwargs)
  self._work_dir = builtin_args.KIMI_WORK_DIR

@@ -9,7 +9,7 @@ from pathlib import Path
  from typing import override

  import aiohttp
- import ripgrepy
+ import ripgrepy # pyright: ignore[reportMissingTypeStubs]
  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
  from pydantic import BaseModel, Field

@@ -1,8 +1,8 @@
  from pathlib import Path
- from typing import override
+ from typing import Any, Literal, override

  import aiofiles
- import patch_ng
+ import patch_ng # pyright: ignore[reportMissingTypeStubs]
  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
  from pydantic import BaseModel, Field

@@ -12,6 +12,36 @@ from kimi_cli.tools.file import FileActions
  from kimi_cli.tools.utils import ToolRejectedError


+ def _parse_patch(diff_bytes: bytes) -> patch_ng.PatchSet | None:
+ """Parse patch from bytes, returning PatchSet or None on error.
+
+ This wrapper provides type hints for the untyped patch_ng.fromstring function.
+ """
+ result: patch_ng.PatchSet | Literal[False] = patch_ng.fromstring(diff_bytes) # pyright: ignore[reportUnknownMemberType]
+ return result if result is not False else None
+
+
+ def _count_hunks(patch_set: patch_ng.PatchSet) -> int:
+ """Count total hunks across all items in a PatchSet.
+
+ This wrapper provides type hints for the untyped patch_ng library.
+ From source code inspection: PatchSet.items is list[Patch], Patch.hunks is list[Hunk].
+ Type ignore needed because patch_ng lacks type annotations.
+ """
+ items: list[patch_ng.Patch] = patch_set.items # pyright: ignore[reportUnknownMemberType]
+ # Each Patch has a hunks attribute (list[Hunk])
+ return sum(len(item.hunks) for item in items) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType]
+
+
+ def _apply_patch(patch_set: patch_ng.PatchSet, root: str) -> bool:
+ """Apply a patch to files under the given root directory.
+
+ This wrapper provides type hints for the untyped patch_ng.apply method.
+ """
+ success: Any = patch_set.apply(root=root) # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType]
+ return bool(success) # pyright: ignore[reportUnknownArgumentType]
+
+
  class Params(BaseModel):
  path: str = Field(description="The absolute path to the file to apply the patch to.")
  diff: str = Field(description="The diff content in unified format to apply.")
@@ -22,7 +52,7 @@ class PatchFile(CallableTool2[Params]):
  description: str = (Path(__file__).parent / "patch.md").read_text(encoding="utf-8")
  params: type[Params] = Params

- def __init__(self, builtin_args: BuiltinSystemPromptArgs, approval: Approval, **kwargs):
+ def __init__(self, builtin_args: BuiltinSystemPromptArgs, approval: Approval, **kwargs: Any):
  super().__init__(**kwargs)
  self._work_dir = builtin_args.KIMI_WORK_DIR
  self._approval = approval
@@ -87,10 +117,10 @@ class PatchFile(CallableTool2[Params]):
  original_content = await f.read()

  # Create patch object directly from string (no temporary file needed!)
- patch_set = patch_ng.fromstring(params.diff.encode("utf-8"))
+ patch_set = _parse_patch(params.diff.encode("utf-8"))

- # Handle case where patch_ng.fromstring returns False on parse errors
- if not patch_set or patch_set is True:
+ # Handle case where parsing failed
+ if patch_set is None:
  return ToolError(
  message=(
  "Failed to parse diff content: invalid patch format or no valid hunks found"
@@ -99,7 +129,7 @@ class PatchFile(CallableTool2[Params]):
  )

  # Count total hunks across all items
- total_hunks = sum(len(item.hunks) for item in patch_set.items)
+ total_hunks = _count_hunks(patch_set)

  if total_hunks == 0:
  return ToolError(
@@ -108,7 +138,7 @@ class PatchFile(CallableTool2[Params]):
  )

  # Apply the patch
- success = patch_set.apply(root=str(p.parent))
+ success = _apply_patch(patch_set, str(p.parent))

  if not success:
  return ToolError(
@@ -1,5 +1,5 @@
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  import aiofiles
  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
@@ -47,8 +47,9 @@ class ReadFile(CallableTool2[Params]):
  )
  params: type[Params] = Params

- def __init__(self, builtin_args: BuiltinSystemPromptArgs, **kwargs):
+ def __init__(self, builtin_args: BuiltinSystemPromptArgs, **kwargs: Any) -> None:
  super().__init__(**kwargs)
+
  self._work_dir = builtin_args.KIMI_WORK_DIR

  @override
@@ -84,7 +85,7 @@ class ReadFile(CallableTool2[Params]):

  lines: list[str] = []
  n_bytes = 0
- truncated_line_numbers = []
+ truncated_line_numbers: list[int] = []
  max_lines_reached = False
  max_bytes_reached = False
  async with aiofiles.open(p, encoding="utf-8", errors="replace") as f:
@@ -108,7 +109,7 @@ class ReadFile(CallableTool2[Params]):
  break

  # Format output with line numbers like `cat -n`
- lines_with_no = []
+ lines_with_no: list[str] = []
  for line_num, line in zip(
  range(params.line_offset, params.line_offset + len(lines)), lines, strict=True
  ):
@@ -1,5 +1,5 @@
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  import aiofiles
  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
@@ -32,7 +32,7 @@ class StrReplaceFile(CallableTool2[Params]):
  description: str = (Path(__file__).parent / "replace.md").read_text(encoding="utf-8")
  params: type[Params] = Params

- def __init__(self, builtin_args: BuiltinSystemPromptArgs, approval: Approval, **kwargs):
+ def __init__(self, builtin_args: BuiltinSystemPromptArgs, approval: Approval, **kwargs: Any):
  super().__init__(**kwargs)
  self._work_dir = builtin_args.KIMI_WORK_DIR
  self._approval = approval
@@ -1,5 +1,5 @@
  from pathlib import Path
- from typing import Literal, override
+ from typing import Any, Literal, override

  import aiofiles
  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
@@ -29,7 +29,7 @@ class WriteFile(CallableTool2[Params]):
  description: str = (Path(__file__).parent / "write.md").read_text(encoding="utf-8")
  params: type[Params] = Params

- def __init__(self, builtin_args: BuiltinSystemPromptArgs, approval: Approval, **kwargs):
+ def __init__(self, builtin_args: BuiltinSystemPromptArgs, approval: Approval, **kwargs: Any):
  super().__init__(**kwargs)
  self._work_dir = builtin_args.KIMI_WORK_DIR
  self._approval = approval
kimi_cli/tools/mcp.py CHANGED
@@ -1,12 +1,15 @@
+ from typing import Any
+
  import fastmcp
  import mcp
  from fastmcp.client.client import CallToolResult
+ from fastmcp.client.transports import ClientTransport
  from kosong.base.message import AudioURLPart, ContentPart, ImageURLPart, TextPart
  from kosong.tooling import CallableTool, ToolOk, ToolReturnType


- class MCPTool(CallableTool):
- def __init__(self, mcp_tool: mcp.Tool, client: fastmcp.Client, **kwargs):
+ class MCPTool[T: ClientTransport](CallableTool):
+ def __init__(self, mcp_tool: mcp.Tool, client: fastmcp.Client[T], **kwargs: Any):
  super().__init__(
  name=mcp_tool.name,
  description=mcp_tool.description or "",
@@ -16,7 +19,7 @@ class MCPTool(CallableTool):
  self._mcp_tool = mcp_tool
  self._client = client

- async def __call__(self, *args, **kwargs) -> ToolReturnType:
+ async def __call__(self, *args: Any, **kwargs: Any) -> ToolReturnType:
  async with self._client as client:
  result = await client.call_tool(self._mcp_tool.name, kwargs, timeout=20)
  return convert_tool_result(result)
@@ -1,6 +1,6 @@
  import asyncio
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
  from pydantic import BaseModel, Field
@@ -49,7 +49,7 @@ class Task(CallableTool2[Params]):
  name: str = "Task"
  params: type[Params] = Params

- def __init__(self, agent_spec: ResolvedAgentSpec, runtime: Runtime, **kwargs):
+ def __init__(self, agent_spec: ResolvedAgentSpec, runtime: Runtime, **kwargs: Any):
  super().__init__(
  description=load_desc(
  Path(__file__).parent / "task.md",
@@ -1,5 +1,5 @@
  from pathlib import Path
- from typing import override
+ from typing import Any, override

  from kosong.tooling import CallableTool2, ToolReturnType
  from pydantic import BaseModel, Field, ValidationError
@@ -39,7 +39,7 @@ class SearchWeb(CallableTool2[Params]):
  description: str = load_desc(Path(__file__).parent / "search.md", {})
  params: type[Params] = Params

- def __init__(self, config: Config, **kwargs):
+ def __init__(self, config: Config, **kwargs: Any):
  super().__init__(**kwargs)
  if config.services.moonshot_search is not None:
  self._base_url = config.services.moonshot_search.base_url
@@ -24,10 +24,16 @@ from kimi_cli.utils.signals import install_sigint_handler


  class ShellApp:
- def __init__(self, soul: Soul, welcome_info: list["WelcomeInfoItem"] | None = None):
+ def __init__(
+ self,
+ soul: Soul,
+ welcome_info: list["WelcomeInfoItem"] | None = None,
+ markdown: bool = True,
+ ):
  self.soul = soul
  self._welcome_info = list(welcome_info or [])
  self._background_tasks: set[asyncio.Task[Any]] = set()
+ self._markdown = markdown

  async def run(self, command: str | None = None) -> bool:
  if command is not None:
@@ -168,7 +174,10 @@ class ShellApp:
  self.soul,
  user_input,
  lambda wire: visualize(
- wire, initial_status=self.soul.status, cancel_event=cancel_event
+ wire,
+ initial_status=self.soul.status,
+ cancel_event=cancel_event,
+ markdown=self._markdown,
  ),
  cancel_event,
  )
@@ -1,5 +1,6 @@
  import asyncio
  from collections import deque
+ from typing import Literal

  import streamingjson
  from kosong.base.message import ToolCall, ToolCallPart
@@ -129,6 +130,7 @@ class StepLiveView:
  def __init__(self, status: StatusSnapshot, cancel_event: asyncio.Event | None = None):
  # message content
  self._line_buffer = Text("")
+ self._last_text_mode: Literal["text", "think", ""] = ""

  # tool call
  self._tool_calls: dict[str, _ToolCallDisplay] = {}
@@ -187,7 +189,21 @@ class StepLiveView:
  """
  console.print(renderable)

- def append_text(self, text: str):
+ def append_text(self, text: str, mode: Literal["text", "think"] = "text"):
+ if not text:
+ # Ignore empty message
+ return
+ if self._last_text_mode != mode:
+ if self._line_buffer:
+ self._push_out(self._line_buffer)
+ self._push_out("") # Add extra line between different modes
+ self._line_buffer.plain = ""
+ self._last_text_mode = mode
+ match mode:
+ case "text":
+ self._line_buffer.style = ""
+ case "think":
+ self._line_buffer.style = "grey50 italic"
  lines = text.split("\n")
  prev_is_empty = not self._line_buffer
  for line in lines[:-1]:
@@ -313,7 +329,14 @@ class StepLiveViewWithMarkdown(StepLiveView):
  self._buffer_status_active = False
  self._buffer_status_obj: Status | None = None

- def append_text(self, text: str):
+ def append_text(self, text: str, mode: Literal["text", "think"] = "text"):
+ if not text:
+ # Ignore empty message
+ return
+ if self._last_text_mode != mode:
+ if self._flush_markdown():
+ self._push_out("") # Add extra line between different modes
+ self._last_text_mode = mode
  if not self._pending_markdown_parts:
  self._show_thinking_status()
  self._pending_markdown_parts.append(text)
@@ -334,14 +357,22 @@ class StepLiveViewWithMarkdown(StepLiveView):
  self._flush_markdown()
  return super().__exit__(exc_type, exc_value, traceback)

- def _flush_markdown(self):
+ def _flush_markdown(self) -> bool:
  self._hide_thinking_status()
  if not self._pending_markdown_parts:
- return
+ return False
  markdown_text = "".join(self._pending_markdown_parts)
  self._pending_markdown_parts.clear()
  if markdown_text.strip():
- self._push_out(_LeftAlignedMarkdown(markdown_text, justify="left"))
+ self._push_out(
+ _LeftAlignedMarkdown(
+ markdown_text,
+ justify="left",
+ style="grey50 italic" if self._last_text_mode == "think" else "none",
+ )
+ )
+ return True
+ return False

  def _show_thinking_status(self):
  if self._buffer_status_active:
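The new `think` mode renders reasoning text dimmed and italic (`grey50 italic`) and pushes a blank line whenever the stream switches between thinking and normal output. A minimal Rich snippet illustrating that presentation (illustrative only, not the `StepLiveView` code itself):

```python
from rich.console import Console
from rich.text import Text

console = Console()

# "think" content: dim italic, then a separating blank line, then normal text,
# mirroring the mode switch handled by append_text() above.
console.print(Text("Working through the plan...", style="grey50 italic"))
console.print()
console.print(Text("Here is the final answer."))
```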