kimi-cli 0.45__tar.gz → 0.51__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kimi-cli might be problematic. Click here for more details.

Files changed (100) hide show
  1. {kimi_cli-0.45 → kimi_cli-0.51}/PKG-INFO +5 -4
  2. {kimi_cli-0.45 → kimi_cli-0.51}/README.md +1 -0
  3. {kimi_cli-0.45 → kimi_cli-0.51}/pyproject.toml +6 -6
  4. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/CHANGELOG.md +52 -0
  5. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/app.py +17 -4
  6. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/cli.py +15 -7
  7. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/config.py +4 -6
  8. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/llm.py +41 -17
  9. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/metadata.py +1 -1
  10. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/__init__.py +15 -8
  11. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/agent.py +6 -1
  12. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/kimisoul.py +31 -3
  13. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/toolset.py +2 -1
  14. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/__init__.py +34 -30
  15. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/bash/__init__.py +15 -6
  16. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/dmail/__init__.py +5 -4
  17. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/glob.py +2 -2
  18. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/grep.py +34 -18
  19. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/patch.py +40 -10
  20. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/read.py +5 -4
  21. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/replace.py +4 -4
  22. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/write.py +4 -4
  23. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/mcp.py +6 -3
  24. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/task/__init__.py +14 -4
  25. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/todo/__init__.py +4 -2
  26. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/web/search.py +8 -10
  27. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/acp/__init__.py +62 -28
  28. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/__init__.py +23 -13
  29. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/metacmd.py +5 -5
  30. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/prompt.py +129 -140
  31. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/setup.py +10 -8
  32. kimi_cli-0.51/src/kimi_cli/ui/shell/visualize.py +576 -0
  33. kimi_cli-0.51/src/kimi_cli/ui/wire/README.md +109 -0
  34. kimi_cli-0.51/src/kimi_cli/ui/wire/__init__.py +340 -0
  35. kimi_cli-0.51/src/kimi_cli/ui/wire/jsonrpc.py +48 -0
  36. kimi_cli-0.51/src/kimi_cli/utils/__init__.py +0 -0
  37. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/changelog.py +3 -1
  38. kimi_cli-0.51/src/kimi_cli/utils/clipboard.py +10 -0
  39. kimi_cli-0.51/src/kimi_cli/utils/rich/__init__.py +33 -0
  40. kimi_cli-0.51/src/kimi_cli/utils/rich/markdown.py +882 -0
  41. kimi_cli-0.51/src/kimi_cli/utils/rich/markdown_sample.md +108 -0
  42. kimi_cli-0.51/src/kimi_cli/utils/rich/markdown_sample_short.md +2 -0
  43. kimi_cli-0.51/src/kimi_cli/utils/term.py +114 -0
  44. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/wire/__init__.py +4 -1
  45. kimi_cli-0.51/src/kimi_cli/wire/message.py +191 -0
  46. kimi_cli-0.45/src/kimi_cli/ui/shell/liveview.py +0 -417
  47. kimi_cli-0.45/src/kimi_cli/ui/shell/visualize.py +0 -115
  48. kimi_cli-0.45/src/kimi_cli/wire/message.py +0 -91
  49. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/__init__.py +0 -0
  50. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/agents/default/agent.yaml +0 -0
  51. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/agents/default/sub.yaml +0 -0
  52. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/agents/default/system.md +0 -0
  53. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/agentspec.py +0 -0
  54. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/constant.py +0 -0
  55. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/exception.py +0 -0
  56. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/prompts/__init__.py +0 -0
  57. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/prompts/compact.md +0 -0
  58. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/prompts/init.md +0 -0
  59. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/py.typed +0 -0
  60. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/session.py +0 -0
  61. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/share.py +0 -0
  62. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/approval.py +0 -0
  63. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/compaction.py +0 -0
  64. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/context.py +0 -0
  65. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/denwarenji.py +0 -0
  66. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/message.py +0 -0
  67. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/soul/runtime.py +0 -0
  68. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/bash/bash.md +0 -0
  69. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/dmail/dmail.md +0 -0
  70. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/__init__.py +0 -0
  71. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/glob.md +0 -0
  72. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/grep.md +0 -0
  73. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/patch.md +0 -0
  74. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/read.md +0 -0
  75. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/replace.md +0 -0
  76. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/file/write.md +0 -0
  77. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/task/task.md +0 -0
  78. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/test.py +0 -0
  79. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/think/__init__.py +0 -0
  80. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/think/think.md +0 -0
  81. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/todo/set_todo_list.md +0 -0
  82. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/utils.py +0 -0
  83. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/web/__init__.py +0 -0
  84. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/web/fetch.md +0 -0
  85. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/web/fetch.py +0 -0
  86. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/tools/web/search.md +0 -0
  87. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/__init__.py +0 -0
  88. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/print/__init__.py +0 -0
  89. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/console.py +0 -0
  90. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/debug.py +0 -0
  91. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/keyboard.py +0 -0
  92. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/replay.py +0 -0
  93. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/ui/shell/update.py +0 -0
  94. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/aiohttp.py +0 -0
  95. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/logging.py +0 -0
  96. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/message.py +0 -0
  97. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/path.py +0 -0
  98. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/pyinstaller.py +0 -0
  99. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/signals.py +0 -0
  100. {kimi_cli-0.45 → kimi_cli-0.51}/src/kimi_cli/utils/string.py +0 -0
@@ -1,12 +1,12 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: kimi-cli
3
- Version: 0.45
3
+ Version: 0.51
4
4
  Summary: Kimi CLI is your next CLI agent.
5
- Requires-Dist: agent-client-protocol==0.6.2
5
+ Requires-Dist: agent-client-protocol==0.6.3
6
6
  Requires-Dist: aiofiles==25.1.0
7
7
  Requires-Dist: aiohttp==3.13.2
8
8
  Requires-Dist: click==8.3.0
9
- Requires-Dist: kosong==0.16.2
9
+ Requires-Dist: kosong==0.19.0
10
10
  Requires-Dist: loguru==0.7.3
11
11
  Requires-Dist: patch-ng==1.19.0
12
12
  Requires-Dist: prompt-toolkit==3.0.52
@@ -18,7 +18,7 @@ Requires-Dist: streamingjson==0.0.5
18
18
  Requires-Dist: trafilatura==2.0.0
19
19
  Requires-Dist: tenacity==9.1.2
20
20
  Requires-Dist: fastmcp==2.12.5
21
- Requires-Dist: pydantic==2.12.3
21
+ Requires-Dist: pydantic==2.12.4
22
22
  Requires-Dist: httpx[socks]==0.28.1
23
23
  Requires-Python: >=3.13
24
24
  Description-Content-Type: text/markdown
@@ -29,6 +29,7 @@ Description-Content-Type: text/markdown
29
29
  [![Checks](https://img.shields.io/github/check-runs/MoonshotAI/kimi-cli/main)](https://github.com/MoonshotAI/kimi-cli/actions)
30
30
  [![Version](https://img.shields.io/pypi/v/kimi-cli)](https://pypi.org/project/kimi-cli/)
31
31
  [![Downloads](https://img.shields.io/pypi/dw/kimi-cli)](https://pypistats.org/packages/kimi-cli)
32
+ [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/MoonshotAI/kimi-cli)
32
33
 
33
34
  [中文](https://www.kimi.com/coding/docs/kimi-cli.html)
34
35
 
@@ -4,6 +4,7 @@
4
4
  [![Checks](https://img.shields.io/github/check-runs/MoonshotAI/kimi-cli/main)](https://github.com/MoonshotAI/kimi-cli/actions)
5
5
  [![Version](https://img.shields.io/pypi/v/kimi-cli)](https://pypi.org/project/kimi-cli/)
6
6
  [![Downloads](https://img.shields.io/pypi/dw/kimi-cli)](https://pypistats.org/packages/kimi-cli)
7
+ [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/MoonshotAI/kimi-cli)
7
8
 
8
9
  [中文](https://www.kimi.com/coding/docs/kimi-cli.html)
9
10
 
@@ -1,15 +1,15 @@
1
1
  [project]
2
2
  name = "kimi-cli"
3
- version = "0.45"
3
+ version = "0.51"
4
4
  description = "Kimi CLI is your next CLI agent."
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.13"
7
7
  dependencies = [
8
- "agent-client-protocol==0.6.2",
8
+ "agent-client-protocol==0.6.3",
9
9
  "aiofiles==25.1.0",
10
10
  "aiohttp==3.13.2",
11
11
  "click==8.3.0",
12
- "kosong==0.16.2",
12
+ "kosong==0.19.0",
13
13
  "loguru==0.7.3",
14
14
  "patch-ng==1.19.0",
15
15
  "prompt-toolkit==3.0.52",
@@ -21,18 +21,18 @@ dependencies = [
21
21
  "trafilatura==2.0.0",
22
22
  "tenacity==9.1.2",
23
23
  "fastmcp==2.12.5",
24
- "pydantic==2.12.3",
24
+ "pydantic==2.12.4",
25
25
  "httpx[socks]==0.28.1",
26
26
  ]
27
27
 
28
28
  [dependency-groups]
29
29
  dev = [
30
- "inline-snapshot[black]>=0.31.0",
30
+ "inline-snapshot[black]>=0.31.1",
31
31
  "pyinstaller>=6.16.0",
32
32
  "pyright>=1.1.407",
33
33
  "pytest>=8.4.2",
34
34
  "pytest-asyncio>=1.2.0",
35
- "ruff>=0.14.1",
35
+ "ruff>=0.14.4",
36
36
  ]
37
37
 
38
38
  [build-system]
@@ -9,6 +9,58 @@ Internal builds may append content to the Unreleased section.
9
9
  Only write entries that are worth mentioning to users.
10
10
  -->
11
11
 
12
+ ## [0.51] - 2025-11-08
13
+
14
+ - Lib: Rename `Soul.model` to `Soul.model_name`
15
+ - Lib: Rename `LLMModelCapability` to `ModelCapability` and move to `kimi_cli.llm`
16
+ - Lib: Add `"thinking"` to `ModelCapability`
17
+ - Lib: Remove `LLM.supports_image_in` property
18
+ - Lib: Add required `Soul.model_capabilities` property
19
+ - Lib: Add `thinking: bool` parameter to `ShellApp.run` method
20
+ - Lib: Rename `KimiSoul.set_thinking_mode` to `KimiSoul.set_thinking`
21
+ - UI: Better checks and notices for LLM model capabilities
22
+ - UI: Clear the screen for `/clear` meta command
23
+ - Tool: Support auto-downloading ripgrep on Windows
24
+ - CLI: Add `--thinking` option to start in thinking mode
25
+ - ACP: Support thinking content in ACP mode
26
+
27
+ ## [0.50] - 2025-11-07
28
+
29
+ ### Changed
30
+
31
+ - Improve UI look and feel
32
+ - Improve Task tool observability
33
+
34
+ ## [0.49] - 2025-11-06
35
+
36
+ ### Fixed
37
+
38
+ - Minor UX improvements
39
+
40
+ ## [0.48] - 2025-11-06
41
+
42
+ ### Added
43
+
44
+ - Support Kimi K2 thinking mode
45
+
46
+ ## [0.47] - 2025-11-05
47
+
48
+ ### Fixed
49
+
50
+ - Fix Ctrl-W not working in some environments
51
+ - Do not load SearchWeb tool when the search service is not configured
52
+
53
+ ## [0.46] - 2025-11-03
54
+
55
+ ### Added
56
+
57
+ - Introduce Wire over stdio for local IPC (experimental, subject to change)
58
+ - Support Anthropic provider type
59
+
60
+ ### Fixed
61
+
62
+ - Fix binary packed by PyInstaller not working due to wrong entrypoint
63
+
12
64
  ## [0.45] - 2025-10-31
13
65
 
14
66
  ### Added
@@ -12,6 +12,7 @@ from kimi_cli.cli import InputFormat, OutputFormat
12
12
  from kimi_cli.config import LLMModel, LLMProvider, load_config
13
13
  from kimi_cli.llm import augment_provider_with_env_vars, create_llm
14
14
  from kimi_cli.session import Session
15
+ from kimi_cli.soul import LLMNotSet, LLMNotSupported
15
16
  from kimi_cli.soul.agent import load_agent
16
17
  from kimi_cli.soul.context import Context
17
18
  from kimi_cli.soul.kimisoul import KimiSoul
@@ -29,6 +30,7 @@ class KimiCLI:
29
30
  mcp_configs: list[dict[str, Any]] | None = None,
30
31
  config_file: Path | None = None,
31
32
  model_name: str | None = None,
33
+ thinking: bool = False,
32
34
  agent_file: Path | None = None,
33
35
  ) -> "KimiCLI":
34
36
  """
@@ -93,6 +95,10 @@ class KimiCLI:
93
95
  runtime,
94
96
  context=context,
95
97
  )
98
+ try:
99
+ soul.set_thinking(thinking)
100
+ except (LLMNotSet, LLMNotSupported) as e:
101
+ logger.warning("Failed to enable thinking mode: {error}", error=e)
96
102
  return KimiCLI(soul, runtime, env_overrides)
97
103
 
98
104
  def __init__(
@@ -127,7 +133,7 @@ class KimiCLI:
127
133
  finally:
128
134
  os.chdir(original_cwd)
129
135
 
130
- async def run_shell_mode(self, command: str | None = None, markdown: bool = True) -> bool:
136
+ async def run_shell_mode(self, command: str | None = None) -> bool:
131
137
  from kimi_cli.ui.shell import ShellApp, WelcomeInfoItem
132
138
 
133
139
  welcome_info = [
@@ -154,7 +160,7 @@ class KimiCLI:
154
160
  welcome_info.append(
155
161
  WelcomeInfoItem(
156
162
  name="Model",
157
- value=f"{self._soul.model} (from KIMI_MODEL_NAME)",
163
+ value=f"{self._soul.model_name} (from KIMI_MODEL_NAME)",
158
164
  level=WelcomeInfoItem.Level.WARN,
159
165
  )
160
166
  )
@@ -162,12 +168,12 @@ class KimiCLI:
162
168
  welcome_info.append(
163
169
  WelcomeInfoItem(
164
170
  name="Model",
165
- value=self._soul.model,
171
+ value=self._soul.model_name,
166
172
  level=WelcomeInfoItem.Level.INFO,
167
173
  )
168
174
  )
169
175
  with self._app_env():
170
- app = ShellApp(self._soul, welcome_info=welcome_info, markdown=markdown)
176
+ app = ShellApp(self._soul, welcome_info=welcome_info)
171
177
  return await app.run(command)
172
178
 
173
179
  async def run_print_mode(
@@ -193,3 +199,10 @@ class KimiCLI:
193
199
  with self._app_env():
194
200
  app = ACPServer(self._soul)
195
201
  return await app.run()
202
+
203
+ async def run_wire_server(self) -> bool:
204
+ from kimi_cli.ui.wire import WireServer
205
+
206
+ with self._app_env():
207
+ server = WireServer(self._soul)
208
+ return await server.run()
@@ -16,7 +16,7 @@ class Reload(Exception):
16
16
  pass
17
17
 
18
18
 
19
- UIMode = Literal["shell", "print", "acp"]
19
+ UIMode = Literal["shell", "print", "acp", "wire"]
20
20
  InputFormat = Literal["text", "stream-json"]
21
21
  OutputFormat = Literal["text", "stream-json"]
22
22
 
@@ -138,10 +138,10 @@ OutputFormat = Literal["text", "stream-json"]
138
138
  help="Automatically approve all actions. Default: no.",
139
139
  )
140
140
  @click.option(
141
- "--markdown/--no-markdown",
141
+ "--thinking",
142
142
  is_flag=True,
143
- default=True,
144
- help="Enable/disable markdown rendering in shell UI. Default: yes.",
143
+ default=False,
144
+ help="Enable thinking mode if supported. Default: no.",
145
145
  )
146
146
  def kimi(
147
147
  verbose: bool,
@@ -157,7 +157,7 @@ def kimi(
157
157
  mcp_config_file: list[Path],
158
158
  mcp_config: list[str],
159
159
  yolo: bool,
160
- markdown: bool,
160
+ thinking: bool,
161
161
  ):
162
162
  """Kimi, your next CLI agent."""
163
163
  from kimi_cli.app import KimiCLI
@@ -170,9 +170,12 @@ def kimi(
170
170
 
171
171
  echo: Callable[..., None] = click.echo if verbose else _noop_echo
172
172
 
173
+ if debug:
174
+ logger.enable("kosong")
173
175
  logger.add(
174
176
  get_share_dir() / "logs" / "kimi.log",
175
- level="DEBUG" if debug else "INFO",
177
+ # FIXME: configure level for different modules
178
+ level="TRACE" if debug else "INFO",
176
179
  rotation="06:00",
177
180
  retention="10 days",
178
181
  )
@@ -223,11 +226,12 @@ def kimi(
223
226
  stream=ui != "print", # use non-streaming mode only for print UI
224
227
  mcp_configs=mcp_configs,
225
228
  model_name=model_name,
229
+ thinking=thinking,
226
230
  agent_file=agent_file,
227
231
  )
228
232
  match ui:
229
233
  case "shell":
230
- return await instance.run_shell_mode(command, markdown=markdown)
234
+ return await instance.run_shell_mode(command)
231
235
  case "print":
232
236
  return await instance.run_print_mode(
233
237
  input_format or "text",
@@ -238,6 +242,10 @@ def kimi(
238
242
  if command is not None:
239
243
  logger.warning("ACP server ignores command argument")
240
244
  return await instance.run_acp_server()
245
+ case "wire":
246
+ if command is not None:
247
+ logger.warning("Wire server ignores command argument")
248
+ return await instance.run_wire_server()
241
249
 
242
250
  while True:
243
251
  try:
@@ -1,10 +1,11 @@
1
1
  import json
2
2
  from pathlib import Path
3
- from typing import Literal, Self
3
+ from typing import Self
4
4
 
5
5
  from pydantic import BaseModel, Field, SecretStr, ValidationError, field_serializer, model_validator
6
6
 
7
7
  from kimi_cli.exception import ConfigError
8
+ from kimi_cli.llm import ModelCapability, ProviderType
8
9
  from kimi_cli.share import get_share_dir
9
10
  from kimi_cli.utils.logging import logger
10
11
 
@@ -12,7 +13,7 @@ from kimi_cli.utils.logging import logger
12
13
  class LLMProvider(BaseModel):
13
14
  """LLM provider configuration."""
14
15
 
15
- type: Literal["kimi", "openai_legacy", "openai_responses", "_chaos"]
16
+ type: ProviderType
16
17
  """Provider type"""
17
18
  base_url: str
18
19
  """API base URL"""
@@ -26,9 +27,6 @@ class LLMProvider(BaseModel):
26
27
  return v.get_secret_value()
27
28
 
28
29
 
29
- LLMModelCapability = Literal["image_in"]
30
-
31
-
32
30
  class LLMModel(BaseModel):
33
31
  """LLM model configuration."""
34
32
 
@@ -38,7 +36,7 @@ class LLMModel(BaseModel):
38
36
  """Model name"""
39
37
  max_context_size: int
40
38
  """Maximum context size (unit: tokens)"""
41
- capabilities: set[LLMModelCapability] | None = None
39
+ capabilities: set[ModelCapability] | None = None
42
40
  """Model capabilities"""
43
41
 
44
42
 
@@ -1,29 +1,33 @@
1
1
  import os
2
- from typing import NamedTuple, cast, get_args
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Literal, cast, get_args
3
4
 
4
5
  from kosong.base.chat_provider import ChatProvider
5
6
  from pydantic import SecretStr
6
7
 
7
- from kimi_cli.config import LLMModel, LLMModelCapability, LLMProvider
8
8
  from kimi_cli.constant import USER_AGENT
9
9
 
10
+ if TYPE_CHECKING:
11
+ from kimi_cli.config import LLMModel, LLMProvider
10
12
 
11
- class LLM(NamedTuple):
13
+ type ProviderType = Literal["kimi", "openai_legacy", "openai_responses", "anthropic", "_chaos"]
14
+
15
+ type ModelCapability = Literal["image_in", "thinking"]
16
+ ALL_MODEL_CAPABILITIES: set[ModelCapability] = set(get_args(ModelCapability))
17
+
18
+
19
+ @dataclass(slots=True)
20
+ class LLM:
12
21
  chat_provider: ChatProvider
13
22
  max_context_size: int
14
- capabilities: set[LLMModelCapability]
15
- # TODO: these additional fields should be moved to ChatProvider
23
+ capabilities: set[ModelCapability]
16
24
 
17
25
  @property
18
26
  def model_name(self) -> str:
19
27
  return self.chat_provider.model_name
20
28
 
21
- @property
22
- def supports_image_in(self) -> bool:
23
- return "image_in" in self.capabilities
24
29
 
25
-
26
- def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> dict[str, str]:
30
+ def augment_provider_with_env_vars(provider: "LLMProvider", model: "LLMModel") -> dict[str, str]:
27
31
  """Override provider/model settings from environment variables.
28
32
 
29
33
  Returns:
@@ -48,9 +52,9 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> di
48
52
  if capabilities := os.getenv("KIMI_MODEL_CAPABILITIES"):
49
53
  caps_lower = (cap.strip().lower() for cap in capabilities.split(",") if cap.strip())
50
54
  model.capabilities = set(
51
- cast(LLMModelCapability, cap)
55
+ cast(ModelCapability, cap)
52
56
  for cap in caps_lower
53
- if cap in get_args(LLMModelCapability)
57
+ if cap in get_args(ModelCapability)
54
58
  )
55
59
  applied["KIMI_MODEL_CAPABILITIES"] = capabilities
56
60
  case "openai_legacy" | "openai_responses":
@@ -65,8 +69,8 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> di
65
69
 
66
70
 
67
71
  def create_llm(
68
- provider: LLMProvider,
69
- model: LLMModel,
72
+ provider: "LLMProvider",
73
+ model: "LLMModel",
70
74
  *,
71
75
  stream: bool = True,
72
76
  session_id: str | None = None,
@@ -88,7 +92,7 @@ def create_llm(
88
92
  if session_id:
89
93
  chat_provider = chat_provider.with_generation_kwargs(prompt_cache_key=session_id)
90
94
  case "openai_legacy":
91
- from kosong.chat_provider.openai_legacy import OpenAILegacy
95
+ from kosong.contrib.chat_provider.openai_legacy import OpenAILegacy
92
96
 
93
97
  chat_provider = OpenAILegacy(
94
98
  model=model.model,
@@ -97,7 +101,7 @@ def create_llm(
97
101
  stream=stream,
98
102
  )
99
103
  case "openai_responses":
100
- from kosong.chat_provider.openai_responses import OpenAIResponses
104
+ from kosong.contrib.chat_provider.openai_responses import OpenAIResponses
101
105
 
102
106
  chat_provider = OpenAIResponses(
103
107
  model=model.model,
@@ -105,6 +109,16 @@ def create_llm(
105
109
  api_key=provider.api_key.get_secret_value(),
106
110
  stream=stream,
107
111
  )
112
+ case "anthropic":
113
+ from kosong.contrib.chat_provider.anthropic import Anthropic
114
+
115
+ chat_provider = Anthropic(
116
+ model=model.model,
117
+ base_url=provider.base_url,
118
+ api_key=provider.api_key.get_secret_value(),
119
+ stream=stream,
120
+ default_max_tokens=50000,
121
+ )
108
122
  case "_chaos":
109
123
  from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
110
124
 
@@ -121,5 +135,15 @@ def create_llm(
121
135
  return LLM(
122
136
  chat_provider=chat_provider,
123
137
  max_context_size=model.max_context_size,
124
- capabilities=model.capabilities or set(),
138
+ capabilities=_derive_capabilities(provider, model),
125
139
  )
140
+
141
+
142
+ def _derive_capabilities(provider: "LLMProvider", model: "LLMModel") -> set[ModelCapability]:
143
+ capabilities = model.capabilities or set()
144
+ if provider.type != "kimi":
145
+ return capabilities
146
+
147
+ if model.model == "kimi-for-coding" or "thinking" in model.model:
148
+ capabilities.add("thinking")
149
+ return capabilities
@@ -23,7 +23,7 @@ class WorkDirMeta(BaseModel):
23
23
 
24
24
  @property
25
25
  def sessions_dir(self) -> Path:
26
- path = get_share_dir() / "sessions" / md5(self.path.encode()).hexdigest()
26
+ path = get_share_dir() / "sessions" / md5(self.path.encode(encoding="utf-8")).hexdigest()
27
27
  path.mkdir(parents=True, exist_ok=True)
28
28
  return path
29
29
 
@@ -2,15 +2,17 @@ import asyncio
2
2
  import contextlib
3
3
  from collections.abc import Callable, Coroutine
4
4
  from contextvars import ContextVar
5
- from typing import Any, NamedTuple, Protocol, runtime_checkable
5
+ from typing import TYPE_CHECKING, Any, NamedTuple, Protocol, runtime_checkable
6
6
 
7
7
  from kosong.base.message import ContentPart
8
8
 
9
- from kimi_cli.llm import LLM
10
9
  from kimi_cli.utils.logging import logger
11
10
  from kimi_cli.wire import Wire, WireUISide
12
11
  from kimi_cli.wire.message import WireMessage
13
12
 
13
+ if TYPE_CHECKING:
14
+ from kimi_cli.llm import LLM, ModelCapability
15
+
14
16
 
15
17
  class LLMNotSet(Exception):
16
18
  """Raised when the LLM is not set."""
@@ -21,12 +23,12 @@ class LLMNotSet(Exception):
21
23
  class LLMNotSupported(Exception):
22
24
  """Raised when the LLM does not have required capabilities."""
23
25
 
24
- def __init__(self, llm: LLM, capabilities: list[str]):
26
+ def __init__(self, llm: "LLM", capabilities: "list[ModelCapability]"):
25
27
  self.llm = llm
26
28
  self.capabilities = capabilities
27
29
  capabilities_str = "capability" if len(capabilities) == 1 else "capabilities"
28
30
  super().__init__(
29
- f"The LLM model '{llm.model_name}' does not support required {capabilities_str}: "
31
+ f"LLM model '{llm.model_name}' does not support required {capabilities_str}: "
30
32
  f"{', '.join(capabilities)}."
31
33
  )
32
34
 
@@ -54,8 +56,13 @@ class Soul(Protocol):
54
56
  ...
55
57
 
56
58
  @property
57
- def model(self) -> str:
58
- """The LLM model used by the soul. Empty string indicates no LLM configured."""
59
+ def model_name(self) -> str:
60
+ """The name of the LLM model used by the soul. Empty string indicates no LLM configured."""
61
+ ...
62
+
63
+ @property
64
+ def model_capabilities(self) -> "set[ModelCapability] | None":
65
+ """The capabilities of the LLM model used by the soul. None indicates no LLM configured."""
59
66
  ...
60
67
 
61
68
  @property
@@ -147,8 +154,8 @@ async def run_soul(
147
154
  pass
148
155
  except TimeoutError:
149
156
  logger.warning("UI loop timed out")
150
-
151
- _current_wire.reset(wire_token)
157
+ finally:
158
+ _current_wire.reset(wire_token)
152
159
 
153
160
 
154
161
  _current_wire = ContextVar[Wire | None]("current_wire", default=None)
@@ -13,6 +13,7 @@ from kimi_cli.soul.approval import Approval
13
13
  from kimi_cli.soul.denwarenji import DenwaRenji
14
14
  from kimi_cli.soul.runtime import BuiltinSystemPromptArgs, Runtime
15
15
  from kimi_cli.soul.toolset import CustomToolset
16
+ from kimi_cli.tools import SkipThisTool
16
17
  from kimi_cli.utils.logging import logger
17
18
 
18
19
 
@@ -99,7 +100,11 @@ def _load_tools(
99
100
  ) -> list[str]:
100
101
  bad_tools: list[str] = []
101
102
  for tool_path in tool_paths:
102
- tool = _load_tool(tool_path, dependencies)
103
+ try:
104
+ tool = _load_tool(tool_path, dependencies)
105
+ except SkipThisTool:
106
+ logger.info("Skipping tool: {tool_path}", tool_path=tool_path)
107
+ continue
103
108
  if tool:
104
109
  toolset += tool
105
110
  else:
@@ -6,6 +6,7 @@ from typing import TYPE_CHECKING
6
6
  import kosong
7
7
  import tenacity
8
8
  from kosong import StepResult
9
+ from kosong.base.chat_provider import ThinkingEffort
9
10
  from kosong.base.message import ContentPart, ImageURLPart, Message
10
11
  from kosong.chat_provider import (
11
12
  APIConnectionError,
@@ -16,6 +17,7 @@ from kosong.chat_provider import (
16
17
  from kosong.tooling import ToolResult
17
18
  from tenacity import RetryCallState, retry_if_exception, stop_after_attempt, wait_exponential_jitter
18
19
 
20
+ from kimi_cli.llm import ModelCapability
19
21
  from kimi_cli.soul import (
20
22
  LLMNotSet,
21
23
  LLMNotSupported,
@@ -71,6 +73,7 @@ class KimiSoul(Soul):
71
73
  self._reserved_tokens = RESERVED_TOKENS
72
74
  if self._runtime.llm is not None:
73
75
  assert self._reserved_tokens <= self._runtime.llm.max_context_size
76
+ self._thinking_effort: ThinkingEffort = "off"
74
77
 
75
78
  for tool in agent.toolset.tools:
76
79
  if tool.name == SendDMail_NAME:
@@ -84,9 +87,15 @@ class KimiSoul(Soul):
84
87
  return self._agent.name
85
88
 
86
89
  @property
87
- def model(self) -> str:
90
+ def model_name(self) -> str:
88
91
  return self._runtime.llm.chat_provider.model_name if self._runtime.llm else ""
89
92
 
93
+ @property
94
+ def model_capabilities(self) -> set[ModelCapability] | None:
95
+ if self._runtime.llm is None:
96
+ return None
97
+ return self._runtime.llm.capabilities
98
+
90
99
  @property
91
100
  def status(self) -> StatusSnapshot:
92
101
  return StatusSnapshot(context_usage=self._context_usage)
@@ -101,6 +110,25 @@ class KimiSoul(Soul):
101
110
  return self._context.token_count / self._runtime.llm.max_context_size
102
111
  return 0.0
103
112
 
113
+ @property
114
+ def thinking(self) -> bool:
115
+ """Whether thinking mode is enabled."""
116
+ return self._thinking_effort != "off"
117
+
118
+ def set_thinking(self, enabled: bool) -> None:
119
+ """
120
+ Enable/disable thinking mode for the soul.
121
+
122
+ Raises:
123
+ LLMNotSet: When the LLM is not set.
124
+ LLMNotSupported: When the LLM does not support thinking mode.
125
+ """
126
+ if self._runtime.llm is None:
127
+ raise LLMNotSet()
128
+ if enabled and "thinking" not in self._runtime.llm.capabilities:
129
+ raise LLMNotSupported(self._runtime.llm, ["thinking"])
130
+ self._thinking_effort = "high" if enabled else "off"
131
+
104
132
  async def _checkpoint(self):
105
133
  await self._context.checkpoint(self._checkpoint_with_user_message)
106
134
 
@@ -111,7 +139,7 @@ class KimiSoul(Soul):
111
139
  if (
112
140
  isinstance(user_input, list)
113
141
  and any(isinstance(part, ImageURLPart) for part in user_input)
114
- and not self._runtime.llm.supports_image_in
142
+ and "image_in" not in self._runtime.llm.capabilities
115
143
  ):
116
144
  raise LLMNotSupported(self._runtime.llm, ["image_in"])
117
145
 
@@ -187,7 +215,7 @@ class KimiSoul(Soul):
187
215
  async def _kosong_step_with_retry() -> StepResult:
188
216
  # run an LLM step (may be interrupted)
189
217
  return await kosong.step(
190
- chat_provider,
218
+ chat_provider.with_thinking(self._thinking_effort),
191
219
  self._agent.system_prompt,
192
220
  self._agent.toolset,
193
221
  self._context.history,
@@ -2,7 +2,8 @@ from contextvars import ContextVar
2
2
  from typing import override
3
3
 
4
4
  from kosong.base.message import ToolCall
5
- from kosong.tooling import HandleResult, SimpleToolset
5
+ from kosong.tooling import HandleResult
6
+ from kosong.tooling.simple import SimpleToolset
6
7
 
7
8
  current_tool_call = ContextVar[ToolCall | None]("current_tool_call", default=None)
8
9