kimi-cli 0.43__py3-none-any.whl → 0.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kimi-cli might be problematic.

kimi_cli/CHANGELOG.md CHANGED
@@ -9,6 +9,28 @@ Internal builds may append content to the Unreleased section.
 Only write entries that are worth mentioning to users.
 -->
 
+## [0.45] - 2025-10-31
+
+### Added
+
+- Allow `KIMI_MODEL_CAPABILITIES` environment variable to override model capabilities
+- Add `--no-markdown` option to disable markdown rendering
+- Support `openai_responses` LLM provider type
+
+### Fixed
+
+- Fix crash when continuing a session
+
+## [0.44] - 2025-10-30
+
+### Changed
+
+- Improve startup time
+
+### Fixed
+
+- Fix potential invalid bytes in user input
+
 ## [0.43] - 2025-10-30
 
 ### Added
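The `KIMI_MODEL_CAPABILITIES` override added in 0.45 takes a comma-separated list, as the `kimi_cli/llm.py` hunk further down shows. A minimal sketch of that parsing rule; the capability names used here are purely hypothetical, the real set comes from `LLMModelCapability` in `kimi_cli.config`:

```python
import os

# Hypothetical capability names, used only for illustration.
KNOWN_CAPABILITIES = {"thinking", "image_in"}

# Comma-separated and case-insensitive; entries that are not known capabilities
# are dropped, mirroring augment_provider_with_env_vars in kimi_cli/llm.py below.
raw = os.getenv("KIMI_MODEL_CAPABILITIES", "Thinking, image_in, bogus")
capabilities = {
    cap.strip().lower()
    for cap in raw.split(",")
    if cap.strip() and cap.strip().lower() in KNOWN_CAPABILITIES
}
print(capabilities)  # {'thinking', 'image_in'}
```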
kimi_cli/__init__.py CHANGED
@@ -1,192 +0,0 @@
-import contextlib
-import os
-import warnings
-from collections.abc import Generator
-from pathlib import Path
-from typing import Any
-
-from pydantic import SecretStr
-
-from kimi_cli.agentspec import DEFAULT_AGENT_FILE
-from kimi_cli.config import LLMModel, LLMProvider, load_config
-from kimi_cli.llm import augment_provider_with_env_vars, create_llm
-from kimi_cli.session import Session
-from kimi_cli.soul.agent import load_agent
-from kimi_cli.soul.context import Context
-from kimi_cli.soul.kimisoul import KimiSoul
-from kimi_cli.soul.runtime import Runtime
-from kimi_cli.ui.acp import ACPServer
-from kimi_cli.ui.print import InputFormat, OutputFormat, PrintApp
-from kimi_cli.ui.shell import ShellApp, WelcomeInfoItem
-from kimi_cli.utils.logging import StreamToLogger, logger
-
-
-class KimiCLI:
-    @staticmethod
-    async def create(
-        session: Session,
-        *,
-        yolo: bool = False,
-        stream: bool = True,  # TODO: remove this when we have a correct print mode impl
-        mcp_configs: list[dict[str, Any]] | None = None,
-        config_file: Path | None = None,
-        model_name: str | None = None,
-        agent_file: Path | None = None,
-    ) -> "KimiCLI":
-        """
-        Create a KimiCLI instance.
-
-        Args:
-            session (Session): A session created by `Session.create` or `Session.continue_`.
-            yolo (bool, optional): Approve all actions without confirmation. Defaults to False.
-            stream (bool, optional): Use stream mode when calling LLM API. Defaults to True.
-            config_file (Path | None, optional): Path to the configuration file. Defaults to None.
-            model_name (str | None, optional): Name of the model to use. Defaults to None.
-            agent_file (Path | None, optional): Path to the agent file. Defaults to None.
-
-        Raises:
-            FileNotFoundError: When the agent file is not found.
-            ConfigError(KimiCLIException): When the configuration is invalid.
-            AgentSpecError(KimiCLIException): When the agent specification is invalid.
-        """
-        config = load_config(config_file)
-        logger.info("Loaded config: {config}", config=config)
-
-        model: LLMModel | None = None
-        provider: LLMProvider | None = None
-
-        # try to use config file
-        if not model_name and config.default_model:
-            # no --model specified && default model is set in config
-            model = config.models[config.default_model]
-            provider = config.providers[model.provider]
-        if model_name and model_name in config.models:
-            # --model specified && model is set in config
-            model = config.models[model_name]
-            provider = config.providers[model.provider]
-
-        if not model:
-            model = LLMModel(provider="", model="", max_context_size=100_000)
-            provider = LLMProvider(type="kimi", base_url="", api_key=SecretStr(""))
-
-        # try overwrite with environment variables
-        assert provider is not None
-        assert model is not None
-        env_overrides = augment_provider_with_env_vars(provider, model)
-
-        if not provider.base_url or not model.model:
-            llm = None
-        else:
-            logger.info("Using LLM provider: {provider}", provider=provider)
-            logger.info("Using LLM model: {model}", model=model)
-            llm = create_llm(provider, model, stream=stream, session_id=session.id)
-
-        welcome_info = [
-            WelcomeInfoItem(name="Directory", value=str(session.work_dir)),
-            WelcomeInfoItem(name="Session", value=session.id),
-        ]
-        if base_url := env_overrides.get("KIMI_BASE_URL"):
-            welcome_info.append(
-                WelcomeInfoItem(
-                    name="API URL",
-                    value=f"{base_url} (from KIMI_BASE_URL)",
-                    level=WelcomeInfoItem.Level.WARN,
-                )
-            )
-        if not llm:
-            welcome_info.append(
-                WelcomeInfoItem(
-                    name="Model",
-                    value="not set, send /setup to configure",
-                    level=WelcomeInfoItem.Level.WARN,
-                )
-            )
-        elif "KIMI_MODEL_NAME" in env_overrides:
-            welcome_info.append(
-                WelcomeInfoItem(
-                    name="Model",
-                    value=f"{model.model} (from KIMI_MODEL_NAME)",
-                    level=WelcomeInfoItem.Level.WARN,
-                )
-            )
-        else:
-            welcome_info.append(
-                WelcomeInfoItem(
-                    name="Model",
-                    value=model.model,
-                    level=WelcomeInfoItem.Level.INFO,
-                )
-            )
-
-        runtime = await Runtime.create(config, llm, session, yolo)
-
-        if agent_file is None:
-            agent_file = DEFAULT_AGENT_FILE
-        agent = await load_agent(agent_file, runtime, mcp_configs=mcp_configs or [])
-
-        context = Context(session.history_file)
-        await context.restore()
-
-        soul = KimiSoul(
-            agent,
-            runtime,
-            context=context,
-        )
-        return KimiCLI(soul, session, welcome_info)
-
-    def __init__(
-        self,
-        soul: KimiSoul,
-        session: Session,
-        welcome_info: list[WelcomeInfoItem],
-    ) -> None:
-        self._soul = soul
-        self._session = session
-        self._welcome_info = welcome_info
-
-    @property
-    def soul(self) -> KimiSoul:
-        """Get the KimiSoul instance."""
-        return self._soul
-
-    @property
-    def session(self) -> Session:
-        """Get the Session instance."""
-        return self._session
-
-    @contextlib.contextmanager
-    def _app_env(self) -> Generator[None]:
-        original_cwd = Path.cwd()
-        os.chdir(self._session.work_dir)
-        try:
-            # to ignore possible warnings from dateparser
-            warnings.filterwarnings("ignore", category=DeprecationWarning)
-            with contextlib.redirect_stderr(StreamToLogger()):
-                yield
-        finally:
-            os.chdir(original_cwd)
-
-    async def run_shell_mode(self, command: str | None = None) -> bool:
-        with self._app_env():
-            app = ShellApp(self._soul, welcome_info=self._welcome_info)
-            return await app.run(command)
-
-    async def run_print_mode(
-        self,
-        input_format: InputFormat,
-        output_format: OutputFormat,
-        command: str | None = None,
-    ) -> bool:
-        with self._app_env():
-            app = PrintApp(
-                self._soul,
-                input_format,
-                output_format,
-                self._session.history_file,
-            )
-            return await app.run(command)
-
-    async def run_acp_server(self) -> bool:
-        with self._app_env():
-            app = ACPServer(self._soul)
-            return await app.run()
kimi_cli/app.py ADDED
@@ -0,0 +1,195 @@
+import contextlib
+import os
+import warnings
+from collections.abc import Generator
+from pathlib import Path
+from typing import Any
+
+from pydantic import SecretStr
+
+from kimi_cli.agentspec import DEFAULT_AGENT_FILE
+from kimi_cli.cli import InputFormat, OutputFormat
+from kimi_cli.config import LLMModel, LLMProvider, load_config
+from kimi_cli.llm import augment_provider_with_env_vars, create_llm
+from kimi_cli.session import Session
+from kimi_cli.soul.agent import load_agent
+from kimi_cli.soul.context import Context
+from kimi_cli.soul.kimisoul import KimiSoul
+from kimi_cli.soul.runtime import Runtime
+from kimi_cli.utils.logging import StreamToLogger, logger
+
+
+class KimiCLI:
+    @staticmethod
+    async def create(
+        session: Session,
+        *,
+        yolo: bool = False,
+        stream: bool = True,  # TODO: remove this when we have a correct print mode impl
+        mcp_configs: list[dict[str, Any]] | None = None,
+        config_file: Path | None = None,
+        model_name: str | None = None,
+        agent_file: Path | None = None,
+    ) -> "KimiCLI":
+        """
+        Create a KimiCLI instance.
+
+        Args:
+            session (Session): A session created by `Session.create` or `Session.continue_`.
+            yolo (bool, optional): Approve all actions without confirmation. Defaults to False.
+            stream (bool, optional): Use stream mode when calling LLM API. Defaults to True.
+            config_file (Path | None, optional): Path to the configuration file. Defaults to None.
+            model_name (str | None, optional): Name of the model to use. Defaults to None.
+            agent_file (Path | None, optional): Path to the agent file. Defaults to None.
+
+        Raises:
+            FileNotFoundError: When the agent file is not found.
+            ConfigError(KimiCLIException): When the configuration is invalid.
+            AgentSpecError(KimiCLIException): When the agent specification is invalid.
+        """
+        config = load_config(config_file)
+        logger.info("Loaded config: {config}", config=config)
+
+        model: LLMModel | None = None
+        provider: LLMProvider | None = None
+
+        # try to use config file
+        if not model_name and config.default_model:
+            # no --model specified && default model is set in config
+            model = config.models[config.default_model]
+            provider = config.providers[model.provider]
+        if model_name and model_name in config.models:
+            # --model specified && model is set in config
+            model = config.models[model_name]
+            provider = config.providers[model.provider]
+
+        if not model:
+            model = LLMModel(provider="", model="", max_context_size=100_000)
+            provider = LLMProvider(type="kimi", base_url="", api_key=SecretStr(""))
+
+        # try overwrite with environment variables
+        assert provider is not None
+        assert model is not None
+        env_overrides = augment_provider_with_env_vars(provider, model)
+
+        if not provider.base_url or not model.model:
+            llm = None
+        else:
+            logger.info("Using LLM provider: {provider}", provider=provider)
+            logger.info("Using LLM model: {model}", model=model)
+            llm = create_llm(provider, model, stream=stream, session_id=session.id)
+
+        runtime = await Runtime.create(config, llm, session, yolo)
+
+        if agent_file is None:
+            agent_file = DEFAULT_AGENT_FILE
+        agent = await load_agent(agent_file, runtime, mcp_configs=mcp_configs or [])
+
+        context = Context(session.history_file)
+        await context.restore()
+
+        soul = KimiSoul(
+            agent,
+            runtime,
+            context=context,
+        )
+        return KimiCLI(soul, runtime, env_overrides)
+
+    def __init__(
+        self,
+        _soul: KimiSoul,
+        _runtime: Runtime,
+        _env_overrides: dict[str, str],
+    ) -> None:
+        self._soul = _soul
+        self._runtime = _runtime
+        self._env_overrides = _env_overrides
+
+    @property
+    def soul(self) -> KimiSoul:
+        """Get the KimiSoul instance."""
+        return self._soul
+
+    @property
+    def session(self) -> Session:
+        """Get the Session instance."""
+        return self._runtime.session
+
+    @contextlib.contextmanager
+    def _app_env(self) -> Generator[None]:
+        original_cwd = Path.cwd()
+        os.chdir(self._runtime.session.work_dir)
+        try:
+            # to ignore possible warnings from dateparser
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            with contextlib.redirect_stderr(StreamToLogger()):
+                yield
+        finally:
+            os.chdir(original_cwd)
+
+    async def run_shell_mode(self, command: str | None = None, markdown: bool = True) -> bool:
+        from kimi_cli.ui.shell import ShellApp, WelcomeInfoItem
+
+        welcome_info = [
+            WelcomeInfoItem(name="Directory", value=str(self._runtime.session.work_dir)),
+            WelcomeInfoItem(name="Session", value=self._runtime.session.id),
+        ]
+        if base_url := self._env_overrides.get("KIMI_BASE_URL"):
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="API URL",
+                    value=f"{base_url} (from KIMI_BASE_URL)",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        if not self._runtime.llm:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value="not set, send /setup to configure",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        elif "KIMI_MODEL_NAME" in self._env_overrides:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value=f"{self._soul.model} (from KIMI_MODEL_NAME)",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        else:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value=self._soul.model,
+                    level=WelcomeInfoItem.Level.INFO,
+                )
+            )
+        with self._app_env():
+            app = ShellApp(self._soul, welcome_info=welcome_info, markdown=markdown)
+            return await app.run(command)
+
+    async def run_print_mode(
+        self,
+        input_format: InputFormat,
+        output_format: OutputFormat,
+        command: str | None = None,
+    ) -> bool:
+        from kimi_cli.ui.print import PrintApp
+
+        with self._app_env():
+            app = PrintApp(
+                self._soul,
+                input_format,
+                output_format,
+                self._runtime.session.history_file,
+            )
+            return await app.run(command)
+
+    async def run_acp_server(self) -> bool:
+        from kimi_cli.ui.acp import ACPServer
+
+        with self._app_env():
+            app = ACPServer(self._soul)
+            return await app.run()
kimi_cli/cli.py CHANGED
@@ -7,12 +7,7 @@ from typing import Any, Literal, get_args
 
 import click
 
-from kimi_cli import KimiCLI
 from kimi_cli.constant import VERSION
-from kimi_cli.session import Session
-from kimi_cli.share import get_share_dir
-from kimi_cli.ui.print import InputFormat, OutputFormat
-from kimi_cli.utils.logging import logger
 
 
 class Reload(Exception):
@@ -22,6 +17,8 @@ class Reload(Exception):
 
 
 UIMode = Literal["shell", "print", "acp"]
+InputFormat = Literal["text", "stream-json"]
+OutputFormat = Literal["text", "stream-json"]
 
 
 @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@@ -140,6 +137,12 @@ UIMode = Literal["shell", "print", "acp"]
     default=False,
     help="Automatically approve all actions. Default: no.",
 )
+@click.option(
+    "--markdown/--no-markdown",
+    is_flag=True,
+    default=True,
+    help="Enable/disable markdown rendering in shell UI. Default: yes.",
+)
 def kimi(
     verbose: bool,
     debug: bool,
@@ -154,8 +157,13 @@
     mcp_config_file: list[Path],
     mcp_config: list[str],
     yolo: bool,
+    markdown: bool,
 ):
     """Kimi, your next CLI agent."""
+    from kimi_cli.app import KimiCLI
+    from kimi_cli.session import Session
+    from kimi_cli.share import get_share_dir
+    from kimi_cli.utils.logging import logger
 
     def _noop_echo(*args: Any, **kwargs: Any):
         pass
@@ -219,7 +227,7 @@
        )
        match ui:
            case "shell":
-               return await instance.run_shell_mode(command)
+               return await instance.run_shell_mode(command, markdown=markdown)
            case "print":
                return await instance.run_print_mode(
                    input_format or "text",
kimi_cli/config.py CHANGED
@@ -12,7 +12,7 @@ from kimi_cli.utils.logging import logger
 class LLMProvider(BaseModel):
     """LLM provider configuration."""
 
-    type: Literal["kimi", "openai_legacy", "_chaos"]
+    type: Literal["kimi", "openai_legacy", "openai_responses", "_chaos"]
     """Provider type"""
     base_url: str
     """API base URL"""
kimi_cli/llm.py CHANGED
@@ -1,10 +1,7 @@
 import os
-from typing import NamedTuple
+from typing import NamedTuple, cast, get_args
 
 from kosong.base.chat_provider import ChatProvider
-from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
-from kosong.chat_provider.kimi import Kimi
-from kosong.chat_provider.openai_legacy import OpenAILegacy
 from pydantic import SecretStr
 
 from kimi_cli.config import LLMModel, LLMModelCapability, LLMProvider
@@ -44,11 +41,19 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> di
                applied["KIMI_API_KEY"] = "******"
            if model_name := os.getenv("KIMI_MODEL_NAME"):
                model.model = model_name
-               applied["KIMI_MODEL_NAME"] = model.model
+               applied["KIMI_MODEL_NAME"] = model_name
            if max_context_size := os.getenv("KIMI_MODEL_MAX_CONTEXT_SIZE"):
                model.max_context_size = int(max_context_size)
-               applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = str(model.max_context_size)
-       case "openai_legacy":
+               applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = max_context_size
+           if capabilities := os.getenv("KIMI_MODEL_CAPABILITIES"):
+               caps_lower = (cap.strip().lower() for cap in capabilities.split(",") if cap.strip())
+               model.capabilities = set(
+                   cast(LLMModelCapability, cap)
+                   for cap in caps_lower
+                   if cap in get_args(LLMModelCapability)
+               )
+               applied["KIMI_MODEL_CAPABILITIES"] = capabilities
+       case "openai_legacy" | "openai_responses":
            if base_url := os.getenv("OPENAI_BASE_URL"):
                provider.base_url = base_url
            if api_key := os.getenv("OPENAI_API_KEY"):
@@ -68,6 +73,8 @@ def create_llm(
 ) -> LLM:
     match provider.type:
         case "kimi":
+            from kosong.chat_provider.kimi import Kimi
+
             chat_provider = Kimi(
                 model=model.model,
                 base_url=provider.base_url,
@@ -81,13 +88,26 @@
            if session_id:
                chat_provider = chat_provider.with_generation_kwargs(prompt_cache_key=session_id)
        case "openai_legacy":
+           from kosong.chat_provider.openai_legacy import OpenAILegacy
+
            chat_provider = OpenAILegacy(
                model=model.model,
                base_url=provider.base_url,
                api_key=provider.api_key.get_secret_value(),
                stream=stream,
            )
+       case "openai_responses":
+           from kosong.chat_provider.openai_responses import OpenAIResponses
+
+           chat_provider = OpenAIResponses(
+               model=model.model,
+               base_url=provider.base_url,
+               api_key=provider.api_key.get_secret_value(),
+               stream=stream,
+           )
        case "_chaos":
+           from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
+
            chat_provider = ChaosChatProvider(
                model=model.model,
                base_url=provider.base_url,
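The provider imports above were also moved from module level into the `match` arms, so only the backend that is actually selected gets imported; together with the lazy UI imports in `kimi_cli/app.py` and `kimi_cli/cli.py`, this is presumably what the 0.44 "Improve startup time" entry refers to. A minimal sketch of the deferred-import pattern, using only the standard library:

```python
def render(kind: str, payload: dict) -> str:
    # Import each backend only in the branch that needs it, so unused
    # (and possibly heavy) modules never get imported at startup.
    match kind:
        case "json":
            import json

            return json.dumps(payload)
        case "pretty":
            import pprint

            return pprint.pformat(payload)
        case _:
            raise ValueError(f"unknown kind: {kind}")


print(render("json", {"a": 1}))
```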
kimi_cli/soul/agent.py CHANGED
@@ -4,7 +4,6 @@ import string
 from pathlib import Path
 from typing import Any, NamedTuple
 
-import fastmcp
 from kosong.tooling import CallableTool, CallableTool2, Toolset
 
 from kimi_cli.agentspec import ResolvedAgentSpec, load_agent_spec
@@ -14,7 +13,6 @@ from kimi_cli.soul.approval import Approval
 from kimi_cli.soul.denwarenji import DenwaRenji
 from kimi_cli.soul.runtime import BuiltinSystemPromptArgs, Runtime
 from kimi_cli.soul.toolset import CustomToolset
-from kimi_cli.tools.mcp import MCPTool
 from kimi_cli.utils.logging import logger
 
 
@@ -143,6 +141,10 @@ async def _load_mcp_tools(
         ValueError: If the MCP config is not valid.
         RuntimeError: If the MCP server cannot be connected.
     """
+    import fastmcp
+
+    from kimi_cli.tools.mcp import MCPTool
+
     for mcp_config in mcp_configs:
         logger.info("Loading MCP tools from: {mcp_config}", mcp_config=mcp_config)
         client = fastmcp.Client(mcp_config)
kimi_cli/soul/runtime.py CHANGED
@@ -1,3 +1,4 @@
+import asyncio
 import subprocess
 import sys
 from datetime import datetime
@@ -75,9 +76,10 @@ class Runtime(NamedTuple):
        session: Session,
        yolo: bool,
    ) -> "Runtime":
-       # FIXME: do these asynchronously
-       ls_output = _list_work_dir(session.work_dir)
-       agents_md = load_agents_md(session.work_dir) or ""
+       ls_output, agents_md = await asyncio.gather(
+           asyncio.to_thread(_list_work_dir, session.work_dir),
+           asyncio.to_thread(load_agents_md, session.work_dir),
+       )
 
        return Runtime(
            config=config,
@@ -87,7 +89,7 @@
                KIMI_NOW=datetime.now().astimezone().isoformat(),
                KIMI_WORK_DIR=session.work_dir,
                KIMI_WORK_DIR_LS=ls_output,
-               KIMI_AGENTS_MD=agents_md,
+               KIMI_AGENTS_MD=agents_md or "",
            ),
            denwa_renji=DenwaRenji(),
            approval=Approval(yolo=yolo),
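`Runtime.create` now runs its two blocking filesystem scans concurrently with `asyncio.to_thread` under `asyncio.gather` instead of calling them sequentially. A self-contained sketch of the same pattern; the helpers here are stand-ins, not the real `_list_work_dir`/`load_agents_md`:

```python
import asyncio
from pathlib import Path


def list_dir(path: Path) -> str:
    # Stand-in for a blocking directory scan.
    return "\n".join(sorted(p.name for p in path.iterdir()))


def read_notes(path: Path) -> str | None:
    # Stand-in for a blocking file read that may find nothing.
    notes = path / "AGENTS.md"
    return notes.read_text() if notes.exists() else None


async def main() -> None:
    work_dir = Path.cwd()
    # Both blocking calls run in worker threads and are awaited together.
    ls_output, agents_md = await asyncio.gather(
        asyncio.to_thread(list_dir, work_dir),
        asyncio.to_thread(read_notes, work_dir),
    )
    print(ls_output)
    print(agents_md or "")


asyncio.run(main())
```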
kimi_cli/ui/print/__init__.py CHANGED
@@ -3,13 +3,13 @@ import json
 import sys
 from functools import partial
 from pathlib import Path
-from typing import Literal
 
 import aiofiles
 from kosong.base.message import Message
 from kosong.chat_provider import ChatProviderError
 from rich import print
 
+from kimi_cli.cli import InputFormat, OutputFormat
 from kimi_cli.soul import LLMNotSet, MaxStepsReached, RunCancelled, Soul, run_soul
 from kimi_cli.utils.logging import logger
 from kimi_cli.utils.message import message_extract_text
@@ -17,9 +17,6 @@ from kimi_cli.utils.signals import install_sigint_handler
 from kimi_cli.wire import WireUISide
 from kimi_cli.wire.message import StepInterrupted
 
-InputFormat = Literal["text", "stream-json"]
-OutputFormat = Literal["text", "stream-json"]
-
 
 class PrintApp:
     """
kimi_cli/ui/shell/__init__.py CHANGED
@@ -24,10 +24,16 @@ from kimi_cli.utils.signals import install_sigint_handler
 
 
 class ShellApp:
-    def __init__(self, soul: Soul, welcome_info: list["WelcomeInfoItem"] | None = None):
+    def __init__(
+        self,
+        soul: Soul,
+        welcome_info: list["WelcomeInfoItem"] | None = None,
+        markdown: bool = True,
+    ):
         self.soul = soul
         self._welcome_info = list(welcome_info or [])
         self._background_tasks: set[asyncio.Task[Any]] = set()
+        self._markdown = markdown
 
     async def run(self, command: str | None = None) -> bool:
         if command is not None:
@@ -168,7 +174,10 @@ class ShellApp:
            self.soul,
            user_input,
            lambda wire: visualize(
-               wire, initial_status=self.soul.status, cancel_event=cancel_event
+               wire,
+               initial_status=self.soul.status,
+               cancel_event=cancel_event,
+               markdown=self._markdown,
            ),
            cancel_event,
        )
kimi_cli/ui/shell/liveview.py CHANGED
@@ -1,5 +1,6 @@
 import asyncio
 from collections import deque
+from typing import Literal
 
 import streamingjson
 from kosong.base.message import ToolCall, ToolCallPart
@@ -129,6 +130,7 @@ class StepLiveView:
     def __init__(self, status: StatusSnapshot, cancel_event: asyncio.Event | None = None):
         # message content
         self._line_buffer = Text("")
+        self._last_text_mode: Literal["text", "think", ""] = ""
 
         # tool call
         self._tool_calls: dict[str, _ToolCallDisplay] = {}
@@ -187,7 +189,21 @@
        """
        console.print(renderable)
 
-   def append_text(self, text: str):
+   def append_text(self, text: str, mode: Literal["text", "think"] = "text"):
+       if not text:
+           # Ignore empty message
+           return
+       if self._last_text_mode != mode:
+           if self._line_buffer:
+               self._push_out(self._line_buffer)
+               self._push_out("")  # Add extra line between different modes
+               self._line_buffer.plain = ""
+           self._last_text_mode = mode
+           match mode:
+               case "text":
+                   self._line_buffer.style = ""
+               case "think":
+                   self._line_buffer.style = "grey50 italic"
        lines = text.split("\n")
        prev_is_empty = not self._line_buffer
        for line in lines[:-1]:
@@ -313,7 +329,14 @@ class StepLiveViewWithMarkdown(StepLiveView):
         self._buffer_status_active = False
         self._buffer_status_obj: Status | None = None
 
-    def append_text(self, text: str):
+    def append_text(self, text: str, mode: Literal["text", "think"] = "text"):
+        if not text:
+            # Ignore empty message
+            return
+        if self._last_text_mode != mode:
+            if self._flush_markdown():
+                self._push_out("")  # Add extra line between different modes
+            self._last_text_mode = mode
         if not self._pending_markdown_parts:
             self._show_thinking_status()
         self._pending_markdown_parts.append(text)
@@ -334,14 +357,22 @@
        self._flush_markdown()
        return super().__exit__(exc_type, exc_value, traceback)
 
-   def _flush_markdown(self):
+   def _flush_markdown(self) -> bool:
        self._hide_thinking_status()
        if not self._pending_markdown_parts:
-           return
+           return False
        markdown_text = "".join(self._pending_markdown_parts)
        self._pending_markdown_parts.clear()
        if markdown_text.strip():
-           self._push_out(_LeftAlignedMarkdown(markdown_text, justify="left"))
+           self._push_out(
+               _LeftAlignedMarkdown(
+                   markdown_text,
+                   justify="left",
+                   style="grey50 italic" if self._last_text_mode == "think" else "none",
+               )
+           )
+           return True
+       return False
 
    def _show_thinking_status(self):
        if self._buffer_status_active:
kimi_cli/ui/shell/prompt.py CHANGED
@@ -572,6 +572,7 @@ class CustomPromptSession:
     async def prompt(self) -> UserInput:
         with patch_stdout():
             command = str(await self._session.prompt_async()).strip()
+            command = command.replace("\x00", "")  # just in case null bytes are somehow inserted
             self._append_history_entry(command)
 
         # Parse rich content parts
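The 0.44 fix for "potential invalid bytes in user input" simply strips NUL characters from the prompt result before it is recorded in history. A minimal sketch of that sanitization step on a plain string:

```python
def sanitize_command(raw: str) -> str:
    # Drop NUL bytes that can occasionally end up in pasted terminal input.
    return raw.replace("\x00", "").strip()


assert sanitize_command("  ls -la\x00  ") == "ls -la"
```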
kimi_cli/ui/shell/setup.py CHANGED
@@ -26,7 +26,7 @@ class _Platform(NamedTuple):
 _PLATFORMS = [
     _Platform(
         id="kimi-for-coding",
-        name="Kimi For Coding (CN)",
+        name="Kimi For Coding",
         base_url="https://api.kimi.com/coding/v1",
         search_url="https://api.kimi.com/coding/v1/search",
     ),
kimi_cli/ui/shell/visualize.py CHANGED
@@ -1,7 +1,7 @@
 import asyncio
 from contextlib import asynccontextmanager, suppress
 
-from kosong.base.message import ContentPart, TextPart, ToolCall, ToolCallPart
+from kosong.base.message import ContentPart, TextPart, ThinkPart, ToolCall, ToolCallPart
 from kosong.tooling import ToolResult
 
 from kimi_cli.soul import StatusSnapshot
@@ -42,6 +42,7 @@ async def visualize(
     *,
     initial_status: StatusSnapshot,
     cancel_event: asyncio.Event | None = None,
+    markdown: bool = True,
 ):
     """
     A loop to consume agent events and visualize the agent behavior.
@@ -60,7 +61,8 @@
    while True:
        # TODO: Maybe we can always have a StepLiveView here.
        # No need to recreate for each step.
-       with StepLiveViewWithMarkdown(latest_status, cancel_event) as step:
+       LiveView = StepLiveViewWithMarkdown if markdown else StepLiveView
+       with LiveView(latest_status, cancel_event) as step:
            async with _keyboard_listener(step):
                # spin the moon at the beginning of each step
                with console.status("", spinner="moon"):
@@ -78,7 +80,9 @@
                while True:
                    match msg:
                        case TextPart(text=text):
-                           step.append_text(text)
+                           step.append_text(text, mode="text")
+                       case ThinkPart(think=think):
+                           step.append_text(think, mode="think")
                        case ContentPart():
                            # TODO: support more content parts
                            step.append_text(f"[{msg.__class__.__name__}]")
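Taken together, these hunks thread the new `--markdown/--no-markdown` flag from `kimi_cli/cli.py` through `run_shell_mode` and `ShellApp` down to `visualize`, which picks the live-view class per step. A condensed sketch of that selection, assuming the two classes live in `kimi_cli.ui.shell.liveview` as the RECORD below suggests:

```python
import asyncio

from kimi_cli.ui.shell.liveview import StepLiveView, StepLiveViewWithMarkdown


def make_step_view(markdown: bool, latest_status, cancel_event: asyncio.Event | None):
    # Same selection as in visualize(): plain-text rendering when --no-markdown is passed.
    LiveView = StepLiveViewWithMarkdown if markdown else StepLiveView
    return LiveView(latest_status, cancel_event)
```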
kimi_cli-0.45.dist-info/METADATA CHANGED
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: kimi-cli
-Version: 0.43
+Version: 0.45
 Summary: Kimi CLI is your next CLI agent.
 Requires-Dist: agent-client-protocol==0.6.2
 Requires-Dist: aiofiles==25.1.0
 Requires-Dist: aiohttp==3.13.2
 Requires-Dist: click==8.3.0
-Requires-Dist: kosong==0.16.1
+Requires-Dist: kosong==0.16.2
 Requires-Dist: loguru==0.7.3
 Requires-Dist: patch-ng==1.19.0
 Requires-Dist: prompt-toolkit==3.0.52
kimi_cli-0.45.dist-info/RECORD CHANGED
@@ -1,14 +1,15 @@
-kimi_cli/CHANGELOG.md,sha256=079d43c65c761148f411503690acb8ebf8b2e87b7a430778bd010752b22c5a05,8208
-kimi_cli/__init__.py,sha256=fcd01c57f5222328008dd982640f931a84eda4a2afea357aab79aad9a893aa61,6876
+kimi_cli/CHANGELOG.md,sha256=0affdbd19f99b6dedb4cc2cac63eb2f9f942d8cfab44f01b445374764b572a10,8603
+kimi_cli/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
 kimi_cli/agents/default/agent.yaml,sha256=6e5c51987ef5cfc0c4c4e34cc20b6fc975953ee219623fccae81a19155aab7b3,709
 kimi_cli/agents/default/sub.yaml,sha256=e0c1ea34fdb04b0d6dc635709f0f130aff25d7f9fb97e238470143c8145be251,634
 kimi_cli/agents/default/system.md,sha256=1d8fd4956b2442215396b5e9651771c9da8f9505ccbd3b6d5e91b1ac4ff35418,5001
 kimi_cli/agentspec.py,sha256=1148b5184ca610b2fb261ce365a63eb2fc9d09497330fe0ea4b2567fc98d5657,4307
-kimi_cli/cli.py,sha256=5db63b9299b7ede52c7d758e627579d9082e4466f34fbe5e8bcd0910e1e98e67,6658
-kimi_cli/config.py,sha256=1bdd90554d33c45848d9ed9499a7914aecdf51e9166abe1b211d95aaa05a6382,4992
+kimi_cli/app.py,sha256=34fde6911d3b4fa6ada013f5ae714cdf354be479da9adb26c6460de0a9ba10c3,7053
+kimi_cli/cli.py,sha256=5731276d9c13e58962c39cdf9db3a57bd875a7a09d560c64498454df62b717a0,6910
+kimi_cli/config.py,sha256=5524c2dd7d3d10e7965901253c754185a4e4b1d30937598ff2b321a070c760be,5012
 kimi_cli/constant.py,sha256=78e25b9304cca7b6f70bb08bf0e1fee4b066297a05386e56dd6935ba42027cd9,110
 kimi_cli/exception.py,sha256=a3fec07566da7d2d34be8cc454fb825f34109bbde3cddf69e1ece6ab21b4b219,259
-kimi_cli/llm.py,sha256=0f62baadd7b6e685372f3d781fed04a2f7dcc5a96c2695e0c259eb071ab65165,3589
+kimi_cli/llm.py,sha256=fdb4f944d6ccec9fe5b8bc1f80bb283df629663659fcdbffe405af2f61f73733,4442
 kimi_cli/metadata.py,sha256=9e9d4bc12ff26fc34e0e09d9068be989f2ff3c8b682ef453de69e442f8f672a1,1557
 kimi_cli/prompts/__init__.py,sha256=6dc5ed2d841f145c09550075f30853cdc51f00d2f5d9aa1097f8edea770536e7,174
 kimi_cli/prompts/compact.md,sha256=6655bd7d8270b24d8f97b51ef7c471cf71d686c56f8ec9a5cc9e47caa3aae87c,1877
@@ -17,14 +18,14 @@ kimi_cli/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991
 kimi_cli/session.py,sha256=c0623b0cec8ce311f1ddc7bb47e1e452499e13f816b7c3023342499a436dcfc5,2853
 kimi_cli/share.py,sha256=4292df7f44177419c45c772a933b5f36e2b7533f8cef9842629a438bc7856dc0,204
 kimi_cli/soul/__init__.py,sha256=9888a937570bf8bc6e49087a0694f121957210eaa14a7efb4042eb513e136caa,5499
-kimi_cli/soul/agent.py,sha256=637aa6c8c52fd334fb20f15db6f54ef47dc1731eec14efd54d42c29f53bbe13b,4833
+kimi_cli/soul/agent.py,sha256=776e92f46cff2ac5ec0acd06acb445ddabbee05fcc358f9cde54d439e505d300,4843
 kimi_cli/soul/approval.py,sha256=48cd230dff81dfd70bd85f1ad2b99604d5569cf617a4c79c444f9772bbc89ce6,2552
 kimi_cli/soul/compaction.py,sha256=dab17979060fceeed4a7a344373833022dc7abac04282364f2a1b20e6edd4581,3558
 kimi_cli/soul/context.py,sha256=541759a65f8f87a3424a6da160ffb2043046e6f6b714124d94d82a77635df9bc,5855
 kimi_cli/soul/denwarenji.py,sha256=66b95f052a1fa844e2347972d34e1916a7be24d3e493701b451f5380b0375c9f,1384
 kimi_cli/soul/kimisoul.py,sha256=68c65fe0f97700832a42018ae00892d38dc79f14759265042f47b707dbc0c749,11822
 kimi_cli/soul/message.py,sha256=7a52a6d4d63ef1a3621d93d5ff86887baa7e67019bf2e9a08c374fc130b8d152,2541
-kimi_cli/soul/runtime.py,sha256=5104f501e39f082115ff1a09da797f0dc9ae7763b76719fc4af73420e4a98634,2629
+kimi_cli/soul/runtime.py,sha256=9421a3ce6882587a95ecdf77b3d75f3b7ecab55cf68dc57e3e563b93e5a02e46,2690
 kimi_cli/soul/toolset.py,sha256=60166d89ef0efac690fa6866e88afe70fbe80ad862ba2524d70ddf657a730d14,744
 kimi_cli/tools/__init__.py,sha256=4d612402814eede7182e0a55e7dd21c4532b5dd44700dc744763a8308c2f74f8,3280
 kimi_cli/tools/bash/__init__.py,sha256=de21b19c714bda53f6c89e3348c4c82fb4278040130fed1d261b3ab203054e8c,3028
@@ -60,18 +61,18 @@ kimi_cli/tools/web/search.md,sha256=24049f9e90d37083e0fc78b8b2e3a5f6fadf09bea00f
 kimi_cli/tools/web/search.py,sha256=85de343b20bc9e58de8a09aba7c3aac619d6fc6d30a6a5b565108faeb4500faf,4517
 kimi_cli/ui/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
 kimi_cli/ui/acp/__init__.py,sha256=a35e17273e943168882865f90d922180c5a1f01de0d128c899ffcfe55a9db3c3,17120
-kimi_cli/ui/print/__init__.py,sha256=cea17f8db2b52eee6ccb632636f3e9588ae0193a7a2f723b42c515ed0a2d28aa,5614
-kimi_cli/ui/shell/__init__.py,sha256=aa30b512004be5ad9c8b8ab420bbdb574532dc90968067682dab58ff19e8b660,11289
+kimi_cli/ui/print/__init__.py,sha256=ca402bec701a253acd6de5b57d59635ac0b05d4013cebc26877c5aa4aa2c27c7,5546
+kimi_cli/ui/shell/__init__.py,sha256=c9644dbc27f899baea38aeee1b4b5ae3b82f84d0321f939477bd03f2ad7532b9,11471
 kimi_cli/ui/shell/console.py,sha256=bcbf7efd214cba3d2259f2a2c1842250cde96d49e4f9f1e0b60273cf1c366be3,842
 kimi_cli/ui/shell/debug.py,sha256=cd4e7259c83f099b5c6519713be5306580f30d3fa4944e07916d4468e960c9c7,5562
 kimi_cli/ui/shell/keyboard.py,sha256=23e5fbc4b6acda4c0f3b5297a0ae6eb09a90f4b5b37b2e95b7ce86a2da0d5dca,5160
-kimi_cli/ui/shell/liveview.py,sha256=f4e6ac37c446740b5c55cf37d5ebd327b5d41334d41d5e54ad8ad15445c1a492,14239
+kimi_cli/ui/shell/liveview.py,sha256=2f780323dfc7d9b35491a17ccc2b4f6320176ae00c596b426ea7d7f513d3e42c,15478
 kimi_cli/ui/shell/metacmd.py,sha256=a0e52e9cbd8758c1ba13f025599341aa59dd5bc5e244840da2ff9bb71f952a20,7678
-kimi_cli/ui/shell/prompt.py,sha256=f33a27646ddafdd1afb6f16a63a6c164f39019893e96764243d9e58650ff8d9b,25710
+kimi_cli/ui/shell/prompt.py,sha256=d18003de33da0c9711bf54496cb4d5f6f5aa6b2179b1618e4475b1f020208f6c,25808
 kimi_cli/ui/shell/replay.py,sha256=e54f58acebc46ad944e1a2cdf54d81559262d2cf8baf5da391ed903926f1ccf1,3767
-kimi_cli/ui/shell/setup.py,sha256=36be348c8cdbb908f1b896f7dfd93802df4d76eeb8572d39b81a34dfd5ee2a3c,5374
+kimi_cli/ui/shell/setup.py,sha256=8fbf2935fc5b972d2c3946e8dc9f4a7e9d2953810b57c0fb6f22172abf3e6fb5,5369
 kimi_cli/ui/shell/update.py,sha256=56dcb0bd1da82b98c22bfdddca717a2805bd8ac3e93bf23fb3b508549c41fae8,7340
-kimi_cli/ui/shell/visualize.py,sha256=1abaa53cf78836f71b1f83085b6bfe3f86624951fa3297d5086a06bafa97f960,3884
+kimi_cli/ui/shell/visualize.py,sha256=1ca7a1e766f96f108318640d48552111d42b433e4ddb551af869d331c074d950,4112
 kimi_cli/utils/aiohttp.py,sha256=f8f61e3beaf6439e949c33c3a10db3035bf88136e882b09c858ea92a4c888e00,245
 kimi_cli/utils/changelog.py,sha256=bfcf5a5a360b13648bb7a6abc83e427270caa502646b5acc950d62148510641c,3402
 kimi_cli/utils/logging.py,sha256=129298ac214ecd8d913c3431cc05d754f9c4c8c4042c458618bf9e8ddebdb763,399
@@ -82,7 +83,7 @@ kimi_cli/utils/signals.py,sha256=20e0d158a1043189d44815fe3624cd0bfe41e99620a18ac
 kimi_cli/utils/string.py,sha256=0d437d3633199df1051813af8b49a2f808c6525547310cc5c3d427710d2eae06,593
 kimi_cli/wire/__init__.py,sha256=9f1d7eb58f76885edaf76f769371c363ec801b46cada03883eeb3536fa2677f7,1896
 kimi_cli/wire/message.py,sha256=72222d3f3d7228a323dbba7b1084f35018104c58e4bb2aa51d0827984791841d,2398
-kimi_cli-0.43.dist-info/WHEEL,sha256=70ab3c2925fe316809860cb034f99ba13c4b49819b339959274aab755cc084a8,78
-kimi_cli-0.43.dist-info/entry_points.txt,sha256=97e051756296e9db3167f6dce61d6c88e58d170314a2d63d18c84c73a5c1333b,44
-kimi_cli-0.43.dist-info/METADATA,sha256=7319672d68a983b488b93d9b4e21bde14f5b4c23069bff3316df481f2856f61f,5217
-kimi_cli-0.43.dist-info/RECORD,,
+kimi_cli-0.45.dist-info/WHEEL,sha256=70ab3c2925fe316809860cb034f99ba13c4b49819b339959274aab755cc084a8,78
+kimi_cli-0.45.dist-info/entry_points.txt,sha256=97e051756296e9db3167f6dce61d6c88e58d170314a2d63d18c84c73a5c1333b,44
+kimi_cli-0.45.dist-info/METADATA,sha256=bac942d1519d1a5f2650199cb6add467e99274392d3e9b839707915a1acd58d0,5217
+kimi_cli-0.45.dist-info/RECORD,,