kimi-cli 0.42-py3-none-any.whl → 0.43-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

kimi_cli/CHANGELOG.md CHANGED
@@ -9,6 +9,19 @@ Internal builds may append content to the Unreleased section.
 Only write entries that are worth mentioning to users.
 -->
 
+## [0.43] - 2025-10-30
+
+### Added
+
+- Basic Windows support (experimental)
+- Display warnings when base URL or API key is overridden in environment variables
+- Support image input if the LLM model supports it
+- Replay recent context history when continuing a session
+
+### Fixed
+
+- Ensure new line after executing shell commands
+
 ## [0.42] - 2025-10-28
 
 ### Added
kimi_cli/__init__.py CHANGED
@@ -17,7 +17,7 @@ from kimi_cli.soul.kimisoul import KimiSoul
 from kimi_cli.soul.runtime import Runtime
 from kimi_cli.ui.acp import ACPServer
 from kimi_cli.ui.print import InputFormat, OutputFormat, PrintApp
-from kimi_cli.ui.shell import ShellApp
+from kimi_cli.ui.shell import ShellApp, WelcomeInfoItem
 from kimi_cli.utils.logging import StreamToLogger, logger
 
 
@@ -72,7 +72,7 @@ class KimiCLI:
         # try overwrite with environment variables
         assert provider is not None
         assert model is not None
-        augment_provider_with_env_vars(provider, model)
+        env_overrides = augment_provider_with_env_vars(provider, model)
 
         if not provider.base_url or not model.model:
             llm = None
@@ -81,6 +81,43 @@ class KimiCLI:
             logger.info("Using LLM model: {model}", model=model)
             llm = create_llm(provider, model, stream=stream, session_id=session.id)
 
+        welcome_info = [
+            WelcomeInfoItem(name="Directory", value=str(session.work_dir)),
+            WelcomeInfoItem(name="Session", value=session.id),
+        ]
+        if base_url := env_overrides.get("KIMI_BASE_URL"):
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="API URL",
+                    value=f"{base_url} (from KIMI_BASE_URL)",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        if not llm:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value="not set, send /setup to configure",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        elif "KIMI_MODEL_NAME" in env_overrides:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value=f"{model.model} (from KIMI_MODEL_NAME)",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        else:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value=model.model,
+                    level=WelcomeInfoItem.Level.INFO,
+                )
+            )
+
         runtime = await Runtime.create(config, llm, session, yolo)
 
         if agent_file is None:
@@ -95,11 +132,17 @@ class KimiCLI:
             runtime,
             context=context,
         )
-        return KimiCLI(soul, session)
+        return KimiCLI(soul, session, welcome_info)
 
-    def __init__(self, soul: KimiSoul, session: Session) -> None:
+    def __init__(
+        self,
+        soul: KimiSoul,
+        session: Session,
+        welcome_info: list[WelcomeInfoItem],
+    ) -> None:
         self._soul = soul
         self._session = session
+        self._welcome_info = welcome_info
 
     @property
     def soul(self) -> KimiSoul:
@@ -125,13 +168,7 @@ class KimiCLI:
 
     async def run_shell_mode(self, command: str | None = None) -> bool:
         with self._app_env():
-            app = ShellApp(
-                self._soul,
-                welcome_info={
-                    "Directory": str(self._session.work_dir),
-                    "Session": self._session.id,
-                },
-            )
+            app = ShellApp(self._soul, welcome_info=self._welcome_info)
             return await app.run(command)
 
     async def run_print_mode(
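
The welcome banner is now assembled in `KimiCLI.create`, where the environment-override information is available, and each row carries a severity level so the shell UI can flag overrides and a missing model. The diff shows only how `WelcomeInfoItem` is constructed; below is a minimal sketch of the shape those calls imply. The real class lives in `kimi_cli.ui.shell` and may differ; everything beyond the `name`, `value`, and `level` arguments is an assumption.

```python
from dataclasses import dataclass
from enum import Enum, auto


@dataclass
class WelcomeInfoItem:
    """Sketch inferred from the constructor calls above, not the real class."""

    class Level(Enum):
        INFO = auto()
        WARN = auto()

    name: str
    value: str
    # Directory/Session items omit `level`, so a default level is implied.
    level: "WelcomeInfoItem.Level" = Level.INFO


# A renderer could then highlight WARN rows, e.g.:
items = [
    WelcomeInfoItem(name="Directory", value="/work"),
    WelcomeInfoItem(
        name="API URL",
        value="https://example.invalid/v1 (from KIMI_BASE_URL)",  # hypothetical value
        level=WelcomeInfoItem.Level.WARN,
    ),
]
for item in items:
    marker = "!" if item.level is WelcomeInfoItem.Level.WARN else " "
    print(f"{marker} {item.name}: {item.value}")
```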
kimi_cli/config.py CHANGED
@@ -18,7 +18,7 @@ class LLMProvider(BaseModel):
     """API base URL"""
     api_key: SecretStr
     """API key"""
-    custom_headers: dict[str, str] = Field(default_factory=dict)
+    custom_headers: dict[str, str] | None = None
     """Custom headers to include in API requests"""
 
     @field_serializer("api_key", when_used="json")
@@ -26,6 +26,9 @@ class LLMProvider(BaseModel):
         return v.get_secret_value()
 
 
+LLMModelCapability = Literal["image_in"]
+
+
 class LLMModel(BaseModel):
     """LLM model configuration."""
 
@@ -35,6 +38,8 @@ class LLMModel(BaseModel):
     """Model name"""
     max_context_size: int
     """Maximum context size (unit: tokens)"""
+    capabilities: set[LLMModelCapability] | None = None
+    """Model capabilities"""
 
 
 class LoopControl(BaseModel):
@@ -53,7 +58,7 @@ class MoonshotSearchConfig(BaseModel):
     """Base URL for Moonshot Search service."""
     api_key: SecretStr
     """API key for Moonshot Search service."""
-    custom_headers: dict[str, str] = Field(default_factory=dict)
+    custom_headers: dict[str, str] | None = None
     """Custom headers to include in API requests."""
 
     @field_serializer("api_key", when_used="json")
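
Because `capabilities` is typed as an optional set of the `LLMModelCapability` literal, pydantic rejects unknown capability names when the config is loaded rather than when the capability is used. A standalone sketch of that validation behavior, using made-up values rather than a real kimi-cli config:

```python
from typing import Literal

from pydantic import BaseModel, ValidationError

LLMModelCapability = Literal["image_in"]


class Model(BaseModel):
    model: str
    max_context_size: int
    capabilities: set[LLMModelCapability] | None = None


# Accepted: "image_in" is a declared capability.
Model(model="example-model", max_context_size=128_000, capabilities={"image_in"})

try:
    # Rejected at load time: "image_out" is not part of the Literal.
    Model(model="example-model", max_context_size=128_000, capabilities={"image_out"})
except ValidationError as e:
    print(e)
```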
kimi_cli/llm.py CHANGED
@@ -7,26 +7,47 @@ from kosong.chat_provider.kimi import Kimi
 from kosong.chat_provider.openai_legacy import OpenAILegacy
 from pydantic import SecretStr
 
-from kimi_cli.config import LLMModel, LLMProvider
+from kimi_cli.config import LLMModel, LLMModelCapability, LLMProvider
 from kimi_cli.constant import USER_AGENT
 
 
 class LLM(NamedTuple):
     chat_provider: ChatProvider
     max_context_size: int
+    capabilities: set[LLMModelCapability]
+    # TODO: these additional fields should be moved to ChatProvider
 
+    @property
+    def model_name(self) -> str:
+        return self.chat_provider.model_name
+
+    @property
+    def supports_image_in(self) -> bool:
+        return "image_in" in self.capabilities
+
+
+def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> dict[str, str]:
+    """Override provider/model settings from environment variables.
+
+    Returns:
+        Mapping of environment variables that were applied.
+    """
+    applied: dict[str, str] = {}
 
-def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel):
     match provider.type:
         case "kimi":
             if base_url := os.getenv("KIMI_BASE_URL"):
                 provider.base_url = base_url
+                applied["KIMI_BASE_URL"] = base_url
             if api_key := os.getenv("KIMI_API_KEY"):
                 provider.api_key = SecretStr(api_key)
+                applied["KIMI_API_KEY"] = "******"
             if model_name := os.getenv("KIMI_MODEL_NAME"):
                 model.model = model_name
+                applied["KIMI_MODEL_NAME"] = model.model
             if max_context_size := os.getenv("KIMI_MODEL_MAX_CONTEXT_SIZE"):
                 model.max_context_size = int(max_context_size)
+                applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = str(model.max_context_size)
         case "openai_legacy":
             if base_url := os.getenv("OPENAI_BASE_URL"):
                 provider.base_url = base_url
@@ -35,6 +56,8 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel):
         case _:
             pass
 
+    return applied
+
 
 def create_llm(
     provider: LLMProvider,
@@ -52,7 +75,7 @@ def create_llm(
         stream=stream,
         default_headers={
            "User-Agent": USER_AGENT,
-           **provider.custom_headers,
+           **(provider.custom_headers or {}),
         },
     )
     if session_id:
@@ -75,4 +98,8 @@ def create_llm(
         ),
     )
 
-    return LLM(chat_provider=chat_provider, max_context_size=model.max_context_size)
+    return LLM(
+        chat_provider=chat_provider,
+        max_context_size=model.max_context_size,
+        capabilities=model.capabilities or set(),
+    )
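
`augment_provider_with_env_vars` now reports what it changed instead of mutating silently, masking the API key in that report so the result can be shown in the welcome banner. A standalone sketch of the pattern with stand-in types (the real function mutates `LLMProvider`/`LLMModel` from `kimi_cli.config`; the URL below is a hypothetical value):

```python
import os
from dataclasses import dataclass


@dataclass
class Provider:  # stand-in for kimi_cli.config.LLMProvider
    base_url: str
    api_key: str


def apply_env_overrides(provider: Provider) -> dict[str, str]:
    """Apply overrides and record each one so the UI can warn about it."""
    applied: dict[str, str] = {}
    if base_url := os.getenv("KIMI_BASE_URL"):
        provider.base_url = base_url
        applied["KIMI_BASE_URL"] = base_url
    if api_key := os.getenv("KIMI_API_KEY"):
        provider.api_key = api_key
        applied["KIMI_API_KEY"] = "******"  # never echo the secret itself
    return applied


os.environ["KIMI_BASE_URL"] = "https://example.invalid/v1"
provider = Provider(base_url="", api_key="")
print(apply_env_overrides(provider))  # {'KIMI_BASE_URL': 'https://example.invalid/v1'}
```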
kimi_cli/soul/__init__.py CHANGED
@@ -4,6 +4,9 @@ from collections.abc import Callable, Coroutine
 from contextvars import ContextVar
 from typing import Any, NamedTuple, Protocol, runtime_checkable
 
+from kosong.base.message import ContentPart
+
+from kimi_cli.llm import LLM
 from kimi_cli.utils.logging import logger
 from kimi_cli.wire import Wire, WireUISide
 from kimi_cli.wire.message import WireMessage
@@ -15,6 +18,19 @@ class LLMNotSet(Exception):
     pass
 
 
+class LLMNotSupported(Exception):
+    """Raised when the LLM does not have required capabilities."""
+
+    def __init__(self, llm: LLM, capabilities: list[str]):
+        self.llm = llm
+        self.capabilities = capabilities
+        capabilities_str = "capability" if len(capabilities) == 1 else "capabilities"
+        super().__init__(
+            f"The LLM model '{llm.model_name}' does not support required {capabilities_str}: "
+            f"{', '.join(capabilities)}."
+        )
+
+
 class MaxStepsReached(Exception):
     """Raised when the maximum number of steps is reached."""
 
@@ -47,15 +63,16 @@ class Soul(Protocol):
         """The current status of the soul. The returned value is immutable."""
         ...
 
-    async def run(self, user_input: str):
+    async def run(self, user_input: str | list[ContentPart]):
         """
         Run the agent with the given user input until the max steps or no more tool calls.
 
         Args:
-            user_input (str): The user input to the agent.
+            user_input (str | list[ContentPart]): The user input to the agent.
 
         Raises:
             LLMNotSet: When the LLM is not set.
+            LLMNotSupported: When the LLM does not have required capabilities.
             ChatProviderError: When the LLM provider returns an error.
             MaxStepsReached: When the maximum number of steps is reached.
             asyncio.CancelledError: When the run is cancelled by user.
@@ -73,7 +90,7 @@ class RunCancelled(Exception):
 
 async def run_soul(
     soul: "Soul",
-    user_input: str,
+    user_input: str | list[ContentPart],
     ui_loop_fn: UILoopFn,
     cancel_event: asyncio.Event,
 ) -> None:
@@ -85,6 +102,7 @@ async def run_soul(
 
     Raises:
         LLMNotSet: When the LLM is not set.
+        LLMNotSupported: When the LLM does not have required capabilities.
         ChatProviderError: When the LLM provider returns an error.
         MaxStepsReached: When the maximum number of steps is reached.
         RunCancelled: When the run is cancelled by the cancel event.
@@ -125,7 +143,7 @@ async def run_soul(
     try:
         await asyncio.wait_for(ui_task, timeout=0.5)
     except asyncio.QueueShutDown:
-        # expected
+        logger.debug("UI loop shut down")
         pass
     except TimeoutError:
         logger.warning("UI loop timed out")
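
Callers of `Soul.run` can now distinguish a missing model from a model that lacks a required capability. A sketch of how a UI layer might surface the new exception; the printed messages are illustrative, and `submit` is a hypothetical helper, not part of the package:

```python
from kimi_cli.soul import LLMNotSet, LLMNotSupported, Soul


async def submit(soul: Soul, user_input: str | list) -> None:
    try:
        await soul.run(user_input)
    except LLMNotSet:
        print("No LLM configured; send /setup to configure one.")
    except LLMNotSupported as exc:
        # exc.capabilities names what is missing, e.g. ["image_in"]
        print(f"Input rejected: {exc}")
```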
kimi_cli/soul/kimisoul.py CHANGED
@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING
 import kosong
 import tenacity
 from kosong import StepResult
-from kosong.base.message import Message
+from kosong.base.message import ContentPart, ImageURLPart, Message
 from kosong.chat_provider import (
     APIConnectionError,
     APIStatusError,
@@ -16,7 +16,14 @@ from kosong.chat_provider import (
 from kosong.tooling import ToolResult
 from tenacity import RetryCallState, retry_if_exception, stop_after_attempt, wait_exponential_jitter
 
-from kimi_cli.soul import LLMNotSet, MaxStepsReached, Soul, StatusSnapshot, wire_send
+from kimi_cli.soul import (
+    LLMNotSet,
+    LLMNotSupported,
+    MaxStepsReached,
+    Soul,
+    StatusSnapshot,
+    wire_send,
+)
 from kimi_cli.soul.agent import Agent
 from kimi_cli.soul.compaction import SimpleCompaction
 from kimi_cli.soul.context import Context
@@ -53,7 +60,6 @@ class KimiSoul(Soul):
             agent (Agent): The agent to run.
             runtime (Runtime): Runtime parameters and states.
             context (Context): The context of the agent.
-            loop_control (LoopControl): The control parameters for the agent loop.
         """
         self._agent = agent
         self._runtime = runtime
@@ -85,6 +91,10 @@ class KimiSoul(Soul):
     def status(self) -> StatusSnapshot:
         return StatusSnapshot(context_usage=self._context_usage)
 
+    @property
+    def context(self) -> Context:
+        return self._context
+
     @property
     def _context_usage(self) -> float:
         if self._runtime.llm is not None:
@@ -94,10 +104,17 @@ class KimiSoul(Soul):
     async def _checkpoint(self):
        await self._context.checkpoint(self._checkpoint_with_user_message)
 
-    async def run(self, user_input: str):
+    async def run(self, user_input: str | list[ContentPart]):
         if self._runtime.llm is None:
             raise LLMNotSet()
 
+        if (
+            isinstance(user_input, list)
+            and any(isinstance(part, ImageURLPart) for part in user_input)
+            and not self._runtime.llm.supports_image_in
+        ):
+            raise LLMNotSupported(self._runtime.llm, ["image_in"])
+
         await self._checkpoint()  # this creates the checkpoint 0 on first run
         await self._context.append_message(Message(role="user", content=user_input))
         logger.debug("Appended user message to context")
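
`KimiSoul.run` rejects image parts up front when the model lacks the `image_in` capability, instead of letting the provider fail mid-request. A standalone sketch of that gate using stand-in part types; the real check uses kosong's `ImageURLPart` and `LLM.supports_image_in`:

```python
from dataclasses import dataclass


@dataclass
class TextPart:  # stand-in for kosong's TextPart
    text: str


@dataclass
class ImagePart:  # stand-in for kosong's ImageURLPart
    url: str


def needs_image_support(user_input: str | list) -> bool:
    """True when the input is a content-part list containing at least one image."""
    return isinstance(user_input, list) and any(
        isinstance(part, ImagePart) for part in user_input
    )


print(needs_image_support("plain text"))  # False
print(needs_image_support([TextPart("what is this?"), ImagePart("file:///a.png")]))  # True
```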
kimi_cli/soul/message.py CHANGED
@@ -14,7 +14,7 @@ def tool_result_to_messages(tool_result: ToolResult) -> list[Message]:
     message = tool_result.result.message
     if isinstance(tool_result.result, ToolRuntimeError):
         message += "\nThis is an unexpected error and the tool is probably not working."
-    content: list[ContentPart] = [system(message)]
+    content: list[ContentPart] = [system(f"ERROR: {message}")]
     if tool_result.result.output:
         content.append(TextPart(text=tool_result.result.output))
     return [
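
The `ERROR:` prefix gives the model an unambiguous marker for tool failures. For illustration, with a hypothetical runtime-error message of `timeout after 30s`, the system content part now reads:

```python
message = "timeout after 30s"  # hypothetical ToolRuntimeError message
message += "\nThis is an unexpected error and the tool is probably not working."
print(f"ERROR: {message}")
# ERROR: timeout after 30s
# This is an unexpected error and the tool is probably not working.
```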
kimi_cli/soul/runtime.py CHANGED
@@ -59,7 +59,7 @@ def _list_work_dir(work_dir: Path) -> str:
 
 
 class Runtime(NamedTuple):
-    """Agent globals."""
+    """Agent runtime."""
 
     config: Config
     llm: LLM | None
@@ -68,7 +68,8 @@ class Task(CallableTool2[Params]):
         self._subagents: dict[str, Agent] = {}
 
         try:
-            self._load_task = asyncio.create_task(self._load_subagents(agent_spec.subagents))
+            loop = asyncio.get_running_loop()
+            self._load_task = loop.create_task(self._load_subagents(agent_spec.subagents))
         except RuntimeError:
             # In case there's no running event loop, e.g., during synchronous tests
             self._load_task = None
@@ -44,7 +44,7 @@ class SearchWeb(CallableTool2[Params]):
         if config.services.moonshot_search is not None:
             self._base_url = config.services.moonshot_search.base_url
             self._api_key = config.services.moonshot_search.api_key.get_secret_value()
-            self._custom_headers = config.services.moonshot_search.custom_headers
+            self._custom_headers = config.services.moonshot_search.custom_headers or {}
         else:
             self._base_url = ""
             self._api_key = ""
@@ -172,33 +172,29 @@ class ACPAgent:
         self.run_state.cancel_event.set()
 
     async def _stream_events(self, wire: WireUISide):
-        try:
-            # expect a StepBegin
-            assert isinstance(await wire.receive(), StepBegin)
-
-            while True:
-                msg = await wire.receive()
-
-                if isinstance(msg, TextPart):
-                    await self._send_text(msg.text)
-                elif isinstance(msg, ContentPart):
-                    logger.warning("Unsupported content part: {part}", part=msg)
-                    await self._send_text(f"[{msg.__class__.__name__}]")
-                elif isinstance(msg, ToolCall):
-                    await self._send_tool_call(msg)
-                elif isinstance(msg, ToolCallPart):
-                    await self._send_tool_call_part(msg)
-                elif isinstance(msg, ToolResult):
-                    await self._send_tool_result(msg)
-                elif isinstance(msg, ApprovalRequest):
-                    await self._handle_approval_request(msg)
-                elif isinstance(msg, StatusUpdate):
-                    # TODO: stream status if needed
-                    pass
-                elif isinstance(msg, StepInterrupted):
-                    break
-        except asyncio.QueueShutDown:
-            logger.debug("Event stream loop shutting down")
+        assert isinstance(await wire.receive(), StepBegin)
+
+        while True:
+            msg = await wire.receive()
+
+            if isinstance(msg, TextPart):
+                await self._send_text(msg.text)
+            elif isinstance(msg, ContentPart):
+                logger.warning("Unsupported content part: {part}", part=msg)
+                await self._send_text(f"[{msg.__class__.__name__}]")
+            elif isinstance(msg, ToolCall):
+                await self._send_tool_call(msg)
+            elif isinstance(msg, ToolCallPart):
+                await self._send_tool_call_part(msg)
+            elif isinstance(msg, ToolResult):
+                await self._send_tool_result(msg)
+            elif isinstance(msg, ApprovalRequest):
+                await self._handle_approval_request(msg)
+            elif isinstance(msg, StatusUpdate):
+                # TODO: stream status if needed
+                pass
+            elif isinstance(msg, StepInterrupted):
+                break
 
     async def _send_text(self, text: str):
         """Send text chunk to client."""
@@ -321,7 +317,7 @@ class ACPAgent:
         # Create permission request with options
         permission_request = acp.RequestPermissionRequest(
             sessionId=self.session_id,
-            toolCall=acp.schema.ToolCallUpdate(
+            toolCall=acp.schema.ToolCall(
                 toolCallId=state.acp_tool_call_id,
                 content=[
                     acp.schema.ContentToolCallContent(
@@ -1,6 +1,5 @@
 import asyncio
 import json
-import signal
 import sys
 from functools import partial
 from pathlib import Path
@@ -9,10 +8,12 @@ from typing import Literal
 import aiofiles
 from kosong.base.message import Message
 from kosong.chat_provider import ChatProviderError
+from rich import print
 
 from kimi_cli.soul import LLMNotSet, MaxStepsReached, RunCancelled, Soul, run_soul
 from kimi_cli.utils.logging import logger
 from kimi_cli.utils.message import message_extract_text
+from kimi_cli.utils.signals import install_sigint_handler
 from kimi_cli.wire import WireUISide
 from kimi_cli.wire.message import StepInterrupted
 
@@ -51,7 +52,7 @@ class PrintApp:
             cancel_event.set()
 
         loop = asyncio.get_running_loop()
-        loop.add_signal_handler(signal.SIGINT, _handler)
+        remove_sigint = install_sigint_handler(loop, _handler)
 
         if command is None and not sys.stdin.isatty() and self.input_format == "text":
             command = sys.stdin.read().strip()
@@ -98,7 +99,7 @@ class PrintApp:
             print(f"Unknown error: {e}")
             raise
         finally:
-            loop.remove_signal_handler(signal.SIGINT)
+            remove_sigint()
         return False
 
     def _read_next_command(self) -> str | None:
@@ -127,35 +128,29 @@ class PrintApp:
             logger.warning("Ignoring invalid user message: {json_line}", json_line=json_line)
 
     async def _visualize_text(self, wire: WireUISide):
-        try:
-            while True:
-                msg = await wire.receive()
-                print(msg)
-                if isinstance(msg, StepInterrupted):
-                    break
-        except asyncio.QueueShutDown:
-            logger.debug("Visualization loop shutting down")
+        while True:
+            msg = await wire.receive()
+            print(msg)
+            if isinstance(msg, StepInterrupted):
+                break
 
     async def _visualize_stream_json(self, wire: WireUISide, start_position: int):
         # TODO: be aware of context compaction
         # FIXME: this is only a temporary impl, may miss the last lines of the context file
         if not self.context_file.exists():
             self.context_file.touch()
-        try:
-            async with aiofiles.open(self.context_file, encoding="utf-8") as f:
-                await f.seek(start_position)
-                while True:
-                    should_end = False
-                    while (msg := wire.receive_nowait()) is not None:
-                        if isinstance(msg, StepInterrupted):
-                            should_end = True
-
-                    line = await f.readline()
-                    if not line:
-                        if should_end:
-                            break
-                        await asyncio.sleep(0.1)
-                        continue
-                    print(line, end="")
-        except asyncio.QueueShutDown:
-            logger.debug("Visualization loop shutting down")
+        async with aiofiles.open(self.context_file, encoding="utf-8") as f:
+            await f.seek(start_position)
+            while True:
+                should_end = False
+                while (msg := wire.receive_nowait()) is not None:
+                    if isinstance(msg, StepInterrupted):
+                        should_end = True
+
+                line = await f.readline()
+                if not line:
+                    if should_end:
+                        break
+                    await asyncio.sleep(0.1)
+                    continue
+                print(line, end="")
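
`loop.add_signal_handler` raises `NotImplementedError` on Windows event loops, which is presumably why the SIGINT wiring moved behind `kimi_cli.utils.signals.install_sigint_handler` alongside the experimental Windows support. One plausible shape for such a helper, consistent with how it is called above but not taken from the package:

```python
import asyncio
import signal
from collections.abc import Callable


def install_sigint_handler(
    loop: asyncio.AbstractEventLoop, handler: Callable[[], None]
) -> Callable[[], None]:
    """Install a SIGINT handler portably; return a callable that removes it."""
    try:
        loop.add_signal_handler(signal.SIGINT, handler)

        def remove() -> None:
            loop.remove_signal_handler(signal.SIGINT)

    except NotImplementedError:
        # Windows event loops do not implement add_signal_handler;
        # fall back to the plain signal module and restore the old handler on removal.
        previous = signal.signal(signal.SIGINT, lambda signum, frame: handler())

        def remove() -> None:
            signal.signal(signal.SIGINT, previous)

    return remove
```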