kimi-cli 0.42-py3-none-any.whl → 0.44-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kimi-cli might be problematic.

kimi_cli/CHANGELOG.md CHANGED
@@ -9,6 +9,29 @@ Internal builds may append content to the Unreleased section.
 Only write entries that are worth mentioning to users.
 -->
 
+## [0.44] - 2025-10-30
+
+### Changed
+
+- Improve startup time
+
+### Fixed
+
+- Fix potential invalid bytes in user input
+
+## [0.43] - 2025-10-30
+
+### Added
+
+- Basic Windows support (experimental)
+- Display warnings when base URL or API key is overridden in environment variables
+- Support image input if the LLM model supports it
+- Replay recent context history when continuing a session
+
+### Fixed
+
+- Ensure new line after executing shell commands
+
 ## [0.42] - 2025-10-28
 
 ### Added
kimi_cli/__init__.py CHANGED
@@ -1,155 +0,0 @@
-import contextlib
-import os
-import warnings
-from collections.abc import Generator
-from pathlib import Path
-from typing import Any
-
-from pydantic import SecretStr
-
-from kimi_cli.agentspec import DEFAULT_AGENT_FILE
-from kimi_cli.config import LLMModel, LLMProvider, load_config
-from kimi_cli.llm import augment_provider_with_env_vars, create_llm
-from kimi_cli.session import Session
-from kimi_cli.soul.agent import load_agent
-from kimi_cli.soul.context import Context
-from kimi_cli.soul.kimisoul import KimiSoul
-from kimi_cli.soul.runtime import Runtime
-from kimi_cli.ui.acp import ACPServer
-from kimi_cli.ui.print import InputFormat, OutputFormat, PrintApp
-from kimi_cli.ui.shell import ShellApp
-from kimi_cli.utils.logging import StreamToLogger, logger
-
-
-class KimiCLI:
-    @staticmethod
-    async def create(
-        session: Session,
-        *,
-        yolo: bool = False,
-        stream: bool = True,  # TODO: remove this when we have a correct print mode impl
-        mcp_configs: list[dict[str, Any]] | None = None,
-        config_file: Path | None = None,
-        model_name: str | None = None,
-        agent_file: Path | None = None,
-    ) -> "KimiCLI":
-        """
-        Create a KimiCLI instance.
-
-        Args:
-            session (Session): A session created by `Session.create` or `Session.continue_`.
-            yolo (bool, optional): Approve all actions without confirmation. Defaults to False.
-            stream (bool, optional): Use stream mode when calling LLM API. Defaults to True.
-            config_file (Path | None, optional): Path to the configuration file. Defaults to None.
-            model_name (str | None, optional): Name of the model to use. Defaults to None.
-            agent_file (Path | None, optional): Path to the agent file. Defaults to None.
-
-        Raises:
-            FileNotFoundError: When the agent file is not found.
-            ConfigError(KimiCLIException): When the configuration is invalid.
-            AgentSpecError(KimiCLIException): When the agent specification is invalid.
-        """
-        config = load_config(config_file)
-        logger.info("Loaded config: {config}", config=config)
-
-        model: LLMModel | None = None
-        provider: LLMProvider | None = None
-
-        # try to use config file
-        if not model_name and config.default_model:
-            # no --model specified && default model is set in config
-            model = config.models[config.default_model]
-            provider = config.providers[model.provider]
-        if model_name and model_name in config.models:
-            # --model specified && model is set in config
-            model = config.models[model_name]
-            provider = config.providers[model.provider]
-
-        if not model:
-            model = LLMModel(provider="", model="", max_context_size=100_000)
-            provider = LLMProvider(type="kimi", base_url="", api_key=SecretStr(""))
-
-        # try overwrite with environment variables
-        assert provider is not None
-        assert model is not None
-        augment_provider_with_env_vars(provider, model)
-
-        if not provider.base_url or not model.model:
-            llm = None
-        else:
-            logger.info("Using LLM provider: {provider}", provider=provider)
-            logger.info("Using LLM model: {model}", model=model)
-            llm = create_llm(provider, model, stream=stream, session_id=session.id)
-
-        runtime = await Runtime.create(config, llm, session, yolo)
-
-        if agent_file is None:
-            agent_file = DEFAULT_AGENT_FILE
-        agent = await load_agent(agent_file, runtime, mcp_configs=mcp_configs or [])
-
-        context = Context(session.history_file)
-        await context.restore()
-
-        soul = KimiSoul(
-            agent,
-            runtime,
-            context=context,
-        )
-        return KimiCLI(soul, session)
-
-    def __init__(self, soul: KimiSoul, session: Session) -> None:
-        self._soul = soul
-        self._session = session
-
-    @property
-    def soul(self) -> KimiSoul:
-        """Get the KimiSoul instance."""
-        return self._soul
-
-    @property
-    def session(self) -> Session:
-        """Get the Session instance."""
-        return self._session
-
-    @contextlib.contextmanager
-    def _app_env(self) -> Generator[None]:
-        original_cwd = Path.cwd()
-        os.chdir(self._session.work_dir)
-        try:
-            # to ignore possible warnings from dateparser
-            warnings.filterwarnings("ignore", category=DeprecationWarning)
-            with contextlib.redirect_stderr(StreamToLogger()):
-                yield
-        finally:
-            os.chdir(original_cwd)
-
-    async def run_shell_mode(self, command: str | None = None) -> bool:
-        with self._app_env():
-            app = ShellApp(
-                self._soul,
-                welcome_info={
-                    "Directory": str(self._session.work_dir),
-                    "Session": self._session.id,
-                },
-            )
-            return await app.run(command)
-
-    async def run_print_mode(
-        self,
-        input_format: InputFormat,
-        output_format: OutputFormat,
-        command: str | None = None,
-    ) -> bool:
-        with self._app_env():
-            app = PrintApp(
-                self._soul,
-                input_format,
-                output_format,
-                self._session.history_file,
-            )
-            return await app.run(command)
-
-    async def run_acp_server(self) -> bool:
-        with self._app_env():
-            app = ACPServer(self._soul)
-            return await app.run()
kimi_cli/app.py ADDED
@@ -0,0 +1,195 @@
+import contextlib
+import os
+import warnings
+from collections.abc import Generator
+from pathlib import Path
+from typing import Any
+
+from pydantic import SecretStr
+
+from kimi_cli.agentspec import DEFAULT_AGENT_FILE
+from kimi_cli.cli import InputFormat, OutputFormat
+from kimi_cli.config import LLMModel, LLMProvider, load_config
+from kimi_cli.llm import augment_provider_with_env_vars, create_llm
+from kimi_cli.session import Session
+from kimi_cli.soul.agent import load_agent
+from kimi_cli.soul.context import Context
+from kimi_cli.soul.kimisoul import KimiSoul
+from kimi_cli.soul.runtime import Runtime
+from kimi_cli.utils.logging import StreamToLogger, logger
+
+
+class KimiCLI:
+    @staticmethod
+    async def create(
+        session: Session,
+        *,
+        yolo: bool = False,
+        stream: bool = True,  # TODO: remove this when we have a correct print mode impl
+        mcp_configs: list[dict[str, Any]] | None = None,
+        config_file: Path | None = None,
+        model_name: str | None = None,
+        agent_file: Path | None = None,
+    ) -> "KimiCLI":
+        """
+        Create a KimiCLI instance.
+
+        Args:
+            session (Session): A session created by `Session.create` or `Session.continue_`.
+            yolo (bool, optional): Approve all actions without confirmation. Defaults to False.
+            stream (bool, optional): Use stream mode when calling LLM API. Defaults to True.
+            config_file (Path | None, optional): Path to the configuration file. Defaults to None.
+            model_name (str | None, optional): Name of the model to use. Defaults to None.
+            agent_file (Path | None, optional): Path to the agent file. Defaults to None.
+
+        Raises:
+            FileNotFoundError: When the agent file is not found.
+            ConfigError(KimiCLIException): When the configuration is invalid.
+            AgentSpecError(KimiCLIException): When the agent specification is invalid.
+        """
+        config = load_config(config_file)
+        logger.info("Loaded config: {config}", config=config)
+
+        model: LLMModel | None = None
+        provider: LLMProvider | None = None
+
+        # try to use config file
+        if not model_name and config.default_model:
+            # no --model specified && default model is set in config
+            model = config.models[config.default_model]
+            provider = config.providers[model.provider]
+        if model_name and model_name in config.models:
+            # --model specified && model is set in config
+            model = config.models[model_name]
+            provider = config.providers[model.provider]
+
+        if not model:
+            model = LLMModel(provider="", model="", max_context_size=100_000)
+            provider = LLMProvider(type="kimi", base_url="", api_key=SecretStr(""))
+
+        # try overwrite with environment variables
+        assert provider is not None
+        assert model is not None
+        env_overrides = augment_provider_with_env_vars(provider, model)
+
+        if not provider.base_url or not model.model:
+            llm = None
+        else:
+            logger.info("Using LLM provider: {provider}", provider=provider)
+            logger.info("Using LLM model: {model}", model=model)
+            llm = create_llm(provider, model, stream=stream, session_id=session.id)
+
+        runtime = await Runtime.create(config, llm, session, yolo)
+
+        if agent_file is None:
+            agent_file = DEFAULT_AGENT_FILE
+        agent = await load_agent(agent_file, runtime, mcp_configs=mcp_configs or [])
+
+        context = Context(session.history_file)
+        await context.restore()
+
+        soul = KimiSoul(
+            agent,
+            runtime,
+            context=context,
+        )
+        return KimiCLI(soul, runtime, env_overrides)
+
+    def __init__(
+        self,
+        _soul: KimiSoul,
+        _runtime: Runtime,
+        _env_overrides: dict[str, str],
+    ) -> None:
+        self._soul = _soul
+        self._runtime = _runtime
+        self._env_overrides = _env_overrides
+
+    @property
+    def soul(self) -> KimiSoul:
+        """Get the KimiSoul instance."""
+        return self._soul
+
+    @property
+    def session(self) -> Session:
+        """Get the Session instance."""
+        return self._runtime.session
+
+    @contextlib.contextmanager
+    def _app_env(self) -> Generator[None]:
+        original_cwd = Path.cwd()
+        os.chdir(self._runtime.session.work_dir)
+        try:
+            # to ignore possible warnings from dateparser
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            with contextlib.redirect_stderr(StreamToLogger()):
+                yield
+        finally:
+            os.chdir(original_cwd)
+
+    async def run_shell_mode(self, command: str | None = None) -> bool:
+        from kimi_cli.ui.shell import ShellApp, WelcomeInfoItem
+
+        welcome_info = [
+            WelcomeInfoItem(name="Directory", value=str(self._runtime.session.work_dir)),
+            WelcomeInfoItem(name="Session", value=self._runtime.session.id),
+        ]
+        if base_url := self._env_overrides.get("KIMI_BASE_URL"):
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="API URL",
+                    value=f"{base_url} (from KIMI_BASE_URL)",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        if not self._runtime.llm:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value="not set, send /setup to configure",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        elif "KIMI_MODEL_NAME" in self._env_overrides:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value=f"{self._soul.model} (from KIMI_MODEL_NAME)",
+                    level=WelcomeInfoItem.Level.WARN,
+                )
+            )
+        else:
+            welcome_info.append(
+                WelcomeInfoItem(
+                    name="Model",
+                    value=self._soul.model,
+                    level=WelcomeInfoItem.Level.INFO,
+                )
+            )
+        with self._app_env():
+            app = ShellApp(self._soul, welcome_info=welcome_info)
+            return await app.run(command)
+
+    async def run_print_mode(
+        self,
+        input_format: InputFormat,
+        output_format: OutputFormat,
+        command: str | None = None,
+    ) -> bool:
+        from kimi_cli.ui.print import PrintApp
+
+        with self._app_env():
+            app = PrintApp(
+                self._soul,
+                input_format,
+                output_format,
+                self._runtime.session.history_file,
+            )
+            return await app.run(command)
+
+    async def run_acp_server(self) -> bool:
+        from kimi_cli.ui.acp import ACPServer
+
+        with self._app_env():
+            app = ACPServer(self._soul)
+            return await app.run()
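
A pattern worth noting in the new app.py (and repeated in cli.py, llm.py, and soul/agent.py below): imports of heavy UI and provider modules are deferred into the methods that use them, which is presumably what the 0.44 "Improve startup time" entry refers to. A minimal, self-contained sketch of why this helps, using stdlib modules as stand-ins for kimi-cli's actual dependencies:

```python
# Rough, illustrative measurement of cold-import cost; the module names are
# stdlib stand-ins, not kimi-cli's real dependencies.
import importlib
import sys
import time


def cold_import_ms(module_name: str) -> float:
    """Approximate the first-import cost of a module in milliseconds."""
    sys.modules.pop(module_name, None)  # force the module body to re-execute
    start = time.perf_counter()
    importlib.import_module(module_name)
    return (time.perf_counter() - start) * 1000


if __name__ == "__main__":
    # Anything imported at module scope is paid on every CLI invocation,
    # even `kimi --help`; function-local imports defer that cost until the
    # code path actually runs.
    for name in ("json", "asyncio", "unittest"):
        print(f"import {name}: {cold_import_ms(name):.2f} ms")
```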
kimi_cli/cli.py CHANGED
@@ -7,12 +7,7 @@ from typing import Any, Literal, get_args
 
 import click
 
-from kimi_cli import KimiCLI
 from kimi_cli.constant import VERSION
-from kimi_cli.session import Session
-from kimi_cli.share import get_share_dir
-from kimi_cli.ui.print import InputFormat, OutputFormat
-from kimi_cli.utils.logging import logger
 
 
 class Reload(Exception):
@@ -22,6 +17,8 @@ class Reload(Exception):
 
 
 UIMode = Literal["shell", "print", "acp"]
+InputFormat = Literal["text", "stream-json"]
+OutputFormat = Literal["text", "stream-json"]
 
 
 @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@@ -156,6 +153,10 @@ def kimi(
     yolo: bool,
 ):
     """Kimi, your next CLI agent."""
+    from kimi_cli.app import KimiCLI
+    from kimi_cli.session import Session
+    from kimi_cli.share import get_share_dir
+    from kimi_cli.utils.logging import logger
 
     def _noop_echo(*args: Any, **kwargs: Any):
         pass
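
With InputFormat and OutputFormat now defined as Literal aliases next to the CLI, the `get_args` import at the top of cli.py suggests the click options derive their choices from those aliases. A hedged sketch of that wiring (the option name and command below are illustrative, not necessarily kimi-cli's real CLI surface):

```python
from typing import Literal, get_args

import click

# Mirrors the aliases added to kimi_cli/cli.py in this release.
OutputFormat = Literal["text", "stream-json"]


@click.command()
@click.option(
    "--output-format",
    type=click.Choice(get_args(OutputFormat)),  # ("text", "stream-json")
    default="text",
    help="Output format for print mode.",
)
def demo(output_format: str) -> None:
    click.echo(f"output format: {output_format}")


if __name__ == "__main__":
    demo()
```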
kimi_cli/config.py CHANGED
@@ -18,7 +18,7 @@ class LLMProvider(BaseModel):
     """API base URL"""
     api_key: SecretStr
     """API key"""
-    custom_headers: dict[str, str] = Field(default_factory=dict)
+    custom_headers: dict[str, str] | None = None
     """Custom headers to include in API requests"""
 
     @field_serializer("api_key", when_used="json")
@@ -26,6 +26,9 @@ class LLMProvider(BaseModel):
         return v.get_secret_value()
 
 
+LLMModelCapability = Literal["image_in"]
+
+
 class LLMModel(BaseModel):
     """LLM model configuration."""
 
@@ -35,6 +38,8 @@
     """Model name"""
     max_context_size: int
     """Maximum context size (unit: tokens)"""
+    capabilities: set[LLMModelCapability] | None = None
+    """Model capabilities"""
 
 
 class LoopControl(BaseModel):
@@ -53,7 +58,7 @@ class MoonshotSearchConfig(BaseModel):
     """Base URL for Moonshot Search service."""
     api_key: SecretStr
     """API key for Moonshot Search service."""
-    custom_headers: dict[str, str] = Field(default_factory=dict)
+    custom_headers: dict[str, str] | None = None
     """Custom headers to include in API requests."""
 
     @field_serializer("api_key", when_used="json")
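
The new optional `capabilities` field is how a model advertises features such as image input (the 0.43 "Support image input if the LLM model supports it" entry). A small sketch of the field in use, with the model trimmed to just what this diff shows (the model names are made up):

```python
from typing import Literal

from pydantic import BaseModel

LLMModelCapability = Literal["image_in"]


class LLMModel(BaseModel):
    """Trimmed copy of the config model above, for illustration only."""

    provider: str
    model: str
    max_context_size: int
    capabilities: set[LLMModelCapability] | None = None


vision = LLMModel(
    provider="kimi",
    model="some-vision-model",  # hypothetical name
    max_context_size=100_000,
    capabilities={"image_in"},
)
legacy = LLMModel(provider="kimi", model="some-text-model", max_context_size=100_000)

# Consumers treat a missing set as "no extra capabilities", matching
# `model.capabilities or set()` in create_llm below.
assert "image_in" in (vision.capabilities or set())
assert "image_in" not in (legacy.capabilities or set())
```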
kimi_cli/llm.py CHANGED
@@ -2,31 +2,49 @@ import os
 from typing import NamedTuple
 
 from kosong.base.chat_provider import ChatProvider
-from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
-from kosong.chat_provider.kimi import Kimi
-from kosong.chat_provider.openai_legacy import OpenAILegacy
 from pydantic import SecretStr
 
-from kimi_cli.config import LLMModel, LLMProvider
+from kimi_cli.config import LLMModel, LLMModelCapability, LLMProvider
 from kimi_cli.constant import USER_AGENT
 
 
 class LLM(NamedTuple):
     chat_provider: ChatProvider
     max_context_size: int
+    capabilities: set[LLMModelCapability]
+    # TODO: these additional fields should be moved to ChatProvider
 
+    @property
+    def model_name(self) -> str:
+        return self.chat_provider.model_name
+
+    @property
+    def supports_image_in(self) -> bool:
+        return "image_in" in self.capabilities
+
+
+def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> dict[str, str]:
+    """Override provider/model settings from environment variables.
+
+    Returns:
+        Mapping of environment variables that were applied.
+    """
+    applied: dict[str, str] = {}
 
-def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel):
     match provider.type:
         case "kimi":
             if base_url := os.getenv("KIMI_BASE_URL"):
                 provider.base_url = base_url
+                applied["KIMI_BASE_URL"] = base_url
             if api_key := os.getenv("KIMI_API_KEY"):
                 provider.api_key = SecretStr(api_key)
+                applied["KIMI_API_KEY"] = "******"
             if model_name := os.getenv("KIMI_MODEL_NAME"):
                 model.model = model_name
+                applied["KIMI_MODEL_NAME"] = model.model
             if max_context_size := os.getenv("KIMI_MODEL_MAX_CONTEXT_SIZE"):
                 model.max_context_size = int(max_context_size)
+                applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = str(model.max_context_size)
         case "openai_legacy":
             if base_url := os.getenv("OPENAI_BASE_URL"):
                 provider.base_url = base_url
@@ -35,6 +53,8 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel):
         case _:
             pass
 
+    return applied
+
 
 def create_llm(
     provider: LLMProvider,
@@ -45,6 +65,8 @@
 ) -> LLM:
     match provider.type:
         case "kimi":
+            from kosong.chat_provider.kimi import Kimi
+
             chat_provider = Kimi(
                 model=model.model,
                 base_url=provider.base_url,
@@ -52,12 +74,14 @@
                 stream=stream,
                 default_headers={
                     "User-Agent": USER_AGENT,
-                    **provider.custom_headers,
+                    **(provider.custom_headers or {}),
                 },
             )
             if session_id:
                 chat_provider = chat_provider.with_generation_kwargs(prompt_cache_key=session_id)
         case "openai_legacy":
+            from kosong.chat_provider.openai_legacy import OpenAILegacy
+
             chat_provider = OpenAILegacy(
                 model=model.model,
                 base_url=provider.base_url,
@@ -65,6 +89,8 @@
                 stream=stream,
             )
         case "_chaos":
+            from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
+
             chat_provider = ChaosChatProvider(
                 model=model.model,
                 base_url=provider.base_url,
@@ -75,4 +101,8 @@
             ),
         )
 
-    return LLM(chat_provider=chat_provider, max_context_size=model.max_context_size)
+    return LLM(
+        chat_provider=chat_provider,
+        max_context_size=model.max_context_size,
+        capabilities=model.capabilities or set(),
+    )
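
`augment_provider_with_env_vars` now returns the mapping of overrides it applied, which app.py uses to surface the 0.43 "Display warnings when base URL or API key is overridden in environment variables" feature. A usage sketch, assuming kimi-cli is installed (the URL is a placeholder):

```python
import os

from pydantic import SecretStr

from kimi_cli.config import LLMModel, LLMProvider
from kimi_cli.llm import augment_provider_with_env_vars

os.environ["KIMI_BASE_URL"] = "https://example.invalid/v1"  # demo override

# Empty defaults, mirroring the fallback constructed in KimiCLI.create.
provider = LLMProvider(type="kimi", base_url="", api_key=SecretStr(""))
model = LLMModel(provider="", model="", max_context_size=100_000)

applied = augment_provider_with_env_vars(provider, model)
# applied == {"KIMI_BASE_URL": "https://example.invalid/v1"}; an API-key
# override would appear masked as "******".
for var, value in applied.items():
    print(f"warning: {var} overridden from environment: {value}")
```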
kimi_cli/soul/__init__.py CHANGED
@@ -4,6 +4,9 @@ from collections.abc import Callable, Coroutine
 from contextvars import ContextVar
 from typing import Any, NamedTuple, Protocol, runtime_checkable
 
+from kosong.base.message import ContentPart
+
+from kimi_cli.llm import LLM
 from kimi_cli.utils.logging import logger
 from kimi_cli.wire import Wire, WireUISide
 from kimi_cli.wire.message import WireMessage
@@ -15,6 +18,19 @@ class LLMNotSet(Exception):
     pass
 
 
+class LLMNotSupported(Exception):
+    """Raised when the LLM does not have required capabilities."""
+
+    def __init__(self, llm: LLM, capabilities: list[str]):
+        self.llm = llm
+        self.capabilities = capabilities
+        capabilities_str = "capability" if len(capabilities) == 1 else "capabilities"
+        super().__init__(
+            f"The LLM model '{llm.model_name}' does not support required {capabilities_str}: "
+            f"{', '.join(capabilities)}."
+        )
+
+
 class MaxStepsReached(Exception):
     """Raised when the maximum number of steps is reached."""
 
@@ -47,15 +63,16 @@ class Soul(Protocol):
         """The current status of the soul. The returned value is immutable."""
         ...
 
-    async def run(self, user_input: str):
+    async def run(self, user_input: str | list[ContentPart]):
         """
         Run the agent with the given user input until the max steps or no more tool calls.
 
         Args:
-            user_input (str): The user input to the agent.
+            user_input (str | list[ContentPart]): The user input to the agent.
 
         Raises:
             LLMNotSet: When the LLM is not set.
+            LLMNotSupported: When the LLM does not have required capabilities.
             ChatProviderError: When the LLM provider returns an error.
             MaxStepsReached: When the maximum number of steps is reached.
             asyncio.CancelledError: When the run is cancelled by user.
@@ -73,7 +90,7 @@ class RunCancelled(Exception):
 
 async def run_soul(
     soul: "Soul",
-    user_input: str,
+    user_input: str | list[ContentPart],
     ui_loop_fn: UILoopFn,
    cancel_event: asyncio.Event,
 ) -> None:
@@ -85,6 +102,7 @@ async def run_soul(
 
     Raises:
         LLMNotSet: When the LLM is not set.
+        LLMNotSupported: When the LLM does not have required capabilities.
         ChatProviderError: When the LLM provider returns an error.
         MaxStepsReached: When the maximum number of steps is reached.
         RunCancelled: When the run is cancelled by the cancel event.
@@ -125,7 +143,7 @@ async def run_soul(
     try:
         await asyncio.wait_for(ui_task, timeout=0.5)
     except asyncio.QueueShutDown:
-        # expected
+        logger.debug("UI loop shut down")
         pass
     except TimeoutError:
         logger.warning("UI loop timed out")
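
`LLMNotSupported` pairs with the `supports_image_in` property added to `LLM` in llm.py. The diff does not show where the check happens, but a guard along these lines is presumably what raises it; a sketch using only names from the diffs above:

```python
from kimi_cli.llm import LLM
from kimi_cli.soul import LLMNotSupported


def ensure_image_capable(llm: LLM) -> None:
    """Reject multimodal input up front when the model lacks `image_in`.

    Hypothetical helper: failing here yields a clear LLMNotSupported message
    instead of an opaque provider error mid-run.
    """
    if not llm.supports_image_in:
        raise LLMNotSupported(llm, ["image_in"])
```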
kimi_cli/soul/agent.py CHANGED
@@ -4,7 +4,6 @@ import string
 from pathlib import Path
 from typing import Any, NamedTuple
 
-import fastmcp
 from kosong.tooling import CallableTool, CallableTool2, Toolset
 
 from kimi_cli.agentspec import ResolvedAgentSpec, load_agent_spec
@@ -14,7 +13,6 @@ from kimi_cli.soul.approval import Approval
 from kimi_cli.soul.denwarenji import DenwaRenji
 from kimi_cli.soul.runtime import BuiltinSystemPromptArgs, Runtime
 from kimi_cli.soul.toolset import CustomToolset
-from kimi_cli.tools.mcp import MCPTool
 from kimi_cli.utils.logging import logger
 
 
@@ -143,6 +141,10 @@ async def _load_mcp_tools(
         ValueError: If the MCP config is not valid.
         RuntimeError: If the MCP server cannot be connected.
     """
+    import fastmcp
+
+    from kimi_cli.tools.mcp import MCPTool
+
     for mcp_config in mcp_configs:
         logger.info("Loading MCP tools from: {mcp_config}", mcp_config=mcp_config)
         client = fastmcp.Client(mcp_config)
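
Moving the fastmcp and MCPTool imports inside `_load_mcp_tools` means the MCP stack is only imported when MCP servers are actually configured. A generic sketch of the same pattern (the helper below is hypothetical, not kimi-cli code):

```python
from typing import Any


async def load_optional_tools(mcp_configs: list[dict[str, Any]]) -> list[Any]:
    """Collect MCP tools, importing the MCP client only when needed."""
    tools: list[Any] = []
    if not mcp_configs:
        # Fast path: the optional dependency is never imported, so startup
        # stays cheap for users without MCP configured.
        return tools

    import fastmcp  # deferred, mirroring _load_mcp_tools above

    for mcp_config in mcp_configs:
        client = fastmcp.Client(mcp_config)
        # ... connect and collect tools from the client, as the real
        # _load_mcp_tools does.
        _ = client
    return tools
```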