kimi-cli 0.35__py3-none-any.whl → 0.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. kimi_cli/CHANGELOG.md +165 -0
  2. kimi_cli/__init__.py +0 -374
  3. kimi_cli/agents/{koder → default}/agent.yaml +1 -1
  4. kimi_cli/agents/{koder → default}/system.md +1 -1
  5. kimi_cli/agentspec.py +115 -0
  6. kimi_cli/app.py +208 -0
  7. kimi_cli/cli.py +321 -0
  8. kimi_cli/config.py +33 -16
  9. kimi_cli/constant.py +4 -0
  10. kimi_cli/exception.py +16 -0
  11. kimi_cli/llm.py +144 -3
  12. kimi_cli/metadata.py +6 -69
  13. kimi_cli/prompts/__init__.py +4 -0
  14. kimi_cli/session.py +103 -0
  15. kimi_cli/soul/__init__.py +130 -9
  16. kimi_cli/soul/agent.py +159 -0
  17. kimi_cli/soul/approval.py +5 -6
  18. kimi_cli/soul/compaction.py +106 -0
  19. kimi_cli/soul/context.py +1 -1
  20. kimi_cli/soul/kimisoul.py +180 -80
  21. kimi_cli/soul/message.py +6 -6
  22. kimi_cli/soul/runtime.py +96 -0
  23. kimi_cli/soul/toolset.py +3 -2
  24. kimi_cli/tools/__init__.py +35 -31
  25. kimi_cli/tools/bash/__init__.py +25 -9
  26. kimi_cli/tools/bash/cmd.md +31 -0
  27. kimi_cli/tools/dmail/__init__.py +5 -4
  28. kimi_cli/tools/file/__init__.py +8 -0
  29. kimi_cli/tools/file/glob.md +1 -1
  30. kimi_cli/tools/file/glob.py +4 -4
  31. kimi_cli/tools/file/grep.py +36 -19
  32. kimi_cli/tools/file/patch.py +52 -10
  33. kimi_cli/tools/file/read.py +6 -5
  34. kimi_cli/tools/file/replace.py +16 -4
  35. kimi_cli/tools/file/write.py +16 -4
  36. kimi_cli/tools/mcp.py +7 -4
  37. kimi_cli/tools/task/__init__.py +60 -41
  38. kimi_cli/tools/task/task.md +1 -1
  39. kimi_cli/tools/todo/__init__.py +4 -2
  40. kimi_cli/tools/utils.py +1 -1
  41. kimi_cli/tools/web/fetch.py +2 -1
  42. kimi_cli/tools/web/search.py +13 -12
  43. kimi_cli/ui/__init__.py +0 -68
  44. kimi_cli/ui/acp/__init__.py +67 -38
  45. kimi_cli/ui/print/__init__.py +46 -69
  46. kimi_cli/ui/shell/__init__.py +145 -154
  47. kimi_cli/ui/shell/console.py +27 -1
  48. kimi_cli/ui/shell/debug.py +187 -0
  49. kimi_cli/ui/shell/keyboard.py +183 -0
  50. kimi_cli/ui/shell/metacmd.py +34 -81
  51. kimi_cli/ui/shell/prompt.py +245 -28
  52. kimi_cli/ui/shell/replay.py +104 -0
  53. kimi_cli/ui/shell/setup.py +19 -19
  54. kimi_cli/ui/shell/update.py +11 -5
  55. kimi_cli/ui/shell/visualize.py +576 -0
  56. kimi_cli/ui/wire/README.md +109 -0
  57. kimi_cli/ui/wire/__init__.py +340 -0
  58. kimi_cli/ui/wire/jsonrpc.py +48 -0
  59. kimi_cli/utils/__init__.py +0 -0
  60. kimi_cli/utils/aiohttp.py +10 -0
  61. kimi_cli/utils/changelog.py +6 -2
  62. kimi_cli/utils/clipboard.py +10 -0
  63. kimi_cli/utils/message.py +15 -1
  64. kimi_cli/utils/rich/__init__.py +33 -0
  65. kimi_cli/utils/rich/markdown.py +959 -0
  66. kimi_cli/utils/rich/markdown_sample.md +108 -0
  67. kimi_cli/utils/rich/markdown_sample_short.md +2 -0
  68. kimi_cli/utils/signals.py +41 -0
  69. kimi_cli/utils/string.py +8 -0
  70. kimi_cli/utils/term.py +114 -0
  71. kimi_cli/wire/__init__.py +73 -0
  72. kimi_cli/wire/message.py +191 -0
  73. kimi_cli-0.52.dist-info/METADATA +186 -0
  74. kimi_cli-0.52.dist-info/RECORD +99 -0
  75. kimi_cli-0.52.dist-info/entry_points.txt +3 -0
  76. kimi_cli/agent.py +0 -261
  77. kimi_cli/agents/koder/README.md +0 -3
  78. kimi_cli/prompts/metacmds/__init__.py +0 -4
  79. kimi_cli/soul/wire.py +0 -101
  80. kimi_cli/ui/shell/liveview.py +0 -158
  81. kimi_cli/utils/provider.py +0 -64
  82. kimi_cli-0.35.dist-info/METADATA +0 -24
  83. kimi_cli-0.35.dist-info/RECORD +0 -76
  84. kimi_cli-0.35.dist-info/entry_points.txt +0 -3
  85. /kimi_cli/agents/{koder → default}/sub.yaml +0 -0
  86. /kimi_cli/prompts/{metacmds/compact.md → compact.md} +0 -0
  87. /kimi_cli/prompts/{metacmds/init.md → init.md} +0 -0
  88. {kimi_cli-0.35.dist-info → kimi_cli-0.52.dist-info}/WHEEL +0 -0
kimi_cli/llm.py CHANGED
@@ -1,8 +1,149 @@
1
- from typing import NamedTuple
1
+ import os
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Literal, cast, get_args
2
4
 
3
- from kosong.base.chat_provider import ChatProvider
5
+ from kosong.chat_provider import ChatProvider
6
+ from pydantic import SecretStr
4
7
 
8
+ from kimi_cli.constant import USER_AGENT
5
9
 
6
- class LLM(NamedTuple):
10
+ if TYPE_CHECKING:
11
+ from kimi_cli.config import LLMModel, LLMProvider
12
+
13
+ type ProviderType = Literal["kimi", "openai_legacy", "openai_responses", "anthropic", "_chaos"]
14
+
15
+ type ModelCapability = Literal["image_in", "thinking"]
16
+ ALL_MODEL_CAPABILITIES: set[ModelCapability] = set(get_args(ModelCapability))
17
+
18
+
19
+ @dataclass(slots=True)
20
+ class LLM:
7
21
  chat_provider: ChatProvider
8
22
  max_context_size: int
23
+ capabilities: set[ModelCapability]
24
+
25
+ @property
26
+ def model_name(self) -> str:
27
+ return self.chat_provider.model_name
28
+
29
+
30
+ def augment_provider_with_env_vars(provider: "LLMProvider", model: "LLMModel") -> dict[str, str]:
31
+ """Override provider/model settings from environment variables.
32
+
33
+ Returns:
34
+ Mapping of environment variables that were applied.
35
+ """
36
+ applied: dict[str, str] = {}
37
+
38
+ match provider.type:
39
+ case "kimi":
40
+ if base_url := os.getenv("KIMI_BASE_URL"):
41
+ provider.base_url = base_url
42
+ applied["KIMI_BASE_URL"] = base_url
43
+ if api_key := os.getenv("KIMI_API_KEY"):
44
+ provider.api_key = SecretStr(api_key)
45
+ applied["KIMI_API_KEY"] = "******"
46
+ if model_name := os.getenv("KIMI_MODEL_NAME"):
47
+ model.model = model_name
48
+ applied["KIMI_MODEL_NAME"] = model_name
49
+ if max_context_size := os.getenv("KIMI_MODEL_MAX_CONTEXT_SIZE"):
50
+ model.max_context_size = int(max_context_size)
51
+ applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = max_context_size
52
+ if capabilities := os.getenv("KIMI_MODEL_CAPABILITIES"):
53
+ caps_lower = (cap.strip().lower() for cap in capabilities.split(",") if cap.strip())
54
+ model.capabilities = set(
55
+ cast(ModelCapability, cap)
56
+ for cap in caps_lower
57
+ if cap in get_args(ModelCapability)
58
+ )
59
+ applied["KIMI_MODEL_CAPABILITIES"] = capabilities
60
+ case "openai_legacy" | "openai_responses":
61
+ if base_url := os.getenv("OPENAI_BASE_URL"):
62
+ provider.base_url = base_url
63
+ if api_key := os.getenv("OPENAI_API_KEY"):
64
+ provider.api_key = SecretStr(api_key)
65
+ case _:
66
+ pass
67
+
68
+ return applied
69
+
70
+
71
+ def create_llm(
72
+ provider: "LLMProvider",
73
+ model: "LLMModel",
74
+ *,
75
+ stream: bool = True,
76
+ session_id: str | None = None,
77
+ ) -> LLM:
78
+ match provider.type:
79
+ case "kimi":
80
+ from kosong.chat_provider.kimi import Kimi
81
+
82
+ chat_provider = Kimi(
83
+ model=model.model,
84
+ base_url=provider.base_url,
85
+ api_key=provider.api_key.get_secret_value(),
86
+ stream=stream,
87
+ default_headers={
88
+ "User-Agent": USER_AGENT,
89
+ **(provider.custom_headers or {}),
90
+ },
91
+ )
92
+ if session_id:
93
+ chat_provider = chat_provider.with_generation_kwargs(prompt_cache_key=session_id)
94
+ case "openai_legacy":
95
+ from kosong.contrib.chat_provider.openai_legacy import OpenAILegacy
96
+
97
+ chat_provider = OpenAILegacy(
98
+ model=model.model,
99
+ base_url=provider.base_url,
100
+ api_key=provider.api_key.get_secret_value(),
101
+ stream=stream,
102
+ )
103
+ case "openai_responses":
104
+ from kosong.contrib.chat_provider.openai_responses import OpenAIResponses
105
+
106
+ chat_provider = OpenAIResponses(
107
+ model=model.model,
108
+ base_url=provider.base_url,
109
+ api_key=provider.api_key.get_secret_value(),
110
+ stream=stream,
111
+ )
112
+ case "anthropic":
113
+ from kosong.contrib.chat_provider.anthropic import Anthropic
114
+
115
+ chat_provider = Anthropic(
116
+ model=model.model,
117
+ base_url=provider.base_url,
118
+ api_key=provider.api_key.get_secret_value(),
119
+ stream=stream,
120
+ default_max_tokens=50000,
121
+ )
122
+ case "_chaos":
123
+ from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
124
+
125
+ chat_provider = ChaosChatProvider(
126
+ model=model.model,
127
+ base_url=provider.base_url,
128
+ api_key=provider.api_key.get_secret_value(),
129
+ chaos_config=ChaosConfig(
130
+ error_probability=0.8,
131
+ error_types=[429, 500, 503],
132
+ ),
133
+ )
134
+
135
+ return LLM(
136
+ chat_provider=chat_provider,
137
+ max_context_size=model.max_context_size,
138
+ capabilities=_derive_capabilities(provider, model),
139
+ )
140
+
141
+
142
+ def _derive_capabilities(provider: "LLMProvider", model: "LLMModel") -> set[ModelCapability]:
143
+ capabilities = model.capabilities or set()
144
+ if provider.type != "kimi":
145
+ return capabilities
146
+
147
+ if model.model == "kimi-for-coding" or "thinking" in model.model:
148
+ capabilities.add("thinking")
149
+ return capabilities
kimi_cli/metadata.py CHANGED
@@ -1,8 +1,6 @@
1
1
  import json
2
- import uuid
3
2
  from hashlib import md5
4
3
  from pathlib import Path
5
- from typing import NamedTuple
6
4
 
7
5
  from pydantic import BaseModel, Field
8
6
 
@@ -25,7 +23,7 @@ class WorkDirMeta(BaseModel):
25
23
 
26
24
  @property
27
25
  def sessions_dir(self) -> Path:
28
- path = get_share_dir() / "sessions" / md5(self.path.encode()).hexdigest()
26
+ path = get_share_dir() / "sessions" / md5(self.path.encode(encoding="utf-8")).hexdigest()
29
27
  path.mkdir(parents=True, exist_ok=True)
30
28
  return path
31
29
 
@@ -33,10 +31,12 @@ class WorkDirMeta(BaseModel):
33
31
  class Metadata(BaseModel):
34
32
  """Kimi metadata structure."""
35
33
 
36
- work_dirs: list[WorkDirMeta] = Field(default_factory=list, description="Work directory list")
34
+ work_dirs: list[WorkDirMeta] = Field(
35
+ default_factory=list[WorkDirMeta], description="Work directory list"
36
+ )
37
37
 
38
38
 
39
- def _load_metadata() -> Metadata:
39
+ def load_metadata() -> Metadata:
40
40
  metadata_file = get_metadata_file()
41
41
  logger.debug("Loading metadata from file: {file}", file=metadata_file)
42
42
  if not metadata_file.exists():
@@ -47,71 +47,8 @@ def _load_metadata() -> Metadata:
47
47
  return Metadata(**data)
48
48
 
49
49
 
50
- def _save_metadata(metadata: Metadata):
50
+ def save_metadata(metadata: Metadata):
51
51
  metadata_file = get_metadata_file()
52
52
  logger.debug("Saving metadata to file: {file}", file=metadata_file)
53
53
  with open(metadata_file, "w", encoding="utf-8") as f:
54
54
  json.dump(metadata.model_dump(), f, indent=2, ensure_ascii=False)
55
-
56
-
57
- class Session(NamedTuple):
58
- """A session of a work directory."""
59
-
60
- id: str
61
- work_dir: WorkDirMeta
62
- history_file: Path
63
-
64
-
65
- def new_session(work_dir: Path, _history_file: Path | None = None) -> Session:
66
- """Create a new session for a work directory."""
67
- logger.debug("Creating new session for work directory: {work_dir}", work_dir=work_dir)
68
-
69
- metadata = _load_metadata()
70
- work_dir_meta = next((wd for wd in metadata.work_dirs if wd.path == str(work_dir)), None)
71
- if work_dir_meta is None:
72
- work_dir_meta = WorkDirMeta(path=str(work_dir))
73
- metadata.work_dirs.append(work_dir_meta)
74
-
75
- session_id = str(uuid.uuid4())
76
- if _history_file is None:
77
- history_file = work_dir_meta.sessions_dir / f"{session_id}.jsonl"
78
- work_dir_meta.last_session_id = session_id
79
- else:
80
- logger.warning("Using provided history file: {history_file}", history_file=_history_file)
81
- _history_file.parent.mkdir(parents=True, exist_ok=True)
82
- if _history_file.exists():
83
- assert _history_file.is_file()
84
- history_file = _history_file
85
-
86
- if history_file.exists():
87
- # truncate if exists
88
- logger.warning(
89
- "History file already exists, truncating: {history_file}", history_file=history_file
90
- )
91
- history_file.unlink()
92
- history_file.touch()
93
-
94
- _save_metadata(metadata)
95
- return Session(id=session_id, work_dir=work_dir_meta, history_file=history_file)
96
-
97
-
98
- def continue_session(work_dir: Path) -> Session | None:
99
- """Get the last session for a work directory."""
100
- logger.debug("Continuing session for work directory: {work_dir}", work_dir=work_dir)
101
-
102
- metadata = _load_metadata()
103
- work_dir_meta = next((wd for wd in metadata.work_dirs if wd.path == str(work_dir)), None)
104
- if work_dir_meta is None:
105
- logger.debug("Work directory never been used")
106
- return None
107
- if work_dir_meta.last_session_id is None:
108
- logger.debug("Work directory never had a session")
109
- return None
110
-
111
- logger.debug(
112
- "Found last session for work directory: {session_id}",
113
- session_id=work_dir_meta.last_session_id,
114
- )
115
- session_id = work_dir_meta.last_session_id
116
- history_file = work_dir_meta.sessions_dir / f"{session_id}.jsonl"
117
- return Session(id=session_id, work_dir=work_dir_meta, history_file=history_file)
@@ -0,0 +1,4 @@
1
+ from pathlib import Path
2
+
3
+ INIT = (Path(__file__).parent / "init.md").read_text(encoding="utf-8")
4
+ COMPACT = (Path(__file__).parent / "compact.md").read_text(encoding="utf-8")
kimi_cli/session.py ADDED
@@ -0,0 +1,103 @@
1
+ import uuid
2
+ from pathlib import Path
3
+ from typing import NamedTuple
4
+
5
+ from kimi_cli.metadata import WorkDirMeta, load_metadata, save_metadata
6
+ from kimi_cli.utils.logging import logger
7
+
8
+
9
+ class Session(NamedTuple):
10
+ """A session of a work directory."""
11
+
12
+ id: str
13
+ work_dir: Path
14
+ history_file: Path
15
+
16
+ @staticmethod
17
+ def create(work_dir: Path, _history_file: Path | None = None) -> "Session":
18
+ """Create a new session for a work directory."""
19
+ logger.debug("Creating new session for work directory: {work_dir}", work_dir=work_dir)
20
+
21
+ metadata = load_metadata()
22
+ work_dir_meta = next((wd for wd in metadata.work_dirs if wd.path == str(work_dir)), None)
23
+ if work_dir_meta is None:
24
+ work_dir_meta = WorkDirMeta(path=str(work_dir))
25
+ metadata.work_dirs.append(work_dir_meta)
26
+
27
+ session_id = str(uuid.uuid4())
28
+ if _history_file is None:
29
+ history_file = work_dir_meta.sessions_dir / f"{session_id}.jsonl"
30
+ else:
31
+ logger.warning(
32
+ "Using provided history file: {history_file}", history_file=_history_file
33
+ )
34
+ _history_file.parent.mkdir(parents=True, exist_ok=True)
35
+ if _history_file.exists():
36
+ assert _history_file.is_file()
37
+ history_file = _history_file
38
+
39
+ if history_file.exists():
40
+ # truncate if exists
41
+ logger.warning(
42
+ "History file already exists, truncating: {history_file}", history_file=history_file
43
+ )
44
+ history_file.unlink()
45
+ history_file.touch()
46
+
47
+ save_metadata(metadata)
48
+
49
+ return Session(
50
+ id=session_id,
51
+ work_dir=work_dir,
52
+ history_file=history_file,
53
+ )
54
+
55
+ @staticmethod
56
+ def continue_(work_dir: Path) -> "Session | None":
57
+ """Get the last session for a work directory."""
58
+ logger.debug("Continuing session for work directory: {work_dir}", work_dir=work_dir)
59
+
60
+ metadata = load_metadata()
61
+ work_dir_meta = next((wd for wd in metadata.work_dirs if wd.path == str(work_dir)), None)
62
+ if work_dir_meta is None:
63
+ logger.debug("Work directory never been used")
64
+ return None
65
+ if work_dir_meta.last_session_id is None:
66
+ logger.debug("Work directory never had a session")
67
+ return None
68
+
69
+ logger.debug(
70
+ "Found last session for work directory: {session_id}",
71
+ session_id=work_dir_meta.last_session_id,
72
+ )
73
+ session_id = work_dir_meta.last_session_id
74
+ history_file = work_dir_meta.sessions_dir / f"{session_id}.jsonl"
75
+
76
+ return Session(
77
+ id=session_id,
78
+ work_dir=work_dir,
79
+ history_file=history_file,
80
+ )
81
+
82
+ def mark_as_last(self) -> None:
83
+ """Mark this session as the last completed session for its work directory."""
84
+ metadata = load_metadata()
85
+ work_dir_meta = next(
86
+ (wd for wd in metadata.work_dirs if wd.path == str(self.work_dir)), None
87
+ )
88
+
89
+ if work_dir_meta is None:
90
+ logger.warning(
91
+ "Work directory metadata missing when marking last session, recreating: {work_dir}",
92
+ work_dir=self.work_dir,
93
+ )
94
+ work_dir_meta = WorkDirMeta(path=str(self.work_dir))
95
+ metadata.work_dirs.append(work_dir_meta)
96
+
97
+ work_dir_meta.last_session_id = self.id
98
+ logger.debug(
99
+ "Updated last session for work directory: {work_dir} -> {session_id}",
100
+ work_dir=self.work_dir,
101
+ session_id=self.id,
102
+ )
103
+ save_metadata(metadata)
kimi_cli/soul/__init__.py CHANGED
@@ -1,7 +1,17 @@
1
- from typing import TYPE_CHECKING, NamedTuple, Protocol, runtime_checkable
1
+ import asyncio
2
+ import contextlib
3
+ from collections.abc import Callable, Coroutine
4
+ from contextvars import ContextVar
5
+ from typing import TYPE_CHECKING, Any, NamedTuple, Protocol, runtime_checkable
6
+
7
+ from kosong.message import ContentPart
8
+
9
+ from kimi_cli.utils.logging import logger
10
+ from kimi_cli.wire import Wire, WireUISide
11
+ from kimi_cli.wire.message import WireMessage
2
12
 
3
13
  if TYPE_CHECKING:
4
- from kimi_cli.soul.wire import Wire
14
+ from kimi_cli.llm import LLM, ModelCapability
5
15
 
6
16
 
7
17
  class LLMNotSet(Exception):
@@ -10,6 +20,19 @@ class LLMNotSet(Exception):
10
20
  pass
11
21
 
12
22
 
23
+ class LLMNotSupported(Exception):
24
+ """Raised when the LLM does not have required capabilities."""
25
+
26
+ def __init__(self, llm: "LLM", capabilities: "list[ModelCapability]"):
27
+ self.llm = llm
28
+ self.capabilities = capabilities
29
+ capabilities_str = "capability" if len(capabilities) == 1 else "capabilities"
30
+ super().__init__(
31
+ f"LLM model '{llm.model_name}' does not support required {capabilities_str}: "
32
+ f"{', '.join(capabilities)}."
33
+ )
34
+
35
+
13
36
  class MaxStepsReached(Exception):
14
37
  """Raised when the maximum number of steps is reached."""
15
38
 
@@ -33,8 +56,13 @@ class Soul(Protocol):
33
56
  ...
34
57
 
35
58
  @property
36
- def model(self) -> str:
37
- """The LLM model used by the soul. Empty string indicates no LLM configured."""
59
+ def model_name(self) -> str:
60
+ """The name of the LLM model used by the soul. Empty string indicates no LLM configured."""
61
+ ...
62
+
63
+ @property
64
+ def model_capabilities(self) -> "set[ModelCapability] | None":
65
+ """The capabilities of the LLM model used by the soul. None indicates no LLM configured."""
38
66
  ...
39
67
 
40
68
  @property
@@ -42,18 +70,111 @@ class Soul(Protocol):
42
70
  """The current status of the soul. The returned value is immutable."""
43
71
  ...
44
72
 
45
- async def run(self, user_input: str, wire: "Wire"):
73
+ async def run(self, user_input: str | list[ContentPart]):
46
74
  """
47
- Run the agent with the given user input.
75
+ Run the agent with the given user input until the max steps or no more tool calls.
48
76
 
49
77
  Args:
50
- user_input (str): The user input to the agent.
51
- wire (Wire): The wire to send events and requests to the UI loop.
78
+ user_input (str | list[ContentPart]): The user input to the agent.
52
79
 
53
80
  Raises:
54
- ChatProviderNotSet: When the chat provider is not set.
81
+ LLMNotSet: When the LLM is not set.
82
+ LLMNotSupported: When the LLM does not have required capabilities.
55
83
  ChatProviderError: When the LLM provider returns an error.
56
84
  MaxStepsReached: When the maximum number of steps is reached.
57
85
  asyncio.CancelledError: When the run is cancelled by user.
58
86
  """
59
87
  ...
88
+
89
+
90
+ type UILoopFn = Callable[[WireUISide], Coroutine[Any, Any, None]]
91
+ """A long-running async function to visualize the agent behavior."""
92
+
93
+
94
+ class RunCancelled(Exception):
95
+ """The run was cancelled by the cancel event."""
96
+
97
+
98
+ async def run_soul(
99
+ soul: "Soul",
100
+ user_input: str | list[ContentPart],
101
+ ui_loop_fn: UILoopFn,
102
+ cancel_event: asyncio.Event,
103
+ ) -> None:
104
+ """
105
+ Run the soul with the given user input, connecting it to the UI loop with a wire.
106
+
107
+ `cancel_event` is a outside handle that can be used to cancel the run. When the
108
+ event is set, the run will be gracefully stopped and a `RunCancelled` will be raised.
109
+
110
+ Raises:
111
+ LLMNotSet: When the LLM is not set.
112
+ LLMNotSupported: When the LLM does not have required capabilities.
113
+ ChatProviderError: When the LLM provider returns an error.
114
+ MaxStepsReached: When the maximum number of steps is reached.
115
+ RunCancelled: When the run is cancelled by the cancel event.
116
+ """
117
+ wire = Wire()
118
+ wire_token = _current_wire.set(wire)
119
+
120
+ logger.debug("Starting UI loop with function: {ui_loop_fn}", ui_loop_fn=ui_loop_fn)
121
+ ui_task = asyncio.create_task(ui_loop_fn(wire.ui_side))
122
+
123
+ logger.debug("Starting soul run")
124
+ soul_task = asyncio.create_task(soul.run(user_input))
125
+
126
+ cancel_event_task = asyncio.create_task(cancel_event.wait())
127
+ await asyncio.wait(
128
+ [soul_task, cancel_event_task],
129
+ return_when=asyncio.FIRST_COMPLETED,
130
+ )
131
+
132
+ try:
133
+ if cancel_event.is_set():
134
+ logger.debug("Cancelling the run task")
135
+ soul_task.cancel()
136
+ try:
137
+ await soul_task
138
+ except asyncio.CancelledError:
139
+ raise RunCancelled from None
140
+ else:
141
+ assert soul_task.done() # either stop event is set or the run task is done
142
+ cancel_event_task.cancel()
143
+ with contextlib.suppress(asyncio.CancelledError):
144
+ await cancel_event_task
145
+ soul_task.result() # this will raise if any exception was raised in the run task
146
+ finally:
147
+ logger.debug("Shutting down the UI loop")
148
+ # shutting down the wire should break the UI loop
149
+ wire.shutdown()
150
+ try:
151
+ await asyncio.wait_for(ui_task, timeout=0.5)
152
+ except asyncio.QueueShutDown:
153
+ logger.debug("UI loop shut down")
154
+ pass
155
+ except TimeoutError:
156
+ logger.warning("UI loop timed out")
157
+ finally:
158
+ _current_wire.reset(wire_token)
159
+
160
+
161
+ _current_wire = ContextVar[Wire | None]("current_wire", default=None)
162
+
163
+
164
+ def get_wire_or_none() -> Wire | None:
165
+ """
166
+ Get the current wire or None.
167
+ Expect to be not None when called from anywhere in the agent loop.
168
+ """
169
+ return _current_wire.get()
170
+
171
+
172
+ def wire_send(msg: WireMessage) -> None:
173
+ """
174
+ Send a wire message to the current wire.
175
+ Take this as `print` and `input` for souls.
176
+ Souls should always use this function to send wire messages.
177
+ """
178
+ wire = get_wire_or_none()
179
+ assert wire is not None, "Wire is expected to be set when soul is running"
180
+ wire.soul_side.send(msg)
kimi_cli/soul/agent.py ADDED
@@ -0,0 +1,159 @@
1
+ import importlib
2
+ import inspect
3
+ import string
4
+ from pathlib import Path
5
+ from typing import Any, NamedTuple
6
+
7
+ from kosong.tooling import CallableTool, CallableTool2, Toolset
8
+
9
+ from kimi_cli.agentspec import ResolvedAgentSpec, load_agent_spec
10
+ from kimi_cli.config import Config
11
+ from kimi_cli.session import Session
12
+ from kimi_cli.soul.approval import Approval
13
+ from kimi_cli.soul.denwarenji import DenwaRenji
14
+ from kimi_cli.soul.runtime import BuiltinSystemPromptArgs, Runtime
15
+ from kimi_cli.soul.toolset import CustomToolset
16
+ from kimi_cli.tools import SkipThisTool
17
+ from kimi_cli.utils.logging import logger
18
+
19
+
20
+ class Agent(NamedTuple):
21
+ """The loaded agent."""
22
+
23
+ name: str
24
+ system_prompt: str
25
+ toolset: Toolset
26
+
27
+
28
+ async def load_agent(
29
+ agent_file: Path,
30
+ runtime: Runtime,
31
+ *,
32
+ mcp_configs: list[dict[str, Any]],
33
+ ) -> Agent:
34
+ """
35
+ Load agent from specification file.
36
+
37
+ Raises:
38
+ FileNotFoundError: If the agent spec file does not exist.
39
+ AgentSpecError: If the agent spec is not valid.
40
+ """
41
+ logger.info("Loading agent: {agent_file}", agent_file=agent_file)
42
+ agent_spec = load_agent_spec(agent_file)
43
+
44
+ system_prompt = _load_system_prompt(
45
+ agent_spec.system_prompt_path,
46
+ agent_spec.system_prompt_args,
47
+ runtime.builtin_args,
48
+ )
49
+
50
+ tool_deps = {
51
+ ResolvedAgentSpec: agent_spec,
52
+ Runtime: runtime,
53
+ Config: runtime.config,
54
+ BuiltinSystemPromptArgs: runtime.builtin_args,
55
+ Session: runtime.session,
56
+ DenwaRenji: runtime.denwa_renji,
57
+ Approval: runtime.approval,
58
+ }
59
+ tools = agent_spec.tools
60
+ if agent_spec.exclude_tools:
61
+ logger.debug("Excluding tools: {tools}", tools=agent_spec.exclude_tools)
62
+ tools = [tool for tool in tools if tool not in agent_spec.exclude_tools]
63
+ toolset = CustomToolset()
64
+ bad_tools = _load_tools(toolset, tools, tool_deps)
65
+ if bad_tools:
66
+ raise ValueError(f"Invalid tools: {bad_tools}")
67
+
68
+ assert isinstance(toolset, CustomToolset)
69
+ if mcp_configs:
70
+ await _load_mcp_tools(toolset, mcp_configs)
71
+
72
+ return Agent(
73
+ name=agent_spec.name,
74
+ system_prompt=system_prompt,
75
+ toolset=toolset,
76
+ )
77
+
78
+
79
+ def _load_system_prompt(
80
+ path: Path, args: dict[str, str], builtin_args: BuiltinSystemPromptArgs
81
+ ) -> str:
82
+ logger.info("Loading system prompt: {path}", path=path)
83
+ system_prompt = path.read_text(encoding="utf-8").strip()
84
+ logger.debug(
85
+ "Substituting system prompt with builtin args: {builtin_args}, spec args: {spec_args}",
86
+ builtin_args=builtin_args,
87
+ spec_args=args,
88
+ )
89
+ return string.Template(system_prompt).substitute(builtin_args._asdict(), **args)
90
+
91
+
92
+ type ToolType = CallableTool | CallableTool2[Any]
93
+ # TODO: move this to kosong.tooling.simple
94
+
95
+
96
+ def _load_tools(
97
+ toolset: CustomToolset,
98
+ tool_paths: list[str],
99
+ dependencies: dict[type[Any], Any],
100
+ ) -> list[str]:
101
+ bad_tools: list[str] = []
102
+ for tool_path in tool_paths:
103
+ try:
104
+ tool = _load_tool(tool_path, dependencies)
105
+ except SkipThisTool:
106
+ logger.info("Skipping tool: {tool_path}", tool_path=tool_path)
107
+ continue
108
+ if tool:
109
+ toolset += tool
110
+ else:
111
+ bad_tools.append(tool_path)
112
+ logger.info("Loaded tools: {tools}", tools=[tool.name for tool in toolset.tools])
113
+ if bad_tools:
114
+ logger.error("Bad tools: {bad_tools}", bad_tools=bad_tools)
115
+ return bad_tools
116
+
117
+
118
+ def _load_tool(tool_path: str, dependencies: dict[type[Any], Any]) -> ToolType | None:
119
+ logger.debug("Loading tool: {tool_path}", tool_path=tool_path)
120
+ module_name, class_name = tool_path.rsplit(":", 1)
121
+ try:
122
+ module = importlib.import_module(module_name)
123
+ except ImportError:
124
+ return None
125
+ cls = getattr(module, class_name, None)
126
+ if cls is None:
127
+ return None
128
+ args: list[type[Any]] = []
129
+ for param in inspect.signature(cls).parameters.values():
130
+ if param.kind == inspect.Parameter.KEYWORD_ONLY:
131
+ # once we encounter a keyword-only parameter, we stop injecting dependencies
132
+ break
133
+ # all positional parameters should be dependencies to be injected
134
+ if param.annotation not in dependencies:
135
+ raise ValueError(f"Tool dependency not found: {param.annotation}")
136
+ args.append(dependencies[param.annotation])
137
+ return cls(*args)
138
+
139
+
140
+ async def _load_mcp_tools(
141
+ toolset: CustomToolset,
142
+ mcp_configs: list[dict[str, Any]],
143
+ ):
144
+ """
145
+ Raises:
146
+ ValueError: If the MCP config is not valid.
147
+ RuntimeError: If the MCP server cannot be connected.
148
+ """
149
+ import fastmcp
150
+
151
+ from kimi_cli.tools.mcp import MCPTool
152
+
153
+ for mcp_config in mcp_configs:
154
+ logger.info("Loading MCP tools from: {mcp_config}", mcp_config=mcp_config)
155
+ client = fastmcp.Client(mcp_config)
156
+ async with client:
157
+ for tool in await client.list_tools():
158
+ toolset += MCPTool(tool, client)
159
+ return toolset