kimi-cli 0.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kimi-cli might be problematic. Click here for more details.
- kimi_cli/CHANGELOG.md +304 -0
- kimi_cli/__init__.py +374 -0
- kimi_cli/agent.py +261 -0
- kimi_cli/agents/koder/README.md +3 -0
- kimi_cli/agents/koder/agent.yaml +24 -0
- kimi_cli/agents/koder/sub.yaml +11 -0
- kimi_cli/agents/koder/system.md +72 -0
- kimi_cli/config.py +138 -0
- kimi_cli/llm.py +8 -0
- kimi_cli/metadata.py +117 -0
- kimi_cli/prompts/metacmds/__init__.py +4 -0
- kimi_cli/prompts/metacmds/compact.md +74 -0
- kimi_cli/prompts/metacmds/init.md +21 -0
- kimi_cli/py.typed +0 -0
- kimi_cli/share.py +8 -0
- kimi_cli/soul/__init__.py +59 -0
- kimi_cli/soul/approval.py +69 -0
- kimi_cli/soul/context.py +142 -0
- kimi_cli/soul/denwarenji.py +37 -0
- kimi_cli/soul/kimisoul.py +248 -0
- kimi_cli/soul/message.py +76 -0
- kimi_cli/soul/toolset.py +25 -0
- kimi_cli/soul/wire.py +101 -0
- kimi_cli/tools/__init__.py +85 -0
- kimi_cli/tools/bash/__init__.py +97 -0
- kimi_cli/tools/bash/bash.md +31 -0
- kimi_cli/tools/dmail/__init__.py +38 -0
- kimi_cli/tools/dmail/dmail.md +15 -0
- kimi_cli/tools/file/__init__.py +21 -0
- kimi_cli/tools/file/glob.md +17 -0
- kimi_cli/tools/file/glob.py +149 -0
- kimi_cli/tools/file/grep.md +5 -0
- kimi_cli/tools/file/grep.py +285 -0
- kimi_cli/tools/file/patch.md +8 -0
- kimi_cli/tools/file/patch.py +131 -0
- kimi_cli/tools/file/read.md +14 -0
- kimi_cli/tools/file/read.py +139 -0
- kimi_cli/tools/file/replace.md +7 -0
- kimi_cli/tools/file/replace.py +132 -0
- kimi_cli/tools/file/write.md +5 -0
- kimi_cli/tools/file/write.py +107 -0
- kimi_cli/tools/mcp.py +85 -0
- kimi_cli/tools/task/__init__.py +156 -0
- kimi_cli/tools/task/task.md +26 -0
- kimi_cli/tools/test.py +55 -0
- kimi_cli/tools/think/__init__.py +21 -0
- kimi_cli/tools/think/think.md +1 -0
- kimi_cli/tools/todo/__init__.py +27 -0
- kimi_cli/tools/todo/set_todo_list.md +15 -0
- kimi_cli/tools/utils.py +150 -0
- kimi_cli/tools/web/__init__.py +4 -0
- kimi_cli/tools/web/fetch.md +1 -0
- kimi_cli/tools/web/fetch.py +94 -0
- kimi_cli/tools/web/search.md +1 -0
- kimi_cli/tools/web/search.py +126 -0
- kimi_cli/ui/__init__.py +68 -0
- kimi_cli/ui/acp/__init__.py +441 -0
- kimi_cli/ui/print/__init__.py +176 -0
- kimi_cli/ui/shell/__init__.py +326 -0
- kimi_cli/ui/shell/console.py +3 -0
- kimi_cli/ui/shell/liveview.py +158 -0
- kimi_cli/ui/shell/metacmd.py +309 -0
- kimi_cli/ui/shell/prompt.py +574 -0
- kimi_cli/ui/shell/setup.py +192 -0
- kimi_cli/ui/shell/update.py +204 -0
- kimi_cli/utils/changelog.py +101 -0
- kimi_cli/utils/logging.py +18 -0
- kimi_cli/utils/message.py +8 -0
- kimi_cli/utils/path.py +23 -0
- kimi_cli/utils/provider.py +64 -0
- kimi_cli/utils/pyinstaller.py +24 -0
- kimi_cli/utils/string.py +12 -0
- kimi_cli-0.35.dist-info/METADATA +24 -0
- kimi_cli-0.35.dist-info/RECORD +76 -0
- kimi_cli-0.35.dist-info/WHEEL +4 -0
- kimi_cli-0.35.dist-info/entry_points.txt +3 -0
kimi_cli/agent.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
1
|
+
import importlib
|
|
2
|
+
import inspect
|
|
3
|
+
import string
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any, NamedTuple
|
|
6
|
+
|
|
7
|
+
import fastmcp
|
|
8
|
+
import yaml
|
|
9
|
+
from kosong.tooling import Toolset
|
|
10
|
+
from kosong.tooling.simple import ToolType
|
|
11
|
+
from pydantic import BaseModel, Field
|
|
12
|
+
|
|
13
|
+
from kimi_cli.config import Config
|
|
14
|
+
from kimi_cli.llm import LLM
|
|
15
|
+
from kimi_cli.metadata import Session
|
|
16
|
+
from kimi_cli.soul.approval import Approval
|
|
17
|
+
from kimi_cli.soul.denwarenji import DenwaRenji
|
|
18
|
+
from kimi_cli.soul.toolset import CustomToolset
|
|
19
|
+
from kimi_cli.tools.mcp import MCPTool
|
|
20
|
+
from kimi_cli.utils.logging import logger
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AgentSpec(BaseModel):
    """Agent specification.

    Mirrors the ``agent`` mapping of an agent YAML file. Fields marked
    "required" may be ``None`` here only because they can be inherited from a
    base spec via ``extend``; ``load_agent`` rejects specs where they are
    still missing after the extension chain is resolved.
    """

    extend: str | None = Field(default=None, description="Agent file to extend")
    name: str | None = Field(default=None, description="Agent name")  # required
    system_prompt_path: Path | None = Field(
        default=None, description="System prompt path"
    )  # required
    system_prompt_args: dict[str, str] = Field(
        default_factory=dict, description="System prompt arguments"
    )
    tools: list[str] | None = Field(default=None, description="Tools")  # required
    exclude_tools: list[str] | None = Field(default=None, description="Tools to exclude")
    subagents: dict[str, "SubagentSpec"] | None = Field(default=None, description="Subagents")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class SubagentSpec(BaseModel):
    """Subagent specification.

    The values of the ``subagents`` mapping in an agent file; ``path`` is
    resolved relative to the declaring agent file in ``_load_agent_spec``.
    """

    path: Path = Field(description="Subagent file path")
    description: str = Field(description="Subagent description")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class BuiltinSystemPromptArgs(NamedTuple):
    """Builtin system prompt arguments.

    Field names double as ``${...}`` placeholder names in system prompt
    templates (substituted via ``_asdict()`` in ``_load_system_prompt``).
    """

    KIMI_NOW: str
    """The current datetime."""
    KIMI_WORK_DIR: Path
    """The current working directory."""
    KIMI_WORK_DIR_LS: str
    """The `ls -la` output of current working directory."""
    KIMI_AGENTS_MD: str  # TODO: move to first message from system prompt
    """The content of AGENTS.md."""
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class AgentGlobals(NamedTuple):
    """Agent globals.

    Shared per-process objects handed to tools through dependency injection
    (see the ``tool_deps`` table built in ``load_agent``).
    """

    config: Config  # loaded user configuration
    llm: LLM | None  # active LLM client, if one is configured
    builtin_args: BuiltinSystemPromptArgs  # values substituted into system prompts
    denwa_renji: DenwaRenji
    session: Session  # current session (work dir + history file)
    approval: Approval
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class Agent(NamedTuple):
    """The loaded agent."""

    name: str  # resolved agent name from the spec
    system_prompt: str  # system prompt with all ${...} placeholders substituted
    toolset: Toolset  # instantiated tools (plus MCP tools when loaded)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def get_agents_dir() -> Path:
    """Return the directory holding the built-in agent definitions."""
    return Path(__file__).with_name("agents")


# Built-in "koder" agent: used when no agent file is given, and as the target
# of `extend: default` in user-provided agent files.
DEFAULT_AGENT_FILE = get_agents_dir() / "koder" / "agent.yaml"
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
async def load_agent_with_mcp(
    agent_file: Path,
    globals_: AgentGlobals,
    mcp_configs: list[dict[str, Any]],
) -> Agent:
    """Load an agent from `agent_file`, then register MCP tools, if any.

    Raises:
        ValueError: If the agent spec is not valid.
    """
    loaded = load_agent(agent_file, globals_)
    assert isinstance(loaded.toolset, CustomToolset)
    if not mcp_configs:
        return loaded
    await _load_mcp_tools(loaded.toolset, mcp_configs)
    return loaded
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def load_agent(
    agent_file: Path,
    globals_: AgentGlobals,
) -> Agent:
    """
    Load agent from specification file.

    Raises:
        ValueError: If the agent spec is not valid.
    """
    spec = _load_agent_spec(agent_file)
    assert spec.extend is None, "agent extension should be recursively resolved"

    # These may still be None when no spec in the extension chain set them.
    if spec.name is None:
        raise ValueError("Agent name is required")
    if spec.system_prompt_path is None:
        raise ValueError("System prompt path is required")
    if spec.tools is None:
        raise ValueError("Tools are required")

    system_prompt = _load_system_prompt(
        spec.system_prompt_path, spec.system_prompt_args, globals_.builtin_args
    )

    # Dependency-injection table consulted by _load_tool: positional tool
    # constructor parameters are matched by annotation against these keys.
    tool_deps: dict[type[Any], Any] = {
        AgentSpec: spec,
        AgentGlobals: globals_,
        Config: globals_.config,
        BuiltinSystemPromptArgs: globals_.builtin_args,
        Session: globals_.session,
        DenwaRenji: globals_.denwa_renji,
        Approval: globals_.approval,
    }

    selected = spec.tools
    if spec.exclude_tools:
        logger.debug("Excluding tools: {tools}", tools=spec.exclude_tools)
        excluded = set(spec.exclude_tools)
        selected = [t for t in selected if t not in excluded]

    toolset = CustomToolset()
    bad_tools = _load_tools(toolset, selected, tool_deps)
    if bad_tools:
        raise ValueError(f"Invalid tools: {bad_tools}")

    return Agent(name=spec.name, system_prompt=system_prompt, toolset=toolset)
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _load_agent_spec(agent_file: Path) -> AgentSpec:
    """Parse an agent YAML file into an AgentSpec, resolving `extend` chains.

    Relative paths inside the spec (system prompt, subagent files, extended
    base file) are resolved against the directory containing `agent_file`.

    Raises:
        ValueError: If the spec version is unsupported.
    """
    assert agent_file.is_file(), "expect agent file to exist"
    with open(agent_file, encoding="utf-8") as f:
        # An empty YAML file parses to None; normalize to an empty mapping so
        # the lookups below raise meaningful errors instead of an
        # AttributeError on None.
        data: dict[str, Any] = yaml.safe_load(f) or {}

    version = data.get("version", 1)
    if version != 1:
        raise ValueError(f"Unsupported agent spec version: {version}")

    agent_spec = AgentSpec(**data.get("agent", {}))
    if agent_spec.system_prompt_path is not None:
        agent_spec.system_prompt_path = agent_file.parent / agent_spec.system_prompt_path
    if agent_spec.subagents is not None:
        for v in agent_spec.subagents.values():
            v.path = agent_file.parent / v.path
    if agent_spec.extend:
        if agent_spec.extend == "default":
            base_agent_file = DEFAULT_AGENT_FILE
        else:
            base_agent_file = agent_file.parent / agent_spec.extend
        # NOTE(review): a cyclic `extend` chain would recurse without bound —
        # presumably specs never form cycles; confirm whether a guard is needed.
        base_agent_spec = _load_agent_spec(base_agent_file)
        # Overlay the child spec onto the resolved base: scalar/list fields
        # replace wholesale, prompt args merge key-by-key.
        if agent_spec.name is not None:
            base_agent_spec.name = agent_spec.name
        if agent_spec.system_prompt_path is not None:
            base_agent_spec.system_prompt_path = agent_spec.system_prompt_path
        for k, v in agent_spec.system_prompt_args.items():
            base_agent_spec.system_prompt_args[k] = v
        if agent_spec.tools is not None:
            base_agent_spec.tools = agent_spec.tools
        if agent_spec.exclude_tools is not None:
            base_agent_spec.exclude_tools = agent_spec.exclude_tools
        if agent_spec.subagents is not None:
            base_agent_spec.subagents = agent_spec.subagents
        agent_spec = base_agent_spec
    return agent_spec
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def _load_system_prompt(
    path: Path, args: dict[str, str], builtin_args: BuiltinSystemPromptArgs
) -> str:
    """Read a system prompt template and substitute its `${...}` placeholders.

    Builtin arguments are applied first; spec-provided `args` can add to or
    override them.

    Raises:
        KeyError: If the template references a placeholder that is neither a
            builtin argument nor present in `args` (Template.substitute).
    """
    # Explicit encoding for consistency with the rest of the codebase, which
    # always opens text files as UTF-8.
    system_prompt = path.read_text(encoding="utf-8").strip()
    logger.debug(
        "Substituting system prompt with builtin args: {builtin_args}, spec args: {spec_args}",
        builtin_args=builtin_args,
        spec_args=args,
    )
    return string.Template(system_prompt).substitute(builtin_args._asdict(), **args)
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _load_tools(
    toolset: CustomToolset,
    tool_paths: list[str],
    dependencies: dict[type[Any], Any],
) -> list[str]:
    """Instantiate every tool path into `toolset`.

    Returns:
        The tool paths that could not be loaded (empty list on full success).
    """
    failed: list[str] = []
    for path in tool_paths:
        loaded = _load_tool(path, dependencies)
        if loaded:
            toolset += loaded
        else:
            failed.append(path)
    logger.debug("Loaded tools: {tools}", tools=toolset.tools)
    if failed:
        logger.error("Bad tools: {bad_tools}", bad_tools=failed)
    return failed
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _load_tool(tool_path: str, dependencies: dict[type[Any], Any]) -> ToolType | None:
    """Import and instantiate a single tool from a `module.path:ClassName` string.

    Positional constructor parameters are treated as dependencies and injected
    from `dependencies`, keyed by the parameter's type annotation.

    Returns:
        The tool instance, or None when the path is malformed, the module
        cannot be imported, or the class does not exist.

    Raises:
        ValueError: If a positional constructor parameter's annotation has no
            entry in `dependencies`.
    """
    logger.debug("Loading tool: {tool_path}", tool_path=tool_path)
    if ":" not in tool_path:
        # Previously a path without ":" blew up in rsplit-unpacking; report it
        # as a bad tool instead so the caller can surface all invalid entries.
        return None
    module_name, class_name = tool_path.rsplit(":", 1)
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        return None
    cls = getattr(module, class_name, None)
    if cls is None:
        return None
    args = []
    for param in inspect.signature(cls).parameters.values():
        if param.kind == inspect.Parameter.KEYWORD_ONLY:
            # once we encounter a keyword-only parameter, we stop injecting dependencies
            break
        # all positional parameters should be dependencies to be injected
        if param.annotation not in dependencies:
            raise ValueError(f"Tool dependency not found: {param.annotation}")
        args.append(dependencies[param.annotation])
    return cls(*args)
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
async def _load_mcp_tools(
    toolset: CustomToolset,
    mcp_configs: list[dict[str, Any]],
):
    """
    Connect to each configured MCP server and add its tools to `toolset`.

    Raises:
        ValueError: If the MCP config is not valid.
        RuntimeError: If the MCP server cannot be connected.
    """
    for mcp_config in mcp_configs:
        client = fastmcp.Client(mcp_config)
        # NOTE(review): the client context exits right after listing, yet the
        # client object is stored inside each MCPTool — presumably fastmcp
        # clients reconnect (or are re-enterable) when a tool is later
        # invoked; confirm against MCPTool's implementation.
        async with client:
            for tool in await client.list_tools():
                toolset += MCPTool(tool, client)
    return toolset
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def load_agents_md(work_dir: Path) -> str | None:
    """Return the stripped content of the project-root AGENTS.md, if present.

    Checks `AGENTS.md` first, then `agents.md`.

    Returns:
        The file content with surrounding whitespace stripped, or None when
        neither candidate exists.
    """
    candidates = [
        work_dir / "AGENTS.md",
        work_dir / "agents.md",
    ]
    for path in candidates:
        if path.is_file():
            logger.debug("Loaded agents.md: {path}", path=path)
            # Explicit encoding for consistency with the rest of the codebase.
            return path.read_text(encoding="utf-8").strip()
    logger.debug("No AGENTS.md found")
    return None
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# Built-in "koder" agent definition (agent spec version 1).
version: 1
agent:
  name: ""
  system_prompt_path: ./system.md
  system_prompt_args:
    ROLE_ADDITIONAL: ""
  # Tool entries are "module.path:ClassName" import strings, instantiated by
  # kimi_cli.agent._load_tool.
  tools:
    - "kimi_cli.tools.task:Task"
    # - "kimi_cli.tools.dmail:SendDMail"
    - "kimi_cli.tools.think:Think"
    - "kimi_cli.tools.todo:SetTodoList"
    - "kimi_cli.tools.bash:Bash"
    - "kimi_cli.tools.file:ReadFile"
    - "kimi_cli.tools.file:Glob"
    - "kimi_cli.tools.file:Grep"
    - "kimi_cli.tools.file:WriteFile"
    - "kimi_cli.tools.file:StrReplaceFile"
    # - "kimi_cli.tools.file:PatchFile"
    - "kimi_cli.tools.web:SearchWeb"
    - "kimi_cli.tools.web:FetchURL"
  subagents:
    koder:
      path: ./sub.yaml
      description: "Good at general software engineering tasks."
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# Subagent variant of the koder agent: extends agent.yaml and removes the
# tools a subagent must not use (spawning further subagents, todo list, dmail).
version: 1
agent:
  extend: ./agent.yaml
  system_prompt_args:
    ROLE_ADDITIONAL: |
      You are now running as a subagent. All the `user` messages are sent by the main agent. The main agent cannot see your context, it can only see your last message when you finish the task. You need to provide a comprehensive summary on what you have done and learned in your final message. If you wrote or modified any files, you must mention them in the summary.
  exclude_tools:
    - "kimi_cli.tools.task:Task"
    - "kimi_cli.tools.dmail:SendDMail"
    - "kimi_cli.tools.todo:SetTodoList"
  # NOTE(review): an empty value parses to null, and the spec merge only
  # overrides base subagents when the child's value is not None — so the base
  # subagents are actually inherited here. Harmless in practice because the
  # Task tool is excluded above, but confirm the intent.
  subagents: # make sure no subagents are provided
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
You are Kimi CLI. You are an interactive CLI agent specializing in software engineering tasks. Your primary goal is to help users safely and efficiently, adhering strictly to the following instructions and utilizing your available tools.
|
|
2
|
+
|
|
3
|
+
${ROLE_ADDITIONAL}
|
|
4
|
+
|
|
5
|
+
# Prompt and Tool Use
|
|
6
|
+
|
|
7
|
+
The user's requests are provided in natural language within `user` messages, which may contain code snippets, logs, file paths, or specific requirements. ALWAYS follow the user's requests, always stay on track. Do not do anything that is not asked.
|
|
8
|
+
|
|
9
|
+
When handling the user's request, you can call available tools to accomplish the task. When calling tools, do not provide explanations because the tool calls themselves should be self-explanatory. You MUST follow the description of each tool and its parameters when calling tools.
|
|
10
|
+
|
|
11
|
+
You have the capability to output any number of tool calls in a single response. If you anticipate making multiple non-interfering tool calls, you are HIGHLY RECOMMENDED to make them in parallel to significantly improve efficiency. This is very important to your performance.
|
|
12
|
+
|
|
13
|
+
The results of the tool calls will be returned to you in a `tool` message. In some cases, non-plain-text content might be sent as a `user` message following the `tool` message. You must decide on your next action based on the tool call results, which could be one of the following: 1. Continue working on the task, 2. Inform the user that the task is completed or has failed, or 3. Ask the user for more information.
|
|
14
|
+
|
|
15
|
+
The system may, where appropriate, insert hints or information wrapped in `<system>` and `</system>` tags within `user` or `tool` messages. This information is relevant to the current task or tool calls, and may or may not be important to you. Take this information into consideration when determining your next action.
|
|
16
|
+
|
|
17
|
+
When responding to the user, you MUST use the SAME language as the user, unless explicitly instructed to do otherwise.
|
|
18
|
+
|
|
19
|
+
# General Coding Guidelines
|
|
20
|
+
|
|
21
|
+
Always think carefully. Be patient and thorough. Do not give up too early.
|
|
22
|
+
|
|
23
|
+
ALWAYS, keep it stupidly simple. Do not overcomplicate things.
|
|
24
|
+
|
|
25
|
+
When building something from scratch, you should:
|
|
26
|
+
|
|
27
|
+
- Understand the user's requirements.
|
|
28
|
+
- Design the architecture and make a plan for the implementation.
|
|
29
|
+
- Write the code in a modular and maintainable way.
|
|
30
|
+
|
|
31
|
+
When working on an existing codebase, you should:
|
|
32
|
+
|
|
33
|
+
- Understand the codebase and the user's requirements. Identify the ultimate goal and the most important criteria to achieve the goal.
|
|
34
|
+
- For a bug fix, you typically need to check error logs or failed tests, scan over the codebase to find the root cause, and figure out a fix. If user mentioned any failed tests, you should make sure they pass after the changes.
|
|
35
|
+
- For a feature, you typically need to design the architecture, and write the code in a modular and maintainable way, with minimal intrusions to existing code. Add new tests if the project already has tests.
|
|
36
|
+
- For a code refactoring, you typically need to update all the places that call the code you are refactoring if the interface changes. DO NOT change any existing logic especially in tests, focus only on fixing any errors caused by the interface changes.
|
|
37
|
+
- Make MINIMAL changes to achieve the goal. This is very important to your performance.
|
|
38
|
+
- Follow the coding style of existing code in the project.
|
|
39
|
+
|
|
40
|
+
# Working Environment
|
|
41
|
+
|
|
42
|
+
## Operating System
|
|
43
|
+
|
|
44
|
+
The operating environment is not in a sandbox. Any action you take — especially a mutation — will immediately affect the user's system. So you MUST be extremely cautious. Unless explicitly instructed to do so, you should never access (read/write/execute) files outside of the working directory.
|
|
45
|
+
|
|
46
|
+
## Working Directory
|
|
47
|
+
|
|
48
|
+
The current working directory is `${KIMI_WORK_DIR}`. This should be considered as the project root if you are instructed to perform tasks on the project. Every file system operation will be relative to the working directory if you do not explicitly specify the absolute path. Tools may require absolute paths for some parameters, if so, you should strictly follow the requirements.
|
|
49
|
+
|
|
50
|
+
The `ls -la` output of current working directory is:
|
|
51
|
+
|
|
52
|
+
```
|
|
53
|
+
${KIMI_WORK_DIR_LS}
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
Use this as your basic understanding of the project structure.
|
|
57
|
+
|
|
58
|
+
## Date and Time
|
|
59
|
+
|
|
60
|
+
The current date and time in ISO format is `${KIMI_NOW}`. This is only a reference for you when searching the web, or checking file modification time, etc. If you need the exact time, use Bash tool with proper command.
|
|
61
|
+
|
|
62
|
+
# Project Information
|
|
63
|
+
|
|
64
|
+
Markdown files named `AGENTS.md` usually contain the background, structure, coding styles, user preferences and other relevant information about the project. You should use this information to understand the project and the user's preferences. `AGENTS.md` files may exist at different locations in the project, but typically there is one in the project root. The following content between two `---`s is the content of the root-level `AGENTS.md` file.
|
|
65
|
+
|
|
66
|
+
`${KIMI_WORK_DIR}/AGENTS.md`:
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
${KIMI_AGENTS_MD}
|
|
71
|
+
|
|
72
|
+
---
|
kimi_cli/config.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Literal, Self
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, Field, SecretStr, ValidationError, field_serializer, model_validator
|
|
6
|
+
|
|
7
|
+
from kimi_cli.share import get_share_dir
|
|
8
|
+
from kimi_cli.utils.logging import logger
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class LLMProvider(BaseModel):
    """LLM provider configuration."""

    type: Literal["kimi", "openai_legacy", "_chaos"]
    """Provider type"""
    base_url: str
    """API base URL"""
    api_key: SecretStr
    """API key"""

    @field_serializer("api_key", when_used="json")
    def dump_secret(self, v: SecretStr):
        # SecretStr serializes masked ("**********") by default; reveal the
        # real value so the key round-trips through the JSON config file.
        return v.get_secret_value()
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class LLMModel(BaseModel):
    """LLM model configuration.

    `provider` must match a key of `Config.providers`; this is enforced by
    `Config.validate_model`.
    """

    provider: str
    """Provider name"""
    model: str
    """Model name"""
    max_context_size: int
    """Maximum context size (unit: tokens)"""
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class LoopControl(BaseModel):
    """Agent loop control configuration."""

    max_steps_per_run: int = 100
    """Maximum number of steps in one run"""
    max_retries_per_step: int = 3
    """Maximum number of retries in one step"""
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class MoonshotSearchConfig(BaseModel):
    """Moonshot Search configuration."""

    base_url: str
    """Base URL for Moonshot Search service."""
    api_key: SecretStr
    """API key for Moonshot Search service."""

    @field_serializer("api_key", when_used="json")
    def dump_secret(self, v: SecretStr):
        # Reveal the real key in JSON output so it round-trips through the
        # config file (SecretStr would otherwise serialize masked).
        return v.get_secret_value()
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class Services(BaseModel):
    """Services configuration.

    Optional integrations; a None field means the service is not configured.
    """

    moonshot_search: MoonshotSearchConfig | None = None
    """Moonshot Search configuration."""
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class Config(BaseModel):
    """Main configuration structure."""

    default_model: str = Field(default="", description="Default model to use")
    models: dict[str, LLMModel] = Field(default_factory=dict, description="List of LLM models")
    providers: dict[str, LLMProvider] = Field(
        default_factory=dict, description="List of LLM providers"
    )
    loop_control: LoopControl = Field(default_factory=LoopControl, description="Agent loop control")
    services: Services = Field(default_factory=Services, description="Services configuration")

    @model_validator(mode="after")
    def validate_model(self) -> Self:
        """Cross-field checks: the default model must exist, and every model
        must reference a declared provider."""
        if self.default_model and self.default_model not in self.models:
            raise ValueError(f"Default model {self.default_model} not found in models")
        for model in self.models.values():
            if model.provider not in self.providers:
                raise ValueError(f"Provider {model.provider} not found in providers")
        return self
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def get_config_file() -> Path:
    """Get the configuration file path."""
    share_dir = get_share_dir()
    return share_dir / "config.json"
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def get_default_config() -> Config:
    """Get the default configuration."""
    # Every Config field declares a default, so a bare constructor call yields
    # exactly the same object the explicit field-by-field version did.
    return Config()
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def load_config() -> Config:
    """Load configuration from config file.

    If no config file exists yet, a default one is created on disk first.

    Returns:
        Validated Config object.

    Raises:
        ConfigError: If the configuration file cannot be parsed or validated.
    """
    config_file = get_config_file()
    logger.debug("Loading config from file: {file}", file=config_file)

    if not config_file.exists():
        config = get_default_config()
        logger.debug("No config file found, creating default config: {config}", config=config)
        # Reuse save_config so the on-disk serialization format is defined in
        # exactly one place instead of being duplicated here.
        save_config(config)
        return config

    try:
        with open(config_file, encoding="utf-8") as f:
            data = json.load(f)
        return Config(**data)
    except (json.JSONDecodeError, ValidationError) as e:
        raise ConfigError(f"Invalid configuration file: {config_file}") from e
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class ConfigError(Exception):
    """Configuration error: the config file could not be parsed or validated."""
    # The previous __init__ only forwarded its single argument to
    # Exception.__init__, which is exactly what Exception does by default —
    # so it is omitted. ConfigError("msg") behaves identically.
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def save_config(config: Config):
    """Save configuration to config file."""
    config_file = get_config_file()
    logger.debug("Saving config to file: {file}", file=config_file)
    payload = config.model_dump_json(indent=2, exclude_none=True)
    with open(config_file, "w", encoding="utf-8") as f:
        f.write(payload)
|
kimi_cli/llm.py
ADDED
kimi_cli/metadata.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import uuid
|
|
3
|
+
from hashlib import md5
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import NamedTuple
|
|
6
|
+
|
|
7
|
+
from pydantic import BaseModel, Field
|
|
8
|
+
|
|
9
|
+
from kimi_cli.share import get_share_dir
|
|
10
|
+
from kimi_cli.utils.logging import logger
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def get_metadata_file() -> Path:
    """Path of the global kimi metadata file inside the share directory."""
    share = get_share_dir()
    return share / "kimi.json"
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class WorkDirMeta(BaseModel):
    """Metadata for a work directory."""

    path: str
    """The full path of the work directory."""

    last_session_id: str | None = None
    """Last session ID of this work directory."""

    @property
    def sessions_dir(self) -> Path:
        # Sessions live under a directory keyed by the md5 hexdigest of the
        # work-dir path, keeping per-project histories apart without putting
        # raw path characters into file names. NOTE: reading this property
        # creates the directory as a side effect.
        path = get_share_dir() / "sessions" / md5(self.path.encode()).hexdigest()
        path.mkdir(parents=True, exist_ok=True)
        return path
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class Metadata(BaseModel):
    """Kimi metadata structure (root object of the kimi.json file)."""

    work_dirs: list[WorkDirMeta] = Field(default_factory=list, description="Work directory list")
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _load_metadata() -> Metadata:
    """Read the metadata file, falling back to empty Metadata when absent."""
    metadata_file = get_metadata_file()
    logger.debug("Loading metadata from file: {file}", file=metadata_file)
    if not metadata_file.exists():
        logger.debug("No metadata file found, creating empty metadata")
        return Metadata()
    with open(metadata_file, encoding="utf-8") as f:
        raw = json.load(f)
    return Metadata(**raw)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _save_metadata(metadata: Metadata):
    """Write `metadata` to the metadata file as pretty-printed JSON."""
    metadata_file = get_metadata_file()
    logger.debug("Saving metadata to file: {file}", file=metadata_file)
    payload = metadata.model_dump()
    with open(metadata_file, "w", encoding="utf-8") as f:
        json.dump(payload, f, indent=2, ensure_ascii=False)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class Session(NamedTuple):
    """A session of a work directory."""

    id: str  # UUID4 string identifying the session
    work_dir: WorkDirMeta  # metadata entry of the owning work directory
    history_file: Path  # JSONL file holding the session's message history
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def new_session(work_dir: Path, _history_file: Path | None = None) -> Session:
    """Create a new session for a work directory.

    Args:
        work_dir: The work directory the session belongs to.
        _history_file: Override for the history file location — presumably a
            test/internal hook (leading underscore). When given, the work
            directory's `last_session_id` is NOT updated, so the session will
            not be picked up by `continue_session`; confirm this is intended.

    Returns:
        The new Session with a fresh UUID and an empty history file.
    """
    logger.debug("Creating new session for work directory: {work_dir}", work_dir=work_dir)

    metadata = _load_metadata()
    # Find (or lazily register) the metadata entry for this work directory.
    work_dir_meta = next((wd for wd in metadata.work_dirs if wd.path == str(work_dir)), None)
    if work_dir_meta is None:
        work_dir_meta = WorkDirMeta(path=str(work_dir))
        metadata.work_dirs.append(work_dir_meta)

    session_id = str(uuid.uuid4())
    if _history_file is None:
        # Default path: per-work-dir sessions directory, remembered as the
        # latest session for `continue_session`.
        history_file = work_dir_meta.sessions_dir / f"{session_id}.jsonl"
        work_dir_meta.last_session_id = session_id
    else:
        logger.warning("Using provided history file: {history_file}", history_file=_history_file)
        _history_file.parent.mkdir(parents=True, exist_ok=True)
        if _history_file.exists():
            assert _history_file.is_file()
        history_file = _history_file

    if history_file.exists():
        # truncate if exists: a new session always starts with empty history
        logger.warning(
            "History file already exists, truncating: {history_file}", history_file=history_file
        )
        history_file.unlink()
    history_file.touch()

    # Persist the (possibly new) work-dir entry and last_session_id.
    _save_metadata(metadata)
    return Session(id=session_id, work_dir=work_dir_meta, history_file=history_file)
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def continue_session(work_dir: Path) -> Session | None:
    """Get the last session for a work directory.

    Returns None when the work directory has no metadata entry or has never
    recorded a session.
    """
    logger.debug("Continuing session for work directory: {work_dir}", work_dir=work_dir)

    metadata = _load_metadata()
    target = str(work_dir)
    work_dir_meta = None
    for wd in metadata.work_dirs:
        if wd.path == target:
            work_dir_meta = wd
            break
    if work_dir_meta is None:
        logger.debug("Work directory never been used")
        return None
    last_id = work_dir_meta.last_session_id
    if last_id is None:
        logger.debug("Work directory never had a session")
        return None

    logger.debug(
        "Found last session for work directory: {session_id}",
        session_id=last_id,
    )
    history_file = work_dir_meta.sessions_dir / f"{last_id}.jsonl"
    return Session(id=last_id, work_dir=work_dir_meta, history_file=history_file)
|