kimi-cli 0.44-py3-none-any.whl → 0.78-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kimi-cli might be problematic.

Files changed (137)
  1. kimi_cli/CHANGELOG.md +349 -40
  2. kimi_cli/__init__.py +6 -0
  3. kimi_cli/acp/AGENTS.md +91 -0
  4. kimi_cli/acp/__init__.py +13 -0
  5. kimi_cli/acp/convert.py +111 -0
  6. kimi_cli/acp/kaos.py +270 -0
  7. kimi_cli/acp/mcp.py +46 -0
  8. kimi_cli/acp/server.py +335 -0
  9. kimi_cli/acp/session.py +445 -0
  10. kimi_cli/acp/tools.py +158 -0
  11. kimi_cli/acp/types.py +13 -0
  12. kimi_cli/agents/default/agent.yaml +4 -4
  13. kimi_cli/agents/default/sub.yaml +2 -1
  14. kimi_cli/agents/default/system.md +79 -21
  15. kimi_cli/agents/okabe/agent.yaml +17 -0
  16. kimi_cli/agentspec.py +53 -25
  17. kimi_cli/app.py +180 -52
  18. kimi_cli/cli/__init__.py +595 -0
  19. kimi_cli/cli/__main__.py +8 -0
  20. kimi_cli/cli/info.py +63 -0
  21. kimi_cli/cli/mcp.py +349 -0
  22. kimi_cli/config.py +153 -17
  23. kimi_cli/constant.py +3 -0
  24. kimi_cli/exception.py +23 -2
  25. kimi_cli/flow/__init__.py +117 -0
  26. kimi_cli/flow/d2.py +376 -0
  27. kimi_cli/flow/mermaid.py +218 -0
  28. kimi_cli/llm.py +129 -23
  29. kimi_cli/metadata.py +32 -7
  30. kimi_cli/platforms.py +262 -0
  31. kimi_cli/prompts/__init__.py +2 -0
  32. kimi_cli/prompts/compact.md +4 -5
  33. kimi_cli/session.py +223 -31
  34. kimi_cli/share.py +2 -0
  35. kimi_cli/skill.py +145 -0
  36. kimi_cli/skills/kimi-cli-help/SKILL.md +55 -0
  37. kimi_cli/skills/skill-creator/SKILL.md +351 -0
  38. kimi_cli/soul/__init__.py +51 -20
  39. kimi_cli/soul/agent.py +213 -85
  40. kimi_cli/soul/approval.py +86 -17
  41. kimi_cli/soul/compaction.py +64 -53
  42. kimi_cli/soul/context.py +38 -5
  43. kimi_cli/soul/denwarenji.py +2 -0
  44. kimi_cli/soul/kimisoul.py +442 -60
  45. kimi_cli/soul/message.py +54 -54
  46. kimi_cli/soul/slash.py +72 -0
  47. kimi_cli/soul/toolset.py +387 -6
  48. kimi_cli/toad.py +74 -0
  49. kimi_cli/tools/AGENTS.md +5 -0
  50. kimi_cli/tools/__init__.py +42 -34
  51. kimi_cli/tools/display.py +25 -0
  52. kimi_cli/tools/dmail/__init__.py +10 -10
  53. kimi_cli/tools/dmail/dmail.md +11 -9
  54. kimi_cli/tools/file/__init__.py +1 -3
  55. kimi_cli/tools/file/glob.py +20 -23
  56. kimi_cli/tools/file/grep.md +1 -1
  57. kimi_cli/tools/file/{grep.py → grep_local.py} +51 -23
  58. kimi_cli/tools/file/read.md +24 -6
  59. kimi_cli/tools/file/read.py +134 -50
  60. kimi_cli/tools/file/replace.md +1 -1
  61. kimi_cli/tools/file/replace.py +36 -29
  62. kimi_cli/tools/file/utils.py +282 -0
  63. kimi_cli/tools/file/write.py +43 -22
  64. kimi_cli/tools/multiagent/__init__.py +7 -0
  65. kimi_cli/tools/multiagent/create.md +11 -0
  66. kimi_cli/tools/multiagent/create.py +50 -0
  67. kimi_cli/tools/{task/__init__.py → multiagent/task.py} +48 -53
  68. kimi_cli/tools/shell/__init__.py +120 -0
  69. kimi_cli/tools/{bash → shell}/bash.md +1 -2
  70. kimi_cli/tools/shell/powershell.md +25 -0
  71. kimi_cli/tools/test.py +4 -4
  72. kimi_cli/tools/think/__init__.py +2 -2
  73. kimi_cli/tools/todo/__init__.py +14 -8
  74. kimi_cli/tools/utils.py +64 -24
  75. kimi_cli/tools/web/fetch.py +68 -13
  76. kimi_cli/tools/web/search.py +10 -12
  77. kimi_cli/ui/acp/__init__.py +65 -412
  78. kimi_cli/ui/print/__init__.py +37 -49
  79. kimi_cli/ui/print/visualize.py +179 -0
  80. kimi_cli/ui/shell/__init__.py +141 -84
  81. kimi_cli/ui/shell/console.py +2 -0
  82. kimi_cli/ui/shell/debug.py +28 -23
  83. kimi_cli/ui/shell/keyboard.py +5 -1
  84. kimi_cli/ui/shell/prompt.py +220 -194
  85. kimi_cli/ui/shell/replay.py +111 -46
  86. kimi_cli/ui/shell/setup.py +89 -82
  87. kimi_cli/ui/shell/slash.py +422 -0
  88. kimi_cli/ui/shell/update.py +4 -2
  89. kimi_cli/ui/shell/usage.py +271 -0
  90. kimi_cli/ui/shell/visualize.py +574 -72
  91. kimi_cli/ui/wire/__init__.py +267 -0
  92. kimi_cli/ui/wire/jsonrpc.py +142 -0
  93. kimi_cli/ui/wire/protocol.py +1 -0
  94. kimi_cli/utils/__init__.py +0 -0
  95. kimi_cli/utils/aiohttp.py +2 -0
  96. kimi_cli/utils/aioqueue.py +72 -0
  97. kimi_cli/utils/broadcast.py +37 -0
  98. kimi_cli/utils/changelog.py +12 -7
  99. kimi_cli/utils/clipboard.py +12 -0
  100. kimi_cli/utils/datetime.py +37 -0
  101. kimi_cli/utils/environment.py +58 -0
  102. kimi_cli/utils/envvar.py +12 -0
  103. kimi_cli/utils/frontmatter.py +44 -0
  104. kimi_cli/utils/logging.py +7 -6
  105. kimi_cli/utils/message.py +9 -14
  106. kimi_cli/utils/path.py +99 -9
  107. kimi_cli/utils/pyinstaller.py +6 -0
  108. kimi_cli/utils/rich/__init__.py +33 -0
  109. kimi_cli/utils/rich/columns.py +99 -0
  110. kimi_cli/utils/rich/markdown.py +961 -0
  111. kimi_cli/utils/rich/markdown_sample.md +108 -0
  112. kimi_cli/utils/rich/markdown_sample_short.md +2 -0
  113. kimi_cli/utils/signals.py +2 -0
  114. kimi_cli/utils/slashcmd.py +124 -0
  115. kimi_cli/utils/string.py +2 -0
  116. kimi_cli/utils/term.py +168 -0
  117. kimi_cli/utils/typing.py +20 -0
  118. kimi_cli/wire/__init__.py +98 -29
  119. kimi_cli/wire/serde.py +45 -0
  120. kimi_cli/wire/types.py +299 -0
  121. kimi_cli-0.78.dist-info/METADATA +200 -0
  122. kimi_cli-0.78.dist-info/RECORD +135 -0
  123. kimi_cli-0.78.dist-info/entry_points.txt +4 -0
  124. kimi_cli/cli.py +0 -250
  125. kimi_cli/soul/runtime.py +0 -96
  126. kimi_cli/tools/bash/__init__.py +0 -99
  127. kimi_cli/tools/file/patch.md +0 -8
  128. kimi_cli/tools/file/patch.py +0 -143
  129. kimi_cli/tools/mcp.py +0 -85
  130. kimi_cli/ui/shell/liveview.py +0 -386
  131. kimi_cli/ui/shell/metacmd.py +0 -262
  132. kimi_cli/wire/message.py +0 -91
  133. kimi_cli-0.44.dist-info/METADATA +0 -188
  134. kimi_cli-0.44.dist-info/RECORD +0 -89
  135. kimi_cli-0.44.dist-info/entry_points.txt +0 -3
  136. /kimi_cli/tools/{task → multiagent}/task.md +0 -0
  137. {kimi_cli-0.44.dist-info → kimi_cli-0.78.dist-info}/WHEEL +0 -0
@@ -1,85 +1,93 @@
  import json
- from pathlib import Path
+ from typing import cast

- import streamingjson
+ import streamingjson  # type: ignore[reportMissingTypeStubs]
+ from kaos.path import KaosPath
  from kosong.utils.typing import JsonType

  from kimi_cli.utils.string import shorten_middle


- def extract_subtitle(lexer: streamingjson.Lexer, tool_name: str) -> str | None:
+ class SkipThisTool(Exception):
+     """Raised when a tool decides to skip itself from the loading process."""
+
+     pass
+
+
+ def extract_key_argument(json_content: str | streamingjson.Lexer, tool_name: str) -> str | None:
+     if isinstance(json_content, streamingjson.Lexer):
+         json_str = json_content.complete_json()
+     else:
+         json_str = json_content
      try:
-         curr_args: JsonType = json.loads(lexer.complete_json())
+         curr_args: JsonType = json.loads(json_str)
      except json.JSONDecodeError:
          return None
      if not curr_args:
          return None
-     subtitle: str = ""
+     key_argument: str = ""
      match tool_name:
          case "Task":
              if not isinstance(curr_args, dict) or not curr_args.get("description"):
                  return None
-             subtitle = str(curr_args["description"])
+             key_argument = str(curr_args["description"])
+         case "CreateSubagent":
+             if not isinstance(curr_args, dict) or not curr_args.get("name"):
+                 return None
+             key_argument = str(curr_args["name"])
          case "SendDMail":
-             return "El Psy Kongroo"
+             return None
          case "Think":
              if not isinstance(curr_args, dict) or not curr_args.get("thought"):
                  return None
-             subtitle = str(curr_args["thought"])
+             key_argument = str(curr_args["thought"])
          case "SetTodoList":
-             if not isinstance(curr_args, dict) or not curr_args.get("todos"):
-                 return None
-             if not isinstance(curr_args["todos"], list):
-                 return None
-             for todo in curr_args["todos"]:
-                 if not isinstance(todo, dict) or not todo.get("title"):
-                     continue
-                 subtitle += f"• {todo['title']}"
-                 if todo.get("status"):
-                     subtitle += f" [{todo['status']}]"
-                 subtitle += "\n"
-             return "\n" + subtitle.strip()
-         case "Bash":
+             return None
+         case "Shell":
              if not isinstance(curr_args, dict) or not curr_args.get("command"):
                  return None
-             subtitle = str(curr_args["command"])
+             key_argument = str(curr_args["command"])
          case "ReadFile":
              if not isinstance(curr_args, dict) or not curr_args.get("path"):
                  return None
-             subtitle = _normalize_path(str(curr_args["path"]))
+             key_argument = _normalize_path(str(curr_args["path"]))
          case "Glob":
              if not isinstance(curr_args, dict) or not curr_args.get("pattern"):
                  return None
-             subtitle = str(curr_args["pattern"])
+             key_argument = str(curr_args["pattern"])
          case "Grep":
              if not isinstance(curr_args, dict) or not curr_args.get("pattern"):
                  return None
-             subtitle = str(curr_args["pattern"])
+             key_argument = str(curr_args["pattern"])
          case "WriteFile":
              if not isinstance(curr_args, dict) or not curr_args.get("path"):
                  return None
-             subtitle = _normalize_path(str(curr_args["path"]))
+             key_argument = _normalize_path(str(curr_args["path"]))
          case "StrReplaceFile":
              if not isinstance(curr_args, dict) or not curr_args.get("path"):
                  return None
-             subtitle = _normalize_path(str(curr_args["path"]))
+             key_argument = _normalize_path(str(curr_args["path"]))
          case "SearchWeb":
              if not isinstance(curr_args, dict) or not curr_args.get("query"):
                  return None
-             subtitle = str(curr_args["query"])
+             key_argument = str(curr_args["query"])
          case "FetchURL":
              if not isinstance(curr_args, dict) or not curr_args.get("url"):
                  return None
-             subtitle = str(curr_args["url"])
+             key_argument = str(curr_args["url"])
          case _:
-             subtitle = "".join(lexer.json_content)
-     if tool_name not in ["SetTodoList"]:
-         subtitle = shorten_middle(subtitle, width=50)
-     return subtitle
+             if isinstance(json_content, streamingjson.Lexer):
+                 # lexer.json_content is list[str] based on streamingjson source code
+                 content: list[str] = cast(list[str], json_content.json_content)  # type: ignore[reportUnknownMemberType]
+                 key_argument = "".join(content)
+             else:
+                 key_argument = json_content
+     key_argument = shorten_middle(key_argument, width=50)
+     return key_argument


  def _normalize_path(path: str) -> str:
-     cwd = str(Path.cwd().absolute())
+     cwd = str(KaosPath.cwd().canonical())
      if path.startswith(cwd):
          path = path[len(cwd) :].lstrip("/\\")
      return path
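
The hunk above renames `extract_subtitle` to `extract_key_argument` and lets it take either a raw JSON string or a `streamingjson.Lexer`. A minimal sketch of the string path follows; the import path is a guess based on the "Files changed" list (one of the visualize modules), so treat it as hypothetical:

```python
# Sketch only - adjust the import to wherever extract_key_argument actually lives in 0.78.
from kimi_cli.ui.shell.visualize import extract_key_argument  # hypothetical path

shell_args = '{"command": "rg --files | head -n 5"}'

# A plain JSON string is parsed directly; for a Shell call the "command"
# field becomes the key argument, middle-shortened to 50 characters.
print(extract_key_argument(shell_args, "Shell"))     # "rg --files | head -n 5"
print(extract_key_argument(shell_args, "ReadFile"))  # None: no "path" argument
print(extract_key_argument("not json", "Shell"))     # None: JSONDecodeError
```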
@@ -0,0 +1,25 @@
+ from typing import Literal
+
+ from kosong.tooling import DisplayBlock
+ from pydantic import BaseModel
+
+
+ class DiffDisplayBlock(DisplayBlock):
+     """Display block describing a file diff."""
+
+     type: str = "diff"
+     path: str
+     old_text: str
+     new_text: str
+
+
+ class TodoDisplayItem(BaseModel):
+     title: str
+     status: Literal["pending", "in_progress", "done"]
+
+
+ class TodoDisplayBlock(DisplayBlock):
+     """Display block describing a todo list update."""
+
+     type: str = "todo"
+     items: list[TodoDisplayItem]
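
This new file (kimi_cli/tools/display.py in the file list) adds pydantic display blocks for diffs and todo lists. A small usage sketch, assuming `kosong.tooling.DisplayBlock` requires no fields beyond those shown here:

```python
from kimi_cli.tools.display import DiffDisplayBlock, TodoDisplayBlock, TodoDisplayItem

# Assumption: DisplayBlock (from kosong.tooling) adds no required fields of its own.
diff_block = DiffDisplayBlock(
    path="src/app.py",
    old_text="print('hi')\n",
    new_text="print('hello')\n",
)

todo_block = TodoDisplayBlock(
    items=[
        TodoDisplayItem(title="Write tests", status="in_progress"),
        TodoDisplayItem(title="Update changelog", status="pending"),
    ]
)

# Both are pydantic models, so the UI layer can serialize them directly.
print(diff_block.model_dump())
print(todo_block.model_dump())
```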
@@ -1,24 +1,25 @@
  from pathlib import Path
  from typing import override

- from kosong.tooling import CallableTool2, ToolError, ToolReturnType
+ from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnValue

  from kimi_cli.soul.denwarenji import DenwaRenji, DenwaRenjiError, DMail
+ from kimi_cli.tools.utils import load_desc

  NAME = "SendDMail"


- class SendDMail(CallableTool2):
+ class SendDMail(CallableTool2[DMail]):
      name: str = NAME
-     description: str = (Path(__file__).parent / "dmail.md").read_text(encoding="utf-8")
+     description: str = load_desc(Path(__file__).parent / "dmail.md")
      params: type[DMail] = DMail

-     def __init__(self, denwa_renji: DenwaRenji, **kwargs):
-         super().__init__(**kwargs)
+     def __init__(self, denwa_renji: DenwaRenji) -> None:
+         super().__init__()
          self._denwa_renji = denwa_renji

      @override
-     async def __call__(self, params: DMail) -> ToolReturnType:
+     async def __call__(self, params: DMail) -> ToolReturnValue:
          try:
              self._denwa_renji.send_dmail(params)
          except DenwaRenjiError as e:
@@ -27,12 +28,11 @@ class SendDMail(CallableTool2):
                  message=f"Failed to send D-Mail. Error: {str(e)}",
                  brief="Failed to send D-Mail",
              )
-         # always return an error because a successful SendDMail call will never return
-         return ToolError(
+         return ToolOk(
              output="",
              message=(
-                 "If you see this message, the D-Mail was not sent successfully. "
+                 "If you see this message, the D-Mail was NOT sent successfully. "
                  "This may be because some other tool that needs approval was rejected."
              ),
-             brief="D-Mail not sent",
+             brief="El Psy Kongroo",
          )
@@ -1,15 +1,17 @@
  Send a message to the past, just like sending a D-Mail in Steins;Gate.

- You can see some `user` messages with `CHECKPOINT {checkpoint_id}` wrapped in `<system>` tags in the context. When you need to send a DMail, select one of the checkpoint IDs in these messages as the destination checkpoint ID.
+ This tool is provided to enable you to proactively manage the context. You can see some `user` messages with text `CHECKPOINT {checkpoint_id}` wrapped in `<system>` tags in the context. When you feel there is too much irrelevant information in the current context, you can send a D-Mail to revert the context to a previous checkpoint with a message containing only the useful information. When you send a D-Mail, you must specify an existing checkpoint ID from the before-mentioned messages.

- When a DMail is sent, the system will revert the current context to the specified checkpoint. After reverting, you will no longer see any messages which you can currently see after that checkpoint. The message in the DMail will be appended to the end of the context. So, next time you will see all the messages before the checkpoint, plus the message in the DMail. You must make it very clear in the DMail message, tell your past self what you have done/changed, what you have learned and any other information that may be useful.
+ Typical scenarios you may want to send a D-Mail:

- When sending a DMail, DO NOT do much explanation to the user. The user do not care about this. Just explain to your past self.
+ - You read a file, found it very large and most of the content is not relevant to the current task. In this case you can send a D-Mail immediately to the checkpoint before you read the file and give your past self only the useful part.
+ - You searched the web, the result is large.
+ - If you got what you need, you may send a D-Mail to the checkpoint before you searched the web and put only the useful result in the mail message.
+ - If you did not get what you need, you may send a D-Mail to tell your past self to try another query.
+ - You wrote some code and it did not work as expected. You spent many struggling steps to fix it but the process is not relevant to the ultimate goal. In this case you can send a D-Mail to the checkpoint before you wrote the code and give your past self the fixed version of the code and tell yourself no need to write it again because you already wrote to the filesystem.

- Here are some typical scenarios you may want to send a DMail:
+ After a D-Mail is sent, the system will revert the current context to the specified checkpoint, after which, you will no longer see any messages which you can now see after that checkpoint. The message in the D-Mail will be appended to the end of the context. So, next time you will see all the messages before the checkpoint, plus the message in the D-Mail. You must make it very clear in the message, tell your past self what you have done/changed, what you have learned and any other information that may be useful, so that your past self can continue the task without confusion and will not repeat the steps you have already done.

- - You read a file, found it very large and most of the content is not relevant to the current task. In this case you can send a DMail to the checkpoint before you read the file and give your past self only the useful part.
- - You searched the web, found the result very large.
- - If you got what you need, you may send a DMail to the checkpoint before you searched the web and give your past self the useful part.
- - If you did not get what you need, you may send a DMail to tell your past self to try another query.
- - You wrote some code and it did not work as expected. You spent many struggling steps to fix it but the process is not relevant to the ultimate goal. In this case you can send a DMail to the checkpoint before you wrote the code and give your past self the fixed version of the code and tell yourself no need to write it again because you already wrote to the filesystem.
+ You must understand that, unlike D-Mail in Steins;Gate, the D-Mail you send here will not revert the filesystem or any external state. That means, you are basically folding the recent messages in your context into a single message, which can significantly reduce the waste of context window.
+
+ When sending a D-Mail, DO NOT explain to the user. The user do not care about this. Just explain to your past self.
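
The rewritten dmail.md frames D-Mail as context folding: pick an existing checkpoint and hand your past self only the distilled findings. An illustrative, entirely made-up example of the message shape the instructions call for:

```python
# Illustrative only - not package code. Checkpoint markers appear in the
# context as `user` messages like this:
checkpoint_marker = "<system>CHECKPOINT 12</system>"

# A D-Mail message briefs the "past self", not the user: what was done, what
# was learned, and what not to repeat.
dmail_message = (
    "I read data/report.json after checkpoint 12; only the `summary` field matters, "
    "copied here: 'Q3 revenue up 14%, churn flat.' The rest of the file is noise - "
    "do not read it again. Next step: fold this number into docs/overview.md."
)
# SendDMail pairs a message like this with an existing checkpoint ID; the exact
# DMail field names live in kimi_cli.soul.denwarenji and are not shown in this diff.
```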
@@ -13,8 +13,7 @@ class FileActions(str, Enum):


  from .glob import Glob  # noqa: E402
- from .grep import Grep  # noqa: E402
- from .patch import PatchFile  # noqa: E402
+ from .grep_local import Grep  # noqa: E402
  from .read import ReadFile  # noqa: E402
  from .replace import StrReplaceFile  # noqa: E402
  from .write import WriteFile  # noqa: E402
@@ -25,5 +24,4 @@ __all__ = (
      "Grep",
      "WriteFile",
      "StrReplaceFile",
-     "PatchFile",
  )
@@ -1,15 +1,15 @@
  """Glob tool implementation."""

- import asyncio
  from pathlib import Path
  from typing import override

- import aiofiles.os
- from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
+ from kaos.path import KaosPath
+ from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnValue
  from pydantic import BaseModel, Field

- from kimi_cli.soul.runtime import BuiltinSystemPromptArgs
+ from kimi_cli.soul.agent import BuiltinSystemPromptArgs
  from kimi_cli.tools.utils import load_desc
+ from kimi_cli.utils.path import is_within_directory, list_directory

  MAX_MATCHES = 1000

@@ -38,17 +38,16 @@ class Glob(CallableTool2[Params]):
      )
      params: type[Params] = Params

-     def __init__(self, builtin_args: BuiltinSystemPromptArgs, **kwargs):
-         super().__init__(**kwargs)
+     def __init__(self, builtin_args: BuiltinSystemPromptArgs) -> None:
+         super().__init__()
          self._work_dir = builtin_args.KIMI_WORK_DIR

      async def _validate_pattern(self, pattern: str) -> ToolError | None:
          """Validate that the pattern is safe to use."""
          if pattern.startswith("**"):
-             # TODO: give a `ls -la` result as the output
-             ls_result = await aiofiles.os.listdir(self._work_dir)
+             ls_result = await list_directory(self._work_dir)
              return ToolError(
-                 output="\n".join(ls_result),
+                 output=ls_result,
                  message=(
                      f"Pattern `{pattern}` starts with '**' which is not allowed. "
                      "This would recursively search all directories and may include large "
@@ -60,13 +59,12 @@ class Glob(CallableTool2[Params]):
              )
          return None

-     def _validate_directory(self, directory: Path) -> ToolError | None:
+     async def _validate_directory(self, directory: KaosPath) -> ToolError | None:
          """Validate that the directory is safe to search."""
-         resolved_dir = directory.resolve()
-         resolved_work_dir = self._work_dir.resolve()
+         resolved_dir = directory.canonical()

          # Ensure the directory is within work directory
-         if not str(resolved_dir).startswith(str(resolved_work_dir)):
+         if not is_within_directory(resolved_dir, self._work_dir):
              return ToolError(
                  message=(
                      f"`{directory}` is outside the working directory. "
@@ -77,14 +75,14 @@ class Glob(CallableTool2[Params]):
          return None

      @override
-     async def __call__(self, params: Params) -> ToolReturnType:
+     async def __call__(self, params: Params) -> ToolReturnValue:
          try:
              # Validate pattern safety
              pattern_error = await self._validate_pattern(params.pattern)
              if pattern_error:
                  return pattern_error

-             dir_path = Path(params.directory) if params.directory else self._work_dir
+             dir_path = KaosPath(params.directory) if params.directory else self._work_dir

              if not dir_path.is_absolute():
                  return ToolError(
@@ -96,30 +94,29 @@ class Glob(CallableTool2[Params]):
                  )

              # Validate directory safety
-             dir_error = self._validate_directory(dir_path)
+             dir_error = await self._validate_directory(dir_path)
              if dir_error:
                  return dir_error

-             if not dir_path.exists():
+             if not await dir_path.exists():
                  return ToolError(
                      message=f"`{params.directory}` does not exist.",
                      brief="Directory not found",
                  )
-             if not dir_path.is_dir():
+             if not await dir_path.is_dir():
                  return ToolError(
                      message=f"`{params.directory}` is not a directory.",
                      brief="Invalid directory",
                  )

-             def _glob(pattern: str) -> list[Path]:
-                 return list(dir_path.glob(pattern))
-
              # Perform the glob search - users can use ** directly in pattern
-             matches = await asyncio.to_thread(_glob, params.pattern)
+             matches: list[KaosPath] = []
+             async for match in dir_path.glob(params.pattern):
+                 matches.append(match)

              # Filter out directories if not requested
              if not params.include_dirs:
-                 matches = [p for p in matches if p.is_file()]
+                 matches = [p for p in matches if await p.is_file()]

              # Sort for consistent output
              matches.sort()
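
The Glob tool now goes through KaosPath, whose filesystem calls are awaitable and whose `glob()` is an async iterator. A self-contained sketch of that pattern, assuming the `kaos` package exposes exactly the methods used in the hunk above:

```python
# Sketch of the async filesystem pattern the rewritten Glob tool relies on.
import asyncio

from kaos.path import KaosPath


async def list_python_files(root: str) -> list[KaosPath]:
    directory = KaosPath(root)
    if not await directory.exists():   # exists() is a coroutine here
        return []
    matches: list[KaosPath] = []
    async for match in directory.glob("*.py"):  # glob() yields asynchronously
        if await match.is_file():
            matches.append(match)
    matches.sort()
    return matches


if __name__ == "__main__":
    print(asyncio.run(list_python_files("/tmp")))
```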
@@ -1,5 +1,5 @@
  A powerful search tool based-on ripgrep.

  **Tips:**
- - ALWAYS use Grep tool instead of running `grep` or `rg` command with Bash tool.
+ - ALWAYS use Grep tool instead of running `grep` or `rg` command with Shell tool.
  - Use the ripgrep pattern syntax, not grep syntax. E.g. you need to escape braces like `\\{` to search for `{`.
@@ -1,20 +1,26 @@
+ """
+ The local version of the Grep tool using ripgrep.
+ Be cautious that `KaosPath` is not used in this implementation.
+ """
+
  import asyncio
- import os
  import platform
  import shutil
  import stat
  import tarfile
  import tempfile
+ import zipfile
  from pathlib import Path
  from typing import override

  import aiohttp
- import ripgrepy
- from kosong.tooling import CallableTool2, ToolError, ToolOk, ToolReturnType
+ import ripgrepy  # type: ignore[reportMissingTypeStubs]
+ from kosong.tooling import CallableTool2, ToolError, ToolReturnValue
  from pydantic import BaseModel, Field

  import kimi_cli
  from kimi_cli.share import get_share_dir
+ from kimi_cli.tools.utils import ToolResultBuilder, load_desc
  from kimi_cli.utils.aiohttp import new_client_session
  from kimi_cli.utils.logging import logger

@@ -113,7 +119,7 @@ _RG_DOWNLOAD_LOCK = asyncio.Lock()


  def _rg_binary_name() -> str:
-     return "rg.exe" if os.name == "nt" else "rg"
+     return "rg.exe" if platform.system() == "Windows" else "rg"


  def _find_existing_rg(bin_name: str) -> Path | None:
@@ -121,6 +127,7 @@ def _find_existing_rg(bin_name: str) -> Path | None:
      if share_bin.is_file():
          return share_bin

+     assert kimi_cli.__file__ is not None
      local_dep = Path(kimi_cli.__file__).parent / "deps" / "bin" / bin_name
      if local_dep.is_file():
          return local_dep
@@ -148,6 +155,8 @@ def _detect_target() -> str | None:
          os_name = "apple-darwin"
      elif sys_name == "Linux":
          os_name = "unknown-linux-musl" if arch == "x86_64" else "unknown-linux-gnu"
+     elif sys_name == "Windows":
+         os_name = "pc-windows-msvc"
      else:
          logger.error("Unsupported operating system for ripgrep: {sys_name}", sys_name=sys_name)
          return None
@@ -160,7 +169,9 @@ async def _download_and_install_rg(bin_name: str) -> Path:
      if not target:
          raise RuntimeError("Unsupported platform for ripgrep download")

-     filename = f"ripgrep-{RG_VERSION}-{target}.tar.gz"
+     is_windows = "windows" in target
+     archive_ext = "zip" if is_windows else "tar.gz"
+     filename = f"ripgrep-{RG_VERSION}-{target}.{archive_ext}"
      url = f"{RG_BASE_URL}/{filename}"
      logger.info("Downloading ripgrep from {url}", url=url)

@@ -183,19 +194,30 @@ async def _download_and_install_rg(bin_name: str) -> Path:
          raise RuntimeError("Failed to download ripgrep binary") from exc

      try:
-         with tarfile.open(tar_path, "r:gz") as tar:
-             member = next(
-                 (m for m in tar.getmembers() if Path(m.name).name == bin_name),
-                 None,
-             )
-             if not member:
-                 raise RuntimeError("Ripgrep binary not found in archive")
-             extracted = tar.extractfile(member)
-             if not extracted:
-                 raise RuntimeError("Failed to extract ripgrep binary")
-             with open(destination, "wb") as dest_fh:
-                 shutil.copyfileobj(extracted, dest_fh)
-     except (tarfile.TarError, OSError) as exc:
+         if is_windows:
+             with zipfile.ZipFile(tar_path, "r") as zf:
+                 member_name = next(
+                     (name for name in zf.namelist() if Path(name).name == bin_name),
+                     None,
+                 )
+                 if not member_name:
+                     raise RuntimeError("Ripgrep binary not found in archive")
+                 with zf.open(member_name) as source, open(destination, "wb") as dest_fh:
+                     shutil.copyfileobj(source, dest_fh)
+         else:
+             with tarfile.open(tar_path, "r:gz") as tar:
+                 member = next(
+                     (m for m in tar.getmembers() if Path(m.name).name == bin_name),
+                     None,
+                 )
+                 if not member:
+                     raise RuntimeError("Ripgrep binary not found in archive")
+                 extracted = tar.extractfile(member)
+                 if not extracted:
+                     raise RuntimeError("Failed to extract ripgrep binary")
+                 with open(destination, "wb") as dest_fh:
+                     shutil.copyfileobj(extracted, dest_fh)
+     except (zipfile.BadZipFile, tarfile.TarError, OSError) as exc:
          raise RuntimeError("Failed to extract ripgrep archive") from exc

      destination.chmod(destination.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
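
The download path now handles the .zip archives ripgrep publishes for Windows alongside the .tar.gz archives used elsewhere. A generic standard-library sketch of pulling one named member out of either archive type (not the package's own code):

```python
import shutil
import tarfile
import zipfile
from pathlib import Path


def extract_member(archive: Path, member_basename: str, destination: Path) -> None:
    # Mirror the branch above: zip archives for Windows, gzipped tarballs otherwise.
    if archive.suffix == ".zip":
        with zipfile.ZipFile(archive, "r") as zf:
            name = next((n for n in zf.namelist() if Path(n).name == member_basename), None)
            if name is None:
                raise RuntimeError(f"{member_basename} not found in {archive}")
            with zf.open(name) as src, open(destination, "wb") as dst:
                shutil.copyfileobj(src, dst)
    else:
        with tarfile.open(archive, "r:gz") as tar:
            member = next((m for m in tar.getmembers() if Path(m.name).name == member_basename), None)
            if member is None:
                raise RuntimeError(f"{member_basename} not found in {archive}")
            extracted = tar.extractfile(member)
            if extracted is None:
                raise RuntimeError(f"could not extract {member_basename}")
            with open(destination, "wb") as dst:
                shutil.copyfileobj(extracted, dst)
```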
@@ -220,12 +242,15 @@ async def _ensure_rg_path() -> str:

  class Grep(CallableTool2[Params]):
      name: str = "Grep"
-     description: str = (Path(__file__).parent / "grep.md").read_text(encoding="utf-8")
+     description: str = load_desc(Path(__file__).parent / "grep.md")
      params: type[Params] = Params

      @override
-     async def __call__(self, params: Params) -> ToolReturnType:
+     async def __call__(self, params: Params) -> ToolReturnValue:
          try:
+             builder = ToolResultBuilder()
+             message = ""
+
              # Initialize ripgrep with pattern and path
              rg_path = await _ensure_rg_path()
              logger.debug("Using ripgrep binary: {rg_bin}", rg_bin=rg_path)
@@ -261,7 +286,7 @@ class Grep(CallableTool2[Params]):
                  rg = rg.count_matches()

              # Execute search
-             result = rg.run()
+             result = rg.run(universal_newlines=False)

              # Get results
              output = result.as_string
@@ -272,12 +297,15 @@ class Grep(CallableTool2[Params]):
              if len(lines) > params.head_limit:
                  lines = lines[: params.head_limit]
                  output = "\n".join(lines)
+                 message = f"Results truncated to first {params.head_limit} lines"
                  if params.output_mode in ["content", "files_with_matches", "count_matches"]:
                      output += f"\n... (results truncated to {params.head_limit} lines)"

              if not output:
-                 return ToolOk(output="", message="No matches found")
-             return ToolOk(output=output)
+                 return builder.ok(message="No matches found")
+
+             builder.write(output)
+             return builder.ok(message=message)

          except Exception as e:
              return ToolError(
@@ -3,12 +3,30 @@ Read content from a file.
  **Tips:**
  - Make sure you follow the description of each tool parameter.
  - A `<system>` tag will be given before the read file content.
- - Content will be returned with a line number before each line like `cat -n` format.
- - Use `line_offset` and `n_lines` parameters when you only need to read a part of the file.
- - The maximum number of lines that can be read at once is ${MAX_LINES}.
- - Any lines longer than ${MAX_LINE_LENGTH} characters will be truncated, ending with "...".
- - The system will notify you when there is any limitation hit when reading the file.
+ - The system will notify you when there is anything wrong when reading the file.
  - This tool is a tool that you typically want to use in parallel. Always read multiple files in one response when possible.
- - This tool can only read text files. To list directories, you must use the Glob tool or `ls` command via the Bash tool. To read other file types, use appropriate commands via the Bash tool.
+ - This tool can only read text, image and video files. To list directories, you must use the Glob tool or `ls` command via the Shell tool. To read other file types, use appropriate commands via the Shell tool.
  - If the file doesn't exist or path is invalid, an error will be returned.
  - If you want to search for a certain content/pattern, prefer Grep tool over ReadFile.
+ - For text files:
+   - Content will be returned with a line number before each line like `cat -n` format.
+   - Use `line_offset` and `n_lines` parameters when you only need to read a part of the file.
+   - The maximum number of lines that can be read at once is ${MAX_LINES}.
+   - Any lines longer than ${MAX_LINE_LENGTH} characters will be truncated, ending with "...".
+ {% if "image_in" in capabilities and "video_in" in capabilities %}
+ - For image and video files:
+   - Content will be returned in a form that you can view and understand. Feel confident to read image/video files with this tool.
+   - The maximum size that can be read is ${MAX_MEDIA_BYTES} bytes. An error will be returned if the file is larger than this limit.
+ {% elif "image_in" in capabilities %}
+ - For image files:
+   - Content will be returned in a form that you can view and understand. Feel confident to read image files with this tool.
+   - The maximum size that can be read is ${MAX_MEDIA_BYTES} bytes. An error will be returned if the file is larger than this limit.
+   - Other media files (e.g., video, PDFs) are not supported by this tool. Use other proper tools to process them.
+ {% elif "video_in" in capabilities %}
+ - For video files:
+   - Content will be returned in a form that you can view and understand. Feel confident to read video files with this tool.
+   - The maximum size that can be read is ${MAX_MEDIA_BYTES} bytes. An error will be returned if the file is larger than this limit.
+   - Other media files (e.g., image, PDFs) are not supported by this tool. Use other proper tools to process them.
+ {% else %}
+ - Media files (e.g., image, video, PDFs) are not supported by this tool. Use other proper tools to process them.
+ {% endif %}
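
The new read.md gates its media guidance on a `capabilities` collection via Jinja blocks and leaves `${...}` placeholders for limits. One way such a template could be rendered is sketched below; kimi-cli's actual loader is not shown in this diff, so the Jinja-plus-string.Template combination and the limit values are assumptions for illustration:

```python
from string import Template

from jinja2 import Template as JinjaTemplate

read_md = """\
- For text files:
  - The maximum number of lines that can be read at once is ${MAX_LINES}.
{% if "image_in" in capabilities %}
- For image files:
  - The maximum size that can be read is ${MAX_MEDIA_BYTES} bytes.
{% endif %}"""

# First pass: resolve the capability-gated blocks.
rendered = JinjaTemplate(read_md).render(capabilities={"image_in"})
# Second pass: fill the ${...} limits (example values only).
print(Template(rendered).substitute(MAX_LINES=2000, MAX_MEDIA_BYTES=5_000_000))
```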