kimi-cli 0.44-py3-none-any.whl → 0.78-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (137)
  1. kimi_cli/CHANGELOG.md +349 -40
  2. kimi_cli/__init__.py +6 -0
  3. kimi_cli/acp/AGENTS.md +91 -0
  4. kimi_cli/acp/__init__.py +13 -0
  5. kimi_cli/acp/convert.py +111 -0
  6. kimi_cli/acp/kaos.py +270 -0
  7. kimi_cli/acp/mcp.py +46 -0
  8. kimi_cli/acp/server.py +335 -0
  9. kimi_cli/acp/session.py +445 -0
  10. kimi_cli/acp/tools.py +158 -0
  11. kimi_cli/acp/types.py +13 -0
  12. kimi_cli/agents/default/agent.yaml +4 -4
  13. kimi_cli/agents/default/sub.yaml +2 -1
  14. kimi_cli/agents/default/system.md +79 -21
  15. kimi_cli/agents/okabe/agent.yaml +17 -0
  16. kimi_cli/agentspec.py +53 -25
  17. kimi_cli/app.py +180 -52
  18. kimi_cli/cli/__init__.py +595 -0
  19. kimi_cli/cli/__main__.py +8 -0
  20. kimi_cli/cli/info.py +63 -0
  21. kimi_cli/cli/mcp.py +349 -0
  22. kimi_cli/config.py +153 -17
  23. kimi_cli/constant.py +3 -0
  24. kimi_cli/exception.py +23 -2
  25. kimi_cli/flow/__init__.py +117 -0
  26. kimi_cli/flow/d2.py +376 -0
  27. kimi_cli/flow/mermaid.py +218 -0
  28. kimi_cli/llm.py +129 -23
  29. kimi_cli/metadata.py +32 -7
  30. kimi_cli/platforms.py +262 -0
  31. kimi_cli/prompts/__init__.py +2 -0
  32. kimi_cli/prompts/compact.md +4 -5
  33. kimi_cli/session.py +223 -31
  34. kimi_cli/share.py +2 -0
  35. kimi_cli/skill.py +145 -0
  36. kimi_cli/skills/kimi-cli-help/SKILL.md +55 -0
  37. kimi_cli/skills/skill-creator/SKILL.md +351 -0
  38. kimi_cli/soul/__init__.py +51 -20
  39. kimi_cli/soul/agent.py +213 -85
  40. kimi_cli/soul/approval.py +86 -17
  41. kimi_cli/soul/compaction.py +64 -53
  42. kimi_cli/soul/context.py +38 -5
  43. kimi_cli/soul/denwarenji.py +2 -0
  44. kimi_cli/soul/kimisoul.py +442 -60
  45. kimi_cli/soul/message.py +54 -54
  46. kimi_cli/soul/slash.py +72 -0
  47. kimi_cli/soul/toolset.py +387 -6
  48. kimi_cli/toad.py +74 -0
  49. kimi_cli/tools/AGENTS.md +5 -0
  50. kimi_cli/tools/__init__.py +42 -34
  51. kimi_cli/tools/display.py +25 -0
  52. kimi_cli/tools/dmail/__init__.py +10 -10
  53. kimi_cli/tools/dmail/dmail.md +11 -9
  54. kimi_cli/tools/file/__init__.py +1 -3
  55. kimi_cli/tools/file/glob.py +20 -23
  56. kimi_cli/tools/file/grep.md +1 -1
  57. kimi_cli/tools/file/{grep.py → grep_local.py} +51 -23
  58. kimi_cli/tools/file/read.md +24 -6
  59. kimi_cli/tools/file/read.py +134 -50
  60. kimi_cli/tools/file/replace.md +1 -1
  61. kimi_cli/tools/file/replace.py +36 -29
  62. kimi_cli/tools/file/utils.py +282 -0
  63. kimi_cli/tools/file/write.py +43 -22
  64. kimi_cli/tools/multiagent/__init__.py +7 -0
  65. kimi_cli/tools/multiagent/create.md +11 -0
  66. kimi_cli/tools/multiagent/create.py +50 -0
  67. kimi_cli/tools/{task/__init__.py → multiagent/task.py} +48 -53
  68. kimi_cli/tools/shell/__init__.py +120 -0
  69. kimi_cli/tools/{bash → shell}/bash.md +1 -2
  70. kimi_cli/tools/shell/powershell.md +25 -0
  71. kimi_cli/tools/test.py +4 -4
  72. kimi_cli/tools/think/__init__.py +2 -2
  73. kimi_cli/tools/todo/__init__.py +14 -8
  74. kimi_cli/tools/utils.py +64 -24
  75. kimi_cli/tools/web/fetch.py +68 -13
  76. kimi_cli/tools/web/search.py +10 -12
  77. kimi_cli/ui/acp/__init__.py +65 -412
  78. kimi_cli/ui/print/__init__.py +37 -49
  79. kimi_cli/ui/print/visualize.py +179 -0
  80. kimi_cli/ui/shell/__init__.py +141 -84
  81. kimi_cli/ui/shell/console.py +2 -0
  82. kimi_cli/ui/shell/debug.py +28 -23
  83. kimi_cli/ui/shell/keyboard.py +5 -1
  84. kimi_cli/ui/shell/prompt.py +220 -194
  85. kimi_cli/ui/shell/replay.py +111 -46
  86. kimi_cli/ui/shell/setup.py +89 -82
  87. kimi_cli/ui/shell/slash.py +422 -0
  88. kimi_cli/ui/shell/update.py +4 -2
  89. kimi_cli/ui/shell/usage.py +271 -0
  90. kimi_cli/ui/shell/visualize.py +574 -72
  91. kimi_cli/ui/wire/__init__.py +267 -0
  92. kimi_cli/ui/wire/jsonrpc.py +142 -0
  93. kimi_cli/ui/wire/protocol.py +1 -0
  94. kimi_cli/utils/__init__.py +0 -0
  95. kimi_cli/utils/aiohttp.py +2 -0
  96. kimi_cli/utils/aioqueue.py +72 -0
  97. kimi_cli/utils/broadcast.py +37 -0
  98. kimi_cli/utils/changelog.py +12 -7
  99. kimi_cli/utils/clipboard.py +12 -0
  100. kimi_cli/utils/datetime.py +37 -0
  101. kimi_cli/utils/environment.py +58 -0
  102. kimi_cli/utils/envvar.py +12 -0
  103. kimi_cli/utils/frontmatter.py +44 -0
  104. kimi_cli/utils/logging.py +7 -6
  105. kimi_cli/utils/message.py +9 -14
  106. kimi_cli/utils/path.py +99 -9
  107. kimi_cli/utils/pyinstaller.py +6 -0
  108. kimi_cli/utils/rich/__init__.py +33 -0
  109. kimi_cli/utils/rich/columns.py +99 -0
  110. kimi_cli/utils/rich/markdown.py +961 -0
  111. kimi_cli/utils/rich/markdown_sample.md +108 -0
  112. kimi_cli/utils/rich/markdown_sample_short.md +2 -0
  113. kimi_cli/utils/signals.py +2 -0
  114. kimi_cli/utils/slashcmd.py +124 -0
  115. kimi_cli/utils/string.py +2 -0
  116. kimi_cli/utils/term.py +168 -0
  117. kimi_cli/utils/typing.py +20 -0
  118. kimi_cli/wire/__init__.py +98 -29
  119. kimi_cli/wire/serde.py +45 -0
  120. kimi_cli/wire/types.py +299 -0
  121. kimi_cli-0.78.dist-info/METADATA +200 -0
  122. kimi_cli-0.78.dist-info/RECORD +135 -0
  123. kimi_cli-0.78.dist-info/entry_points.txt +4 -0
  124. kimi_cli/cli.py +0 -250
  125. kimi_cli/soul/runtime.py +0 -96
  126. kimi_cli/tools/bash/__init__.py +0 -99
  127. kimi_cli/tools/file/patch.md +0 -8
  128. kimi_cli/tools/file/patch.py +0 -143
  129. kimi_cli/tools/mcp.py +0 -85
  130. kimi_cli/ui/shell/liveview.py +0 -386
  131. kimi_cli/ui/shell/metacmd.py +0 -262
  132. kimi_cli/wire/message.py +0 -91
  133. kimi_cli-0.44.dist-info/METADATA +0 -188
  134. kimi_cli-0.44.dist-info/RECORD +0 -89
  135. kimi_cli-0.44.dist-info/entry_points.txt +0 -3
  136. kimi_cli/tools/{task → multiagent}/task.md +0 -0
  137. {kimi_cli-0.44.dist-info → kimi_cli-0.78.dist-info}/WHEEL +0 -0
kimi_cli/llm.py CHANGED
@@ -1,27 +1,45 @@
+from __future__ import annotations
+
 import os
-from typing import NamedTuple
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Literal, cast, get_args

-from kosong.base.chat_provider import ChatProvider
+from kosong.chat_provider import ChatProvider
 from pydantic import SecretStr

-from kimi_cli.config import LLMModel, LLMModelCapability, LLMProvider
 from kimi_cli.constant import USER_AGENT

+if TYPE_CHECKING:
+    from kimi_cli.config import LLMModel, LLMProvider
+
+type ProviderType = Literal[
+    "kimi",
+    "openai_legacy",
+    "openai_responses",
+    "anthropic",
+    "google_genai",  # for backward-compatibility, equals to `gemini`
+    "gemini",
+    "vertexai",
+    "_echo",
+    "_chaos",
+]
+
+type ModelCapability = Literal["image_in", "video_in", "thinking", "always_thinking"]
+ALL_MODEL_CAPABILITIES: set[ModelCapability] = set(get_args(ModelCapability.__value__))

-class LLM(NamedTuple):
+
+@dataclass(slots=True)
+class LLM:
     chat_provider: ChatProvider
     max_context_size: int
-    capabilities: set[LLMModelCapability]
-    # TODO: these additional fields should be moved to ChatProvider
+    capabilities: set[ModelCapability]
+    model_config: LLMModel | None = None
+    provider_config: LLMProvider | None = None

     @property
     def model_name(self) -> str:
         return self.chat_provider.model_name

-    @property
-    def supports_image_in(self) -> bool:
-        return "image_in" in self.capabilities
-

 def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> dict[str, str]:
     """Override provider/model settings from environment variables.
@@ -41,11 +59,19 @@ def augment_provider_with_env_vars(provider: LLMProvider, model: LLMModel) -> dict[str, str]:
                 applied["KIMI_API_KEY"] = "******"
             if model_name := os.getenv("KIMI_MODEL_NAME"):
                 model.model = model_name
-                applied["KIMI_MODEL_NAME"] = model.model
+                applied["KIMI_MODEL_NAME"] = model_name
             if max_context_size := os.getenv("KIMI_MODEL_MAX_CONTEXT_SIZE"):
                 model.max_context_size = int(max_context_size)
-                applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = str(model.max_context_size)
-        case "openai_legacy":
+                applied["KIMI_MODEL_MAX_CONTEXT_SIZE"] = max_context_size
+            if capabilities := os.getenv("KIMI_MODEL_CAPABILITIES"):
+                caps_lower = (cap.strip().lower() for cap in capabilities.split(",") if cap.strip())
+                model.capabilities = set(
+                    cast(ModelCapability, cap)
+                    for cap in caps_lower
+                    if cap in get_args(ModelCapability.__value__)
+                )
+                applied["KIMI_MODEL_CAPABILITIES"] = capabilities
+        case "openai_legacy" | "openai_responses":
             if base_url := os.getenv("OPENAI_BASE_URL"):
                 provider.base_url = base_url
             if api_key := os.getenv("OPENAI_API_KEY"):
@@ -60,9 +86,12 @@ def create_llm(
     provider: LLMProvider,
     model: LLMModel,
     *,
-    stream: bool = True,
+    thinking: bool | None = None,
     session_id: str | None = None,
-) -> LLM:
+) -> LLM | None:
+    if provider.type != "_echo" and (not provider.base_url or not model.model):
+        return None
+
     match provider.type:
         case "kimi":
             from kosong.chat_provider.kimi import Kimi
@@ -71,38 +100,115 @@ def create_llm(
                 model=model.model,
                 base_url=provider.base_url,
                 api_key=provider.api_key.get_secret_value(),
-                stream=stream,
                 default_headers={
                     "User-Agent": USER_AGENT,
                     **(provider.custom_headers or {}),
                 },
             )
+
+            gen_kwargs: Kimi.GenerationKwargs = {}
             if session_id:
-                chat_provider = chat_provider.with_generation_kwargs(prompt_cache_key=session_id)
+                gen_kwargs["prompt_cache_key"] = session_id
+            if temperature := os.getenv("KIMI_MODEL_TEMPERATURE"):
+                gen_kwargs["temperature"] = float(temperature)
+            if top_p := os.getenv("KIMI_MODEL_TOP_P"):
+                gen_kwargs["top_p"] = float(top_p)
+            if max_tokens := os.getenv("KIMI_MODEL_MAX_TOKENS"):
+                gen_kwargs["max_tokens"] = int(max_tokens)
+
+            if gen_kwargs:
+                chat_provider = chat_provider.with_generation_kwargs(**gen_kwargs)
         case "openai_legacy":
-            from kosong.chat_provider.openai_legacy import OpenAILegacy
+            from kosong.contrib.chat_provider.openai_legacy import OpenAILegacy

             chat_provider = OpenAILegacy(
                 model=model.model,
                 base_url=provider.base_url,
                 api_key=provider.api_key.get_secret_value(),
-                stream=stream,
             )
-        case "_chaos":
-            from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
+        case "openai_responses":
+            from kosong.contrib.chat_provider.openai_responses import OpenAIResponses

-            chat_provider = ChaosChatProvider(
+            chat_provider = OpenAIResponses(
+                model=model.model,
+                base_url=provider.base_url,
+                api_key=provider.api_key.get_secret_value(),
+            )
+        case "anthropic":
+            from kosong.contrib.chat_provider.anthropic import Anthropic
+
+            chat_provider = Anthropic(
                 model=model.model,
                 base_url=provider.base_url,
                 api_key=provider.api_key.get_secret_value(),
+                default_max_tokens=50000,
+            )
+        case "google_genai" | "gemini":
+            from kosong.contrib.chat_provider.google_genai import GoogleGenAI
+
+            chat_provider = GoogleGenAI(
+                model=model.model,
+                base_url=provider.base_url,
+                api_key=provider.api_key.get_secret_value(),
+            )
+        case "vertexai":
+            from kosong.contrib.chat_provider.google_genai import GoogleGenAI
+
+            os.environ.update(provider.env or {})
+            chat_provider = GoogleGenAI(
+                model=model.model,
+                base_url=provider.base_url,
+                api_key=provider.api_key.get_secret_value(),
+                vertexai=True,
+            )
+        case "_echo":
+            from kosong.chat_provider.echo import EchoChatProvider
+
+            chat_provider = EchoChatProvider()
+        case "_chaos":
+            from kosong.chat_provider.chaos import ChaosChatProvider, ChaosConfig
+            from kosong.chat_provider.kimi import Kimi
+
+            chat_provider = ChaosChatProvider(
+                provider=Kimi(
+                    model=model.model,
+                    base_url=provider.base_url,
+                    api_key=provider.api_key.get_secret_value(),
+                    default_headers={
+                        "User-Agent": USER_AGENT,
+                        **(provider.custom_headers or {}),
+                    },
+                ),
                 chaos_config=ChaosConfig(
                     error_probability=0.8,
                     error_types=[429, 500, 503],
                 ),
             )

+    capabilities = derive_model_capabilities(model)
+
+    # Apply thinking if specified or if model always requires thinking
+    if "always_thinking" in capabilities or (thinking is True and "thinking" in capabilities):
+        chat_provider = chat_provider.with_thinking("high")
+    elif thinking is False:
+        chat_provider = chat_provider.with_thinking("off")
+    # If thinking is None and model doesn't always think, leave as-is (default behavior)
+
     return LLM(
         chat_provider=chat_provider,
         max_context_size=model.max_context_size,
-        capabilities=model.capabilities or set(),
+        capabilities=capabilities,
+        model_config=model,
+        provider_config=provider,
     )
+
+
+def derive_model_capabilities(model: LLMModel) -> set[ModelCapability]:
+    capabilities = set(model.capabilities or ())
+    # Models with "thinking" in their name are always-thinking models
+    if "thinking" in model.model.lower() or "reason" in model.model.lower():
+        capabilities.update(("thinking", "always_thinking"))
+    # These models support thinking but can be toggled on/off
+    elif model.model in {"kimi-for-coding", "kimi-code"}:
+        capabilities.add("thinking")
+    return capabilities
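
Net effect of the llm.py changes: create_llm() gains several provider types (openai_responses, anthropic, gemini, vertexai, _echo), returns None for unconfigured providers instead of failing later, and derives capabilities rather than trusting the config verbatim. A minimal usage sketch, assuming the LLMProvider/LLMModel constructors accept the field names that appear in this diff (exact signatures live in kimi_cli/config.py):

from pydantic import SecretStr

from kimi_cli.config import LLMModel, LLMProvider
from kimi_cli.llm import create_llm

# Field names below mirror the diff; the constructor signatures are assumptions.
provider = LLMProvider(type="kimi", base_url="https://api.kimi.com/coding/v1", api_key=SecretStr("sk-..."))
model = LLMModel(provider="managed:kimi-code", model="kimi-for-coding", max_context_size=262_144)

llm = create_llm(provider, model, thinking=True, session_id="sess-123")
if llm is None:
    raise RuntimeError("provider is missing base_url or model name")  # new None contract
assert "thinking" in llm.capabilities  # added by derive_model_capabilities() for kimi-for-coding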
kimi_cli/metadata.py CHANGED
@@ -1,8 +1,13 @@
+from __future__ import annotations
+
 import json
 from hashlib import md5
 from pathlib import Path

-from pydantic import BaseModel, Field
+from kaos import get_current_kaos
+from kaos.local import local_kaos
+from kaos.path import KaosPath
+from pydantic import BaseModel, ConfigDict, Field

 from kimi_cli.share import get_share_dir
 from kimi_cli.utils.logging import logger
@@ -18,22 +23,42 @@ class WorkDirMeta(BaseModel):
     path: str
     """The full path of the work directory."""

+    kaos: str = local_kaos.name
+    """The name of the KAOS where the work directory is located."""
+
     last_session_id: str | None = None
     """Last session ID of this work directory."""

     @property
     def sessions_dir(self) -> Path:
-        path = get_share_dir() / "sessions" / md5(self.path.encode()).hexdigest()
-        path.mkdir(parents=True, exist_ok=True)
-        return path
+        """The directory to store sessions for this work directory."""
+        path_md5 = md5(self.path.encode(encoding="utf-8")).hexdigest()
+        dir_basename = path_md5 if self.kaos == local_kaos.name else f"{self.kaos}_{path_md5}"
+        session_dir = get_share_dir() / "sessions" / dir_basename
+        session_dir.mkdir(parents=True, exist_ok=True)
+        return session_dir


 class Metadata(BaseModel):
     """Kimi metadata structure."""

-    work_dirs: list[WorkDirMeta] = Field(
-        default_factory=list[WorkDirMeta], description="Work directory list"
-    )
+    model_config = ConfigDict(extra="ignore")
+
+    work_dirs: list[WorkDirMeta] = Field(default_factory=list[WorkDirMeta])
+    """Work directory list."""
+
+    def get_work_dir_meta(self, path: KaosPath) -> WorkDirMeta | None:
+        """Get the metadata for a work directory."""
+        for wd in self.work_dirs:
+            if wd.path == str(path) and wd.kaos == get_current_kaos().name:
+                return wd
+        return None
+
+    def new_work_dir_meta(self, path: KaosPath) -> WorkDirMeta:
+        """Create a new work directory metadata."""
+        wd_meta = WorkDirMeta(path=str(path), kaos=get_current_kaos().name)
+        self.work_dirs.append(wd_meta)
+        return wd_meta


 def load_metadata() -> Metadata:
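
The new lookup helpers make the KAOS name part of the work-directory identity, so the same path on different KAOSes maps to different session directories. A short sketch of the intended flow (hedged: load_metadata() is taken from the context line above, the path value is illustrative, and a KAOS context must be active for get_current_kaos() to resolve):

from kaos.path import KaosPath

from kimi_cli.metadata import load_metadata

metadata = load_metadata()
work_dir = KaosPath("/home/user/project")  # hypothetical path

wd_meta = metadata.get_work_dir_meta(work_dir)  # matches on path AND current KAOS name
if wd_meta is None:
    wd_meta = metadata.new_work_dir_meta(work_dir)

# Local dirs keep the old "<md5>" basename; non-local KAOSes get "<kaos>_<md5>"
print(wd_meta.sessions_dir)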
kimi_cli/platforms.py ADDED
@@ -0,0 +1,262 @@
+from __future__ import annotations
+
+from typing import Any, NamedTuple, cast
+
+import aiohttp
+from pydantic import BaseModel
+
+from kimi_cli.config import Config, LLMModel, load_config, save_config
+from kimi_cli.llm import ModelCapability
+from kimi_cli.utils.aiohttp import new_client_session
+from kimi_cli.utils.logging import logger
+
+
+class ModelInfo(BaseModel):
+    """Model information returned from the API."""
+
+    id: str
+    context_length: int = 0
+    supports_reasoning: bool = False
+
+    @property
+    def capabilities(self) -> set[ModelCapability]:
+        """Derive capabilities from model info."""
+        caps: set[ModelCapability] = set()
+        if self.supports_reasoning:
+            caps.add("thinking")
+        # Models with "thinking" in name are always-thinking
+        if "thinking" in self.id.lower():
+            caps.update(("thinking", "always_thinking"))
+        return caps
+
+
+class Platform(NamedTuple):
+    id: str
+    name: str
+    base_url: str
+    search_url: str | None = None
+    fetch_url: str | None = None
+    allowed_prefixes: list[str] | None = None
+
+
+PLATFORMS: list[Platform] = [
+    Platform(
+        id="kimi-code",
+        name="Kimi Code",
+        base_url="https://api.kimi.com/coding/v1",
+        search_url="https://api.kimi.com/coding/v1/search",
+        fetch_url="https://api.kimi.com/coding/v1/fetch",
+    ),
+    Platform(
+        id="moonshot-cn",
+        name="Moonshot AI Open Platform (moonshot.cn)",
+        base_url="https://api.moonshot.cn/v1",
+        allowed_prefixes=["kimi-k"],
+    ),
+    Platform(
+        id="moonshot-ai",
+        name="Moonshot AI Open Platform (moonshot.ai)",
+        base_url="https://api.moonshot.ai/v1",
+        allowed_prefixes=["kimi-k"],
+    ),
+]
+
+_PLATFORM_BY_ID = {platform.id: platform for platform in PLATFORMS}
+_PLATFORM_BY_NAME = {platform.name: platform for platform in PLATFORMS}
+
+
+def get_platform_by_id(platform_id: str) -> Platform | None:
+    return _PLATFORM_BY_ID.get(platform_id)
+
+
+def get_platform_by_name(name: str) -> Platform | None:
+    return _PLATFORM_BY_NAME.get(name)
+
+
+MANAGED_PROVIDER_PREFIX = "managed:"
+
+
+def managed_provider_key(platform_id: str) -> str:
+    return f"{MANAGED_PROVIDER_PREFIX}{platform_id}"
+
+
+def managed_model_key(platform_id: str, model_id: str) -> str:
+    return f"{platform_id}/{model_id}"
+
+
+def parse_managed_provider_key(provider_key: str) -> str | None:
+    if not provider_key.startswith(MANAGED_PROVIDER_PREFIX):
+        return None
+    return provider_key.removeprefix(MANAGED_PROVIDER_PREFIX)
+
+
+def is_managed_provider_key(provider_key: str) -> bool:
+    return provider_key.startswith(MANAGED_PROVIDER_PREFIX)
+
+
+def get_platform_name_for_provider(provider_key: str) -> str | None:
+    platform_id = parse_managed_provider_key(provider_key)
+    if not platform_id:
+        return None
+    platform = get_platform_by_id(platform_id)
+    return platform.name if platform else None
+
+
+async def refresh_managed_models(config: Config) -> bool:
+    if not config.is_from_default_location:
+        return False
+
+    managed_providers = {
+        key: provider for key, provider in config.providers.items() if is_managed_provider_key(key)
+    }
+    if not managed_providers:
+        return False
+
+    changed = False
+    updates: list[tuple[str, str, list[ModelInfo]]] = []
+    for provider_key, provider in managed_providers.items():
+        platform_id = parse_managed_provider_key(provider_key)
+        if not platform_id:
+            continue
+        platform = get_platform_by_id(platform_id)
+        if platform is None:
+            logger.warning("Managed platform not found: {platform}", platform=platform_id)
+            continue
+
+        try:
+            models = await list_models(platform, provider.api_key.get_secret_value())
+        except Exception as exc:
+            logger.error(
+                "Failed to refresh models for {platform}: {error}",
+                platform=platform_id,
+                error=exc,
+            )
+            continue
+
+        updates.append((provider_key, platform_id, models))
+        if _apply_models(config, provider_key, platform_id, models):
+            changed = True
+
+    if changed:
+        config_for_save = load_config()
+        save_changed = False
+        for provider_key, platform_id, models in updates:
+            if _apply_models(config_for_save, provider_key, platform_id, models):
+                save_changed = True
+        if save_changed:
+            save_config(config_for_save)
+    return changed
+
+
+async def list_models(platform: Platform, api_key: str) -> list[ModelInfo]:
+    async with new_client_session() as session:
+        models = await _list_models(
+            session,
+            base_url=platform.base_url,
+            api_key=api_key,
+        )
+    if platform.allowed_prefixes is None:
+        return models
+    prefixes = tuple(platform.allowed_prefixes)
+    return [model for model in models if model.id.startswith(prefixes)]
+
+
+async def _list_models(
+    session: aiohttp.ClientSession,
+    *,
+    base_url: str,
+    api_key: str,
+) -> list[ModelInfo]:
+    models_url = f"{base_url.rstrip('/')}/models"
+    try:
+        async with session.get(
+            models_url,
+            headers={"Authorization": f"Bearer {api_key}"},
+            raise_for_status=True,
+        ) as response:
+            resp_json = await response.json()
+    except aiohttp.ClientError:
+        raise
+
+    data = resp_json.get("data")
+    if not isinstance(data, list):
+        raise ValueError(f"Unexpected models response for {base_url}")
+
+    result: list[ModelInfo] = []
+    for item in cast(list[dict[str, Any]], data):
+        model_id = item.get("id")
+        if not model_id:
+            continue
+        result.append(
+            ModelInfo(
+                id=str(model_id),
+                context_length=int(item.get("context_length") or 0),
+                supports_reasoning=bool(item.get("supports_reasoning")),
+            )
+        )
+    return result
+
+
+def _apply_models(
+    config: Config,
+    provider_key: str,
+    platform_id: str,
+    models: list[ModelInfo],
+) -> bool:
+    changed = False
+    model_keys: list[str] = []
+
+    for model in models:
+        model_key = managed_model_key(platform_id, model.id)
+        model_keys.append(model_key)
+
+        existing = config.models.get(model_key)
+        capabilities = model.capabilities or None  # empty set -> None
+
+        if existing is None:
+            config.models[model_key] = LLMModel(
+                provider=provider_key,
+                model=model.id,
+                max_context_size=model.context_length,
+                capabilities=capabilities,
+            )
+            changed = True
+            continue
+
+        if existing.provider != provider_key:
+            existing.provider = provider_key
+            changed = True
+        if existing.model != model.id:
+            existing.model = model.id
+            changed = True
+        if existing.max_context_size != model.context_length:
+            existing.max_context_size = model.context_length
+            changed = True
+        if existing.capabilities != capabilities:
+            existing.capabilities = capabilities
+            changed = True
+
+    removed_default = False
+    model_keys_set = set(model_keys)
+    for key, model in list(config.models.items()):
+        if model.provider != provider_key:
+            continue
+        if key in model_keys_set:
+            continue
+        del config.models[key]
+        if config.default_model == key:
+            removed_default = True
+        changed = True
+
+    if removed_default:
+        if model_keys:
+            config.default_model = model_keys[0]
+        else:
+            config.default_model = next(iter(config.models), "")
+        changed = True
+
+    if config.default_model and config.default_model not in config.models:
+        config.default_model = next(iter(config.models), "")
+        changed = True
+
+    return changed
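
platforms.py introduces a "managed provider" convention: provider keys are managed:<platform-id>, model keys are <platform-id>/<model-id>, and refresh_managed_models() syncs the config against each platform's /models endpoint. A sketch of the key helpers and the refresh entry point (hedged: the Config shape and load_config() come from kimi_cli.config, which is not part of this diff):

import asyncio

from kimi_cli.config import load_config
from kimi_cli.platforms import (
    get_platform_by_id,
    managed_model_key,
    managed_provider_key,
    refresh_managed_models,
)

assert managed_provider_key("kimi-code") == "managed:kimi-code"
assert managed_model_key("moonshot-cn", "kimi-k2") == "moonshot-cn/kimi-k2"  # model id is illustrative

platform = get_platform_by_id("kimi-code")
assert platform is not None and platform.base_url == "https://api.kimi.com/coding/v1"

# No-op unless the config was loaded from the default location and contains managed: providers
config = load_config()
changed = asyncio.run(refresh_managed_models(config))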
kimi_cli/prompts/__init__.py CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from pathlib import Path

 INIT = (Path(__file__).parent / "init.md").read_text(encoding="utf-8")
kimi_cli/prompts/compact.md CHANGED
@@ -1,4 +1,7 @@
-You are tasked with compacting a coding conversation context. This is critical for maintaining an effective working memory for the coding agent.
+
+---
+
+The above is a list of messages in an agent conversation. You are now given a task to compact this conversation context according to specific priorities and rules.

 **Compression Priorities (in order):**
 1. **Current Task State**: What is being worked on RIGHT NOW
@@ -19,10 +22,6 @@ You are tasked with compacting a coding conversation context. This is critical for maintaining an effective working memory for the coding agent.
 - For errors: Keep full error message + final solution
 - For discussions: Extract decisions and action items only

-**Input Context to Compress:**
-
-${CONTEXT}
-
 **Required Output Structure:**

 <current_focus>
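
With the ${CONTEXT} placeholder removed, compact.md no longer works as an interpolated template; its new opening ("The above is a list of messages ...") implies the transcript is sent first and the prompt is appended as a final instruction. A hedged sketch of that assembly (the real call site is kimi_cli/soul/compaction.py, not shown in this diff, and the COMPACT constant name is an assumption):

from kimi_cli.prompts import COMPACT  # assumed export of compact.md's text


def build_compaction_request(history: list[dict[str, str]]) -> list[dict[str, str]]:
    # Transcript first, compaction prompt last, matching the prompt's new framing.
    return [*history, {"role": "user", "content": COMPACT}]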