chatmcp_cli-0.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Files changed (228)
  1. aider/__init__.py +20 -0
  2. aider/__main__.py +4 -0
  3. aider/_version.py +21 -0
  4. aider/analytics.py +250 -0
  5. aider/args.py +926 -0
  6. aider/args_formatter.py +228 -0
  7. aider/coders/__init__.py +34 -0
  8. aider/coders/architect_coder.py +48 -0
  9. aider/coders/architect_prompts.py +40 -0
  10. aider/coders/ask_coder.py +9 -0
  11. aider/coders/ask_prompts.py +35 -0
  12. aider/coders/base_coder.py +2483 -0
  13. aider/coders/base_prompts.py +60 -0
  14. aider/coders/chat_chunks.py +64 -0
  15. aider/coders/context_coder.py +53 -0
  16. aider/coders/context_prompts.py +75 -0
  17. aider/coders/editblock_coder.py +657 -0
  18. aider/coders/editblock_fenced_coder.py +10 -0
  19. aider/coders/editblock_fenced_prompts.py +143 -0
  20. aider/coders/editblock_func_coder.py +141 -0
  21. aider/coders/editblock_func_prompts.py +27 -0
  22. aider/coders/editblock_prompts.py +174 -0
  23. aider/coders/editor_diff_fenced_coder.py +9 -0
  24. aider/coders/editor_diff_fenced_prompts.py +11 -0
  25. aider/coders/editor_editblock_coder.py +8 -0
  26. aider/coders/editor_editblock_prompts.py +18 -0
  27. aider/coders/editor_whole_coder.py +8 -0
  28. aider/coders/editor_whole_prompts.py +10 -0
  29. aider/coders/help_coder.py +16 -0
  30. aider/coders/help_prompts.py +46 -0
  31. aider/coders/patch_coder.py +706 -0
  32. aider/coders/patch_prompts.py +161 -0
  33. aider/coders/search_replace.py +757 -0
  34. aider/coders/shell.py +37 -0
  35. aider/coders/single_wholefile_func_coder.py +102 -0
  36. aider/coders/single_wholefile_func_prompts.py +27 -0
  37. aider/coders/udiff_coder.py +429 -0
  38. aider/coders/udiff_prompts.py +115 -0
  39. aider/coders/udiff_simple.py +14 -0
  40. aider/coders/udiff_simple_prompts.py +25 -0
  41. aider/coders/wholefile_coder.py +144 -0
  42. aider/coders/wholefile_func_coder.py +134 -0
  43. aider/coders/wholefile_func_prompts.py +27 -0
  44. aider/coders/wholefile_prompts.py +67 -0
  45. aider/commands.py +1665 -0
  46. aider/copypaste.py +72 -0
  47. aider/deprecated.py +126 -0
  48. aider/diffs.py +128 -0
  49. aider/dump.py +29 -0
  50. aider/editor.py +147 -0
  51. aider/exceptions.py +107 -0
  52. aider/format_settings.py +26 -0
  53. aider/gui.py +545 -0
  54. aider/help.py +163 -0
  55. aider/help_pats.py +19 -0
  56. aider/history.py +143 -0
  57. aider/io.py +1175 -0
  58. aider/linter.py +304 -0
  59. aider/llm.py +47 -0
  60. aider/main.py +1267 -0
  61. aider/mdstream.py +243 -0
  62. aider/models.py +1286 -0
  63. aider/onboarding.py +428 -0
  64. aider/openrouter.py +128 -0
  65. aider/prompts.py +64 -0
  66. aider/queries/tree-sitter-language-pack/README.md +7 -0
  67. aider/queries/tree-sitter-language-pack/arduino-tags.scm +5 -0
  68. aider/queries/tree-sitter-language-pack/c-tags.scm +9 -0
  69. aider/queries/tree-sitter-language-pack/chatito-tags.scm +16 -0
  70. aider/queries/tree-sitter-language-pack/commonlisp-tags.scm +122 -0
  71. aider/queries/tree-sitter-language-pack/cpp-tags.scm +15 -0
  72. aider/queries/tree-sitter-language-pack/csharp-tags.scm +26 -0
  73. aider/queries/tree-sitter-language-pack/d-tags.scm +26 -0
  74. aider/queries/tree-sitter-language-pack/dart-tags.scm +92 -0
  75. aider/queries/tree-sitter-language-pack/elisp-tags.scm +5 -0
  76. aider/queries/tree-sitter-language-pack/elixir-tags.scm +54 -0
  77. aider/queries/tree-sitter-language-pack/elm-tags.scm +19 -0
  78. aider/queries/tree-sitter-language-pack/gleam-tags.scm +41 -0
  79. aider/queries/tree-sitter-language-pack/go-tags.scm +42 -0
  80. aider/queries/tree-sitter-language-pack/java-tags.scm +20 -0
  81. aider/queries/tree-sitter-language-pack/javascript-tags.scm +88 -0
  82. aider/queries/tree-sitter-language-pack/lua-tags.scm +34 -0
  83. aider/queries/tree-sitter-language-pack/ocaml-tags.scm +115 -0
  84. aider/queries/tree-sitter-language-pack/ocaml_interface-tags.scm +98 -0
  85. aider/queries/tree-sitter-language-pack/pony-tags.scm +39 -0
  86. aider/queries/tree-sitter-language-pack/properties-tags.scm +5 -0
  87. aider/queries/tree-sitter-language-pack/python-tags.scm +14 -0
  88. aider/queries/tree-sitter-language-pack/r-tags.scm +21 -0
  89. aider/queries/tree-sitter-language-pack/racket-tags.scm +12 -0
  90. aider/queries/tree-sitter-language-pack/ruby-tags.scm +64 -0
  91. aider/queries/tree-sitter-language-pack/rust-tags.scm +60 -0
  92. aider/queries/tree-sitter-language-pack/solidity-tags.scm +43 -0
  93. aider/queries/tree-sitter-language-pack/swift-tags.scm +51 -0
  94. aider/queries/tree-sitter-language-pack/udev-tags.scm +20 -0
  95. aider/queries/tree-sitter-languages/README.md +23 -0
  96. aider/queries/tree-sitter-languages/c-tags.scm +9 -0
  97. aider/queries/tree-sitter-languages/c_sharp-tags.scm +46 -0
  98. aider/queries/tree-sitter-languages/cpp-tags.scm +15 -0
  99. aider/queries/tree-sitter-languages/dart-tags.scm +91 -0
  100. aider/queries/tree-sitter-languages/elisp-tags.scm +8 -0
  101. aider/queries/tree-sitter-languages/elixir-tags.scm +54 -0
  102. aider/queries/tree-sitter-languages/elm-tags.scm +19 -0
  103. aider/queries/tree-sitter-languages/go-tags.scm +30 -0
  104. aider/queries/tree-sitter-languages/hcl-tags.scm +77 -0
  105. aider/queries/tree-sitter-languages/java-tags.scm +20 -0
  106. aider/queries/tree-sitter-languages/javascript-tags.scm +88 -0
  107. aider/queries/tree-sitter-languages/kotlin-tags.scm +27 -0
  108. aider/queries/tree-sitter-languages/ocaml-tags.scm +115 -0
  109. aider/queries/tree-sitter-languages/ocaml_interface-tags.scm +98 -0
  110. aider/queries/tree-sitter-languages/php-tags.scm +26 -0
  111. aider/queries/tree-sitter-languages/python-tags.scm +12 -0
  112. aider/queries/tree-sitter-languages/ql-tags.scm +26 -0
  113. aider/queries/tree-sitter-languages/ruby-tags.scm +64 -0
  114. aider/queries/tree-sitter-languages/rust-tags.scm +60 -0
  115. aider/queries/tree-sitter-languages/scala-tags.scm +65 -0
  116. aider/queries/tree-sitter-languages/typescript-tags.scm +41 -0
  117. aider/reasoning_tags.py +82 -0
  118. aider/repo.py +623 -0
  119. aider/repomap.py +847 -0
  120. aider/report.py +200 -0
  121. aider/resources/__init__.py +3 -0
  122. aider/resources/model-metadata.json +468 -0
  123. aider/resources/model-settings.yml +1767 -0
  124. aider/run_cmd.py +132 -0
  125. aider/scrape.py +284 -0
  126. aider/sendchat.py +61 -0
  127. aider/special.py +203 -0
  128. aider/urls.py +17 -0
  129. aider/utils.py +338 -0
  130. aider/versioncheck.py +113 -0
  131. aider/voice.py +187 -0
  132. aider/waiting.py +221 -0
  133. aider/watch.py +318 -0
  134. aider/watch_prompts.py +12 -0
  135. aider/website/Gemfile +8 -0
  136. aider/website/_includes/blame.md +162 -0
  137. aider/website/_includes/get-started.md +22 -0
  138. aider/website/_includes/help-tip.md +5 -0
  139. aider/website/_includes/help.md +24 -0
  140. aider/website/_includes/install.md +5 -0
  141. aider/website/_includes/keys.md +4 -0
  142. aider/website/_includes/model-warnings.md +67 -0
  143. aider/website/_includes/multi-line.md +22 -0
  144. aider/website/_includes/python-m-aider.md +5 -0
  145. aider/website/_includes/recording.css +228 -0
  146. aider/website/_includes/recording.md +34 -0
  147. aider/website/_includes/replit-pipx.md +9 -0
  148. aider/website/_includes/works-best.md +1 -0
  149. aider/website/_sass/custom/custom.scss +103 -0
  150. aider/website/docs/config/adv-model-settings.md +1881 -0
  151. aider/website/docs/config/aider_conf.md +527 -0
  152. aider/website/docs/config/api-keys.md +90 -0
  153. aider/website/docs/config/dotenv.md +478 -0
  154. aider/website/docs/config/editor.md +127 -0
  155. aider/website/docs/config/model-aliases.md +103 -0
  156. aider/website/docs/config/options.md +843 -0
  157. aider/website/docs/config/reasoning.md +209 -0
  158. aider/website/docs/config.md +44 -0
  159. aider/website/docs/faq.md +378 -0
  160. aider/website/docs/git.md +76 -0
  161. aider/website/docs/index.md +47 -0
  162. aider/website/docs/install/codespaces.md +39 -0
  163. aider/website/docs/install/docker.md +57 -0
  164. aider/website/docs/install/optional.md +100 -0
  165. aider/website/docs/install/replit.md +8 -0
  166. aider/website/docs/install.md +115 -0
  167. aider/website/docs/languages.md +264 -0
  168. aider/website/docs/legal/contributor-agreement.md +111 -0
  169. aider/website/docs/legal/privacy.md +104 -0
  170. aider/website/docs/llms/anthropic.md +77 -0
  171. aider/website/docs/llms/azure.md +48 -0
  172. aider/website/docs/llms/bedrock.md +132 -0
  173. aider/website/docs/llms/cohere.md +34 -0
  174. aider/website/docs/llms/deepseek.md +32 -0
  175. aider/website/docs/llms/gemini.md +49 -0
  176. aider/website/docs/llms/github.md +105 -0
  177. aider/website/docs/llms/groq.md +36 -0
  178. aider/website/docs/llms/lm-studio.md +39 -0
  179. aider/website/docs/llms/ollama.md +75 -0
  180. aider/website/docs/llms/openai-compat.md +39 -0
  181. aider/website/docs/llms/openai.md +58 -0
  182. aider/website/docs/llms/openrouter.md +78 -0
  183. aider/website/docs/llms/other.md +103 -0
  184. aider/website/docs/llms/vertex.md +50 -0
  185. aider/website/docs/llms/warnings.md +10 -0
  186. aider/website/docs/llms/xai.md +53 -0
  187. aider/website/docs/llms.md +54 -0
  188. aider/website/docs/more/analytics.md +122 -0
  189. aider/website/docs/more/edit-formats.md +116 -0
  190. aider/website/docs/more/infinite-output.md +137 -0
  191. aider/website/docs/more-info.md +8 -0
  192. aider/website/docs/recordings/auto-accept-architect.md +31 -0
  193. aider/website/docs/recordings/dont-drop-original-read-files.md +35 -0
  194. aider/website/docs/recordings/index.md +21 -0
  195. aider/website/docs/recordings/model-accepts-settings.md +69 -0
  196. aider/website/docs/recordings/tree-sitter-language-pack.md +80 -0
  197. aider/website/docs/repomap.md +112 -0
  198. aider/website/docs/scripting.md +100 -0
  199. aider/website/docs/troubleshooting/aider-not-found.md +24 -0
  200. aider/website/docs/troubleshooting/edit-errors.md +76 -0
  201. aider/website/docs/troubleshooting/imports.md +62 -0
  202. aider/website/docs/troubleshooting/models-and-keys.md +54 -0
  203. aider/website/docs/troubleshooting/support.md +79 -0
  204. aider/website/docs/troubleshooting/token-limits.md +96 -0
  205. aider/website/docs/troubleshooting/warnings.md +12 -0
  206. aider/website/docs/troubleshooting.md +11 -0
  207. aider/website/docs/usage/browser.md +57 -0
  208. aider/website/docs/usage/caching.md +49 -0
  209. aider/website/docs/usage/commands.md +132 -0
  210. aider/website/docs/usage/conventions.md +119 -0
  211. aider/website/docs/usage/copypaste.md +121 -0
  212. aider/website/docs/usage/images-urls.md +48 -0
  213. aider/website/docs/usage/lint-test.md +118 -0
  214. aider/website/docs/usage/modes.md +211 -0
  215. aider/website/docs/usage/not-code.md +179 -0
  216. aider/website/docs/usage/notifications.md +87 -0
  217. aider/website/docs/usage/tips.md +79 -0
  218. aider/website/docs/usage/tutorials.md +30 -0
  219. aider/website/docs/usage/voice.md +121 -0
  220. aider/website/docs/usage/watch.md +294 -0
  221. aider/website/docs/usage.md +92 -0
  222. aider/website/share/index.md +101 -0
  223. chatmcp_cli-0.1.0.dist-info/METADATA +502 -0
  224. chatmcp_cli-0.1.0.dist-info/RECORD +228 -0
  225. chatmcp_cli-0.1.0.dist-info/WHEEL +5 -0
  226. chatmcp_cli-0.1.0.dist-info/entry_points.txt +3 -0
  227. chatmcp_cli-0.1.0.dist-info/licenses/LICENSE.txt +202 -0
  228. chatmcp_cli-0.1.0.dist-info/top_level.txt +1 -0
aider/models.py ADDED
@@ -0,0 +1,1286 @@
+import difflib
+import hashlib
+import importlib.resources
+import json
+import math
+import os
+import platform
+import sys
+import time
+from dataclasses import dataclass, fields
+from datetime import datetime
+from pathlib import Path
+from typing import Optional, Union
+
+import json5
+import yaml
+from PIL import Image
+
+from aider import __version__
+from aider.dump import dump  # noqa: F401
+from aider.llm import litellm
+from aider.openrouter import OpenRouterModelManager
+from aider.sendchat import ensure_alternating_roles, sanity_check_messages
+from aider.utils import check_pip_install_extra
+
+RETRY_TIMEOUT = 60
+
+request_timeout = 600
+
+DEFAULT_MODEL_NAME = "gpt-4o"
+ANTHROPIC_BETA_HEADER = "prompt-caching-2024-07-31,pdfs-2024-09-25"
+
+OPENAI_MODELS = """
+o1
+o1-preview
+o1-mini
+o3-mini
+gpt-4
+gpt-4o
+gpt-4o-2024-05-13
+gpt-4-turbo-preview
+gpt-4-0314
+gpt-4-0613
+gpt-4-32k
+gpt-4-32k-0314
+gpt-4-32k-0613
+gpt-4-turbo
+gpt-4-turbo-2024-04-09
+gpt-4-1106-preview
+gpt-4-0125-preview
+gpt-4-vision-preview
+gpt-4-1106-vision-preview
+gpt-4o-mini
+gpt-4o-mini-2024-07-18
+gpt-3.5-turbo
+gpt-3.5-turbo-0301
+gpt-3.5-turbo-0613
+gpt-3.5-turbo-1106
+gpt-3.5-turbo-0125
+gpt-3.5-turbo-16k
+gpt-3.5-turbo-16k-0613
+"""
+
+OPENAI_MODELS = [ln.strip() for ln in OPENAI_MODELS.splitlines() if ln.strip()]
+
+ANTHROPIC_MODELS = """
+claude-2
+claude-2.1
+claude-3-haiku-20240307
+claude-3-5-haiku-20241022
+claude-3-opus-20240229
+claude-3-sonnet-20240229
+claude-3-5-sonnet-20240620
+claude-3-5-sonnet-20241022
+claude-sonnet-4-20250514
+claude-opus-4-20250514
+"""
+
+ANTHROPIC_MODELS = [ln.strip() for ln in ANTHROPIC_MODELS.splitlines() if ln.strip()]
+
+# Mapping of model aliases to their canonical names
+MODEL_ALIASES = {
+    # Claude models
+    "sonnet": "anthropic/claude-sonnet-4-20250514",
+    "haiku": "claude-3-5-haiku-20241022",
+    "opus": "claude-opus-4-20250514",
+    # GPT models
+    "4": "gpt-4-0613",
+    "4o": "gpt-4o",
+    "4-turbo": "gpt-4-1106-preview",
+    "35turbo": "gpt-3.5-turbo",
+    "35-turbo": "gpt-3.5-turbo",
+    "3": "gpt-3.5-turbo",
+    # Other models
+    "deepseek": "deepseek/deepseek-chat",
+    "flash": "gemini/gemini-2.5-flash-preview-04-17",
+    "quasar": "openrouter/openrouter/quasar-alpha",
+    "r1": "deepseek/deepseek-reasoner",
+    "gemini-2.5-pro": "gemini/gemini-2.5-pro-preview-05-06",
+    "gemini": "gemini/gemini-2.5-pro-preview-05-06",
+    "gemini-exp": "gemini/gemini-2.5-pro-exp-03-25",
+    "grok3": "xai/grok-3-beta",
+    "optimus": "openrouter/openrouter/optimus-alpha",
+}
+# Model metadata loaded from resources and user's files.
+
+
+@dataclass
+class ModelSettings:
+    # Model class needs to have each of these as well
+    name: str
+    edit_format: str = "whole"
+    weak_model_name: Optional[str] = None
+    use_repo_map: bool = False
+    send_undo_reply: bool = False
+    lazy: bool = False
+    overeager: bool = False
+    reminder: str = "user"
+    examples_as_sys_msg: bool = False
+    extra_params: Optional[dict] = None
+    cache_control: bool = False
+    caches_by_default: bool = False
+    use_system_prompt: bool = True
+    use_temperature: Union[bool, float] = True
+    streaming: bool = True
+    editor_model_name: Optional[str] = None
+    editor_edit_format: Optional[str] = None
+    reasoning_tag: Optional[str] = None
+    remove_reasoning: Optional[str] = None  # Deprecated alias for reasoning_tag
+    system_prompt_prefix: Optional[str] = None
+    accepts_settings: Optional[list] = None
+
+
+# Load model settings from package resource
+MODEL_SETTINGS = []
+with importlib.resources.open_text("aider.resources", "model-settings.yml") as f:
+    model_settings_list = yaml.safe_load(f)
+    for model_settings_dict in model_settings_list:
+        MODEL_SETTINGS.append(ModelSettings(**model_settings_dict))
+
+
+class ModelInfoManager:
+    MODEL_INFO_URL = (
+        "https://raw.githubusercontent.com/BerriAI/litellm/main/"
+        "model_prices_and_context_window.json"
+    )
+    CACHE_TTL = 60 * 60 * 24  # 24 hours
+
+    def __init__(self):
+        self.cache_dir = Path.home() / ".aider" / "caches"
+        self.cache_file = self.cache_dir / "model_prices_and_context_window.json"
+        self.content = None
+        self.local_model_metadata = {}
+        self.verify_ssl = True
+        self._cache_loaded = False
+
+        # Manager for the cached OpenRouter model database
+        self.openrouter_manager = OpenRouterModelManager()
+
+    def set_verify_ssl(self, verify_ssl):
+        self.verify_ssl = verify_ssl
+        if hasattr(self, "openrouter_manager"):
+            self.openrouter_manager.set_verify_ssl(verify_ssl)
+
+    def _load_cache(self):
+        if self._cache_loaded:
+            return
+
+        try:
+            self.cache_dir.mkdir(parents=True, exist_ok=True)
+            if self.cache_file.exists():
+                cache_age = time.time() - self.cache_file.stat().st_mtime
+                if cache_age < self.CACHE_TTL:
+                    try:
+                        self.content = json.loads(self.cache_file.read_text())
+                    except json.JSONDecodeError:
+                        # If the cache file is corrupted, treat it as missing
+                        self.content = None
+        except OSError:
+            pass
+
+        self._cache_loaded = True
+
+    def _update_cache(self):
+        try:
+            import requests
+
+            # Respect the --no-verify-ssl switch
+            response = requests.get(self.MODEL_INFO_URL, timeout=5, verify=self.verify_ssl)
+            if response.status_code == 200:
+                self.content = response.json()
+                try:
+                    self.cache_file.write_text(json.dumps(self.content, indent=4))
+                except OSError:
+                    pass
+        except Exception as ex:
+            print(str(ex))
+            try:
+                # Save empty dict to cache file on failure
+                self.cache_file.write_text("{}")
+            except OSError:
+                pass
+
+    def get_model_from_cached_json_db(self, model):
+        data = self.local_model_metadata.get(model)
+        if data:
+            return data
+
+        # Ensure cache is loaded before checking content
+        self._load_cache()
+
+        if not self.content:
+            self._update_cache()
+
+        if not self.content:
+            return dict()
+
+        info = self.content.get(model, dict())
+        if info:
+            return info
+
+        pieces = model.split("/")
+        if len(pieces) == 2:
+            info = self.content.get(pieces[1])
+            if info and info.get("litellm_provider") == pieces[0]:
+                return info
+
+        return dict()
+
+    def get_model_info(self, model):
+        cached_info = self.get_model_from_cached_json_db(model)
+
+        litellm_info = None
+        if litellm._lazy_module or not cached_info:
+            try:
+                litellm_info = litellm.get_model_info(model)
+            except Exception as ex:
+                if "model_prices_and_context_window.json" not in str(ex):
+                    print(str(ex))
+
+        if litellm_info:
+            return litellm_info
+
+        if not cached_info and model.startswith("openrouter/"):
+            # First try using the locally cached OpenRouter model database
+            openrouter_info = self.openrouter_manager.get_model_info(model)
+            if openrouter_info:
+                return openrouter_info
+
+            # Fallback to legacy web-scraping if the API cache does not contain the model
+            openrouter_info = self.fetch_openrouter_model_info(model)
+            if openrouter_info:
+                return openrouter_info
+
+        return cached_info
+
+    def fetch_openrouter_model_info(self, model):
+        """
+        Fetch model info by scraping the openrouter model page.
+        Expected URL: https://openrouter.ai/<model_route>
+        Example: openrouter/qwen/qwen-2.5-72b-instruct:free
+        Returns a dict with keys: max_tokens, max_input_tokens, max_output_tokens,
+        input_cost_per_token, output_cost_per_token.
+        """
+        url_part = model[len("openrouter/") :]
+        url = "https://openrouter.ai/" + url_part
+        try:
+            import requests
+
+            response = requests.get(url, timeout=5, verify=self.verify_ssl)
+            if response.status_code != 200:
+                return {}
+            html = response.text
+            import re
+
+            if re.search(
+                rf"The model\s*.*{re.escape(url_part)}.* is not available", html, re.IGNORECASE
+            ):
+                print(f"\033[91mError: Model '{url_part}' is not available\033[0m")
+                return {}
+            text = re.sub(r"<[^>]+>", " ", html)
+            context_match = re.search(r"([\d,]+)\s*context", text)
+            if context_match:
+                context_str = context_match.group(1).replace(",", "")
+                context_size = int(context_str)
+            else:
+                context_size = None
+            input_cost_match = re.search(r"\$\s*([\d.]+)\s*/M input tokens", text, re.IGNORECASE)
+            output_cost_match = re.search(r"\$\s*([\d.]+)\s*/M output tokens", text, re.IGNORECASE)
+            input_cost = float(input_cost_match.group(1)) / 1000000 if input_cost_match else None
+            output_cost = float(output_cost_match.group(1)) / 1000000 if output_cost_match else None
+            if context_size is None or input_cost is None or output_cost is None:
+                return {}
+            params = {
+                "max_input_tokens": context_size,
+                "max_tokens": context_size,
+                "max_output_tokens": context_size,
+                "input_cost_per_token": input_cost,
+                "output_cost_per_token": output_cost,
+            }
+            return params
+        except Exception as e:
+            print("Error fetching openrouter info:", str(e))
+            return {}
+
+
+model_info_manager = ModelInfoManager()
+
+
+class Model(ModelSettings):
+    def __init__(
+        self, model, weak_model=None, editor_model=None, editor_edit_format=None, verbose=False
+    ):
+        # Map any alias to its canonical name
+        model = MODEL_ALIASES.get(model, model)
+
+        self.name = model
+        self.verbose = verbose
+
+        self.max_chat_history_tokens = 1024
+        self.weak_model = None
+        self.editor_model = None
+
+        # Find the extra settings
+        self.extra_model_settings = next(
+            (ms for ms in MODEL_SETTINGS if ms.name == "aider/extra_params"), None
+        )
+
+        self.info = self.get_model_info(model)
+
+        # Are all needed keys/params available?
+        res = self.validate_environment()
+        self.missing_keys = res.get("missing_keys")
+        self.keys_in_environment = res.get("keys_in_environment")
+
+        max_input_tokens = self.info.get("max_input_tokens") or 0
+        # Calculate max_chat_history_tokens as 1/16th of max_input_tokens,
+        # with minimum 1k and maximum 8k
+        self.max_chat_history_tokens = min(max(max_input_tokens / 16, 1024), 8192)
+
+        self.configure_model_settings(model)
+        if weak_model is False:
+            self.weak_model_name = None
+        else:
+            self.get_weak_model(weak_model)
+
+        if editor_model is False:
+            self.editor_model_name = None
+        else:
+            self.get_editor_model(editor_model, editor_edit_format)
+
+    def get_model_info(self, model):
+        return model_info_manager.get_model_info(model)
+
+    def _copy_fields(self, source):
+        """Helper to copy fields from a ModelSettings instance to self"""
+        for field in fields(ModelSettings):
+            val = getattr(source, field.name)
+            setattr(self, field.name, val)
+
+        # Handle backward compatibility: if remove_reasoning is set but reasoning_tag isn't,
+        # use remove_reasoning's value for reasoning_tag
+        if self.reasoning_tag is None and self.remove_reasoning is not None:
+            self.reasoning_tag = self.remove_reasoning
+
+    def configure_model_settings(self, model):
+        # Look for exact model match
+        exact_match = False
+        for ms in MODEL_SETTINGS:
+            # direct match, or match "provider/<model>"
+            if model == ms.name:
+                self._copy_fields(ms)
+                exact_match = True
+                break  # Continue to apply overrides
+
+        # Initialize accepts_settings if it's None
+        if self.accepts_settings is None:
+            self.accepts_settings = []
+
+        model = model.lower()
+
+        # If no exact match, try generic settings
+        if not exact_match:
+            self.apply_generic_model_settings(model)
+
+        # Apply override settings last if they exist
+        if (
+            self.extra_model_settings
+            and self.extra_model_settings.extra_params
+            and self.extra_model_settings.name == "aider/extra_params"
+        ):
+            # Initialize extra_params if it doesn't exist
+            if not self.extra_params:
+                self.extra_params = {}
+
+            # Deep merge the extra_params dicts
+            for key, value in self.extra_model_settings.extra_params.items():
+                if isinstance(value, dict) and isinstance(self.extra_params.get(key), dict):
+                    # For nested dicts, merge recursively
+                    self.extra_params[key] = {**self.extra_params[key], **value}
+                else:
+                    # For non-dict values, simply update
+                    self.extra_params[key] = value
+
+        # Ensure OpenRouter models accept thinking_tokens and reasoning_effort
+        if self.name.startswith("openrouter/"):
+            if self.accepts_settings is None:
+                self.accepts_settings = []
+            if "thinking_tokens" not in self.accepts_settings:
+                self.accepts_settings.append("thinking_tokens")
+            if "reasoning_effort" not in self.accepts_settings:
+                self.accepts_settings.append("reasoning_effort")
+
+    def apply_generic_model_settings(self, model):
+        if "/o3-mini" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.use_temperature = False
+            self.system_prompt_prefix = "Formatting re-enabled. "
+            if "reasoning_effort" not in self.accepts_settings:
+                self.accepts_settings.append("reasoning_effort")
+            return  # <--
+
+        if "gpt-4.1-mini" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.reminder = "sys"
+            self.examples_as_sys_msg = False
+            return  # <--
+
+        if "gpt-4.1" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.reminder = "sys"
+            self.examples_as_sys_msg = False
+            return  # <--
+
+        if "/o1-mini" in model:
+            self.use_repo_map = True
+            self.use_temperature = False
+            self.use_system_prompt = False
+            return  # <--
+
+        if "/o1-preview" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.use_temperature = False
+            self.use_system_prompt = False
+            return  # <--
+
+        if "/o1" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.use_temperature = False
+            self.streaming = False
+            self.system_prompt_prefix = "Formatting re-enabled. "
+            if "reasoning_effort" not in self.accepts_settings:
+                self.accepts_settings.append("reasoning_effort")
+            return  # <--
+
+        if "deepseek" in model and "v3" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.reminder = "sys"
+            self.examples_as_sys_msg = True
+            return  # <--
+
+        if "deepseek" in model and ("r1" in model or "reasoning" in model):
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.examples_as_sys_msg = True
+            self.use_temperature = False
+            self.reasoning_tag = "think"
+            return  # <--
+
+        if ("llama3" in model or "llama-3" in model) and "70b" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.send_undo_reply = True
+            self.examples_as_sys_msg = True
+            return  # <--
+
+        if "gpt-4-turbo" in model or ("gpt-4-" in model and "-preview" in model):
+            self.edit_format = "udiff"
+            self.use_repo_map = True
+            self.send_undo_reply = True
+            return  # <--
+
+        if "gpt-4" in model or "claude-3-opus" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.send_undo_reply = True
+            return  # <--
+
+        if "gpt-3.5" in model or "gpt-4" in model:
+            self.reminder = "sys"
+            return  # <--
+
+        if "3-7-sonnet" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.examples_as_sys_msg = True
+            self.reminder = "user"
+            if "thinking_tokens" not in self.accepts_settings:
+                self.accepts_settings.append("thinking_tokens")
+            return  # <--
+
+        if "3.5-sonnet" in model or "3-5-sonnet" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.examples_as_sys_msg = True
+            self.reminder = "user"
+            return  # <--
+
+        if model.startswith("o1-") or "/o1-" in model:
+            self.use_system_prompt = False
+            self.use_temperature = False
+            return  # <--
+
+        if (
+            "qwen" in model
+            and "coder" in model
+            and ("2.5" in model or "2-5" in model)
+            and "32b" in model
+        ):
+            self.edit_format = "diff"
+            self.editor_edit_format = "editor-diff"
+            self.use_repo_map = True
+            return  # <--
+
+        if "qwq" in model and "32b" in model and "preview" not in model:
+            self.edit_format = "diff"
+            self.editor_edit_format = "editor-diff"
+            self.use_repo_map = True
+            self.reasoning_tag = "think"
+            self.examples_as_sys_msg = True
+            self.use_temperature = 0.6
+            self.extra_params = dict(top_p=0.95)
+            return  # <--
+
+        if "qwen3" in model and "235b" in model:
+            self.edit_format = "diff"
+            self.use_repo_map = True
+            self.system_prompt_prefix = "/no_think"
+            self.use_temperature = 0.7
+            self.extra_params = {"top_p": 0.8, "top_k": 20, "min_p": 0.0}
+            return  # <--
+
+        # use the defaults
+        if self.edit_format == "diff":
+            self.use_repo_map = True
+            return  # <--
+
+    def __str__(self):
+        return self.name
+
+    def get_weak_model(self, provided_weak_model_name):
+        # If weak_model_name is provided, override the model settings
+        if provided_weak_model_name:
+            self.weak_model_name = provided_weak_model_name
+
+        if not self.weak_model_name:
+            self.weak_model = self
+            return
+
+        if self.weak_model_name == self.name:
+            self.weak_model = self
+            return
+
+        self.weak_model = Model(
+            self.weak_model_name,
+            weak_model=False,
+        )
+        return self.weak_model
+
+    def commit_message_models(self):
+        return [self.weak_model, self]
+
+    def get_editor_model(self, provided_editor_model_name, editor_edit_format):
+        # If editor_model_name is provided, override the model settings
+        if provided_editor_model_name:
+            self.editor_model_name = provided_editor_model_name
+        if editor_edit_format:
+            self.editor_edit_format = editor_edit_format
+
+        if not self.editor_model_name or self.editor_model_name == self.name:
+            self.editor_model = self
+        else:
+            self.editor_model = Model(
+                self.editor_model_name,
+                editor_model=False,
+            )
+
+        if not self.editor_edit_format:
+            self.editor_edit_format = self.editor_model.edit_format
+            if self.editor_edit_format in ("diff", "whole", "diff-fenced"):
+                self.editor_edit_format = "editor-" + self.editor_edit_format
+
+        return self.editor_model
+
+    def tokenizer(self, text):
+        return litellm.encode(model=self.name, text=text)
+
+    def token_count(self, messages):
+        if type(messages) is list:
+            try:
+                return litellm.token_counter(model=self.name, messages=messages)
+            except Exception as err:
+                print(f"Unable to count tokens: {err}")
+                return 0
+
+        if not self.tokenizer:
+            return
+
+        if type(messages) is str:
+            msgs = messages
+        else:
+            msgs = json.dumps(messages)
+
+        try:
+            return len(self.tokenizer(msgs))
+        except Exception as err:
+            print(f"Unable to count tokens: {err}")
+            return 0
+
+    def token_count_for_image(self, fname):
+        """
+        Calculate the token cost for an image assuming high detail.
+        The token cost is determined by the size of the image.
+        :param fname: The filename of the image.
+        :return: The token cost for the image.
+        """
+        width, height = self.get_image_size(fname)
+
+        # If the image is larger than 2048 in any dimension, scale it down to fit within 2048x2048
+        max_dimension = max(width, height)
+        if max_dimension > 2048:
+            scale_factor = 2048 / max_dimension
+            width = int(width * scale_factor)
+            height = int(height * scale_factor)
+
+        # Scale the image such that the shortest side is 768 pixels long
+        min_dimension = min(width, height)
+        scale_factor = 768 / min_dimension
+        width = int(width * scale_factor)
+        height = int(height * scale_factor)
+
+        # Calculate the number of 512x512 tiles needed to cover the image
+        tiles_width = math.ceil(width / 512)
+        tiles_height = math.ceil(height / 512)
+        num_tiles = tiles_width * tiles_height
+
+        # Each tile costs 170 tokens, and there's an additional fixed cost of 85 tokens
+        token_cost = num_tiles * 170 + 85
+        return token_cost
+
+    def get_image_size(self, fname):
+        """
+        Retrieve the size of an image.
+        :param fname: The filename of the image.
+        :return: A tuple (width, height) representing the image size in pixels.
+        """
+        with Image.open(fname) as img:
+            return img.size
+
+    def fast_validate_environment(self):
+        """Fast path for common models. Avoids forcing litellm import."""
+
+        model = self.name
+
+        pieces = model.split("/")
+        if len(pieces) > 1:
+            provider = pieces[0]
+        else:
+            provider = None
+
+        keymap = dict(
+            openrouter="OPENROUTER_API_KEY",
+            openai="OPENAI_API_KEY",
+            deepseek="DEEPSEEK_API_KEY",
+            gemini="GEMINI_API_KEY",
+            anthropic="ANTHROPIC_API_KEY",
+            groq="GROQ_API_KEY",
+            fireworks_ai="FIREWORKS_API_KEY",
+        )
+        var = None
+        if model in OPENAI_MODELS:
+            var = "OPENAI_API_KEY"
+        elif model in ANTHROPIC_MODELS:
+            var = "ANTHROPIC_API_KEY"
+        else:
+            var = keymap.get(provider)
+
+        if var and os.environ.get(var):
+            return dict(keys_in_environment=[var], missing_keys=[])
+
+    def validate_environment(self):
+        res = self.fast_validate_environment()
+        if res:
+            return res
+
+        # https://github.com/BerriAI/litellm/issues/3190
+
+        model = self.name
+        res = litellm.validate_environment(model)
+
+        # If missing AWS credential keys but AWS_PROFILE is set, consider AWS credentials valid
+        if res["missing_keys"] and any(
+            key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] for key in res["missing_keys"]
+        ):
+            if model.startswith("bedrock/") or model.startswith("us.anthropic."):
+                if os.environ.get("AWS_PROFILE"):
+                    res["missing_keys"] = [
+                        k
+                        for k in res["missing_keys"]
+                        if k not in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
+                    ]
+                    if not res["missing_keys"]:
+                        res["keys_in_environment"] = True
+
+        if res["keys_in_environment"]:
+            return res
+        if res["missing_keys"]:
+            return res
+
+        provider = self.info.get("litellm_provider", "").lower()
+        if provider == "cohere_chat":
+            return validate_variables(["COHERE_API_KEY"])
+        if provider == "gemini":
+            return validate_variables(["GEMINI_API_KEY"])
+        if provider == "groq":
+            return validate_variables(["GROQ_API_KEY"])
+
+        return res
+
+    def get_repo_map_tokens(self):
+        map_tokens = 1024
+        max_inp_tokens = self.info.get("max_input_tokens")
+        if max_inp_tokens:
+            map_tokens = max_inp_tokens / 8
+            map_tokens = min(map_tokens, 4096)
+            map_tokens = max(map_tokens, 1024)
+        return map_tokens
+
+    def set_reasoning_effort(self, effort):
+        """Set the reasoning effort parameter for models that support it"""
+        if effort is not None:
+            if self.name.startswith("openrouter/"):
+                if not self.extra_params:
+                    self.extra_params = {}
+                if "extra_body" not in self.extra_params:
+                    self.extra_params["extra_body"] = {}
+                self.extra_params["extra_body"]["reasoning"] = {"effort": effort}
+            else:
+                if not self.extra_params:
+                    self.extra_params = {}
+                if "extra_body" not in self.extra_params:
+                    self.extra_params["extra_body"] = {}
+                self.extra_params["extra_body"]["reasoning_effort"] = effort
+
+    def parse_token_value(self, value):
+        """
+        Parse a token value string into an integer.
+        Accepts formats: 8096, "8k", "10.5k", "0.5M", "10K", etc.
+
+        Args:
+            value: String or int token value
+
+        Returns:
+            Integer token value
+        """
+        if isinstance(value, int):
+            return value
+
+        if not isinstance(value, str):
+            return int(value)  # Try to convert to int
+
+        value = value.strip().upper()
+
+        if value.endswith("K"):
+            multiplier = 1024
+            value = value[:-1]
+        elif value.endswith("M"):
+            multiplier = 1024 * 1024
+            value = value[:-1]
+        else:
+            multiplier = 1
+
+        # Convert to float first to handle decimal values like "10.5k"
+        return int(float(value) * multiplier)
+
+    def set_thinking_tokens(self, value):
+        """
+        Set the thinking token budget for models that support it.
+        Accepts formats: 8096, "8k", "10.5k", "0.5M", "10K", etc.
+        """
+        if value is not None:
+            num_tokens = self.parse_token_value(value)
+            self.use_temperature = False
+            if not self.extra_params:
+                self.extra_params = {}
+
+            # OpenRouter models use 'reasoning' instead of 'thinking'
+            if self.name.startswith("openrouter/"):
+                if "extra_body" not in self.extra_params:
+                    self.extra_params["extra_body"] = {}
+                self.extra_params["extra_body"]["reasoning"] = {"max_tokens": num_tokens}
+            else:
+                self.extra_params["thinking"] = {"type": "enabled", "budget_tokens": num_tokens}
+
+    def get_raw_thinking_tokens(self):
+        """Get formatted thinking token budget if available"""
+        budget = None
+
+        if self.extra_params:
+            # Check for OpenRouter reasoning format
+            if self.name.startswith("openrouter/"):
+                if (
+                    "extra_body" in self.extra_params
+                    and "reasoning" in self.extra_params["extra_body"]
+                    and "max_tokens" in self.extra_params["extra_body"]["reasoning"]
+                ):
+                    budget = self.extra_params["extra_body"]["reasoning"]["max_tokens"]
+            # Check for standard thinking format
+            elif (
+                "thinking" in self.extra_params and "budget_tokens" in self.extra_params["thinking"]
+            ):
+                budget = self.extra_params["thinking"]["budget_tokens"]
+
+        return budget
+
+    def get_thinking_tokens(self):
+        budget = self.get_raw_thinking_tokens()
+
+        if budget is not None:
+            # Format as xx.yK for thousands, xx.yM for millions
+            if budget >= 1024 * 1024:
+                value = budget / (1024 * 1024)
+                if value == int(value):
+                    return f"{int(value)}M"
+                else:
+                    return f"{value:.1f}M"
+            else:
+                value = budget / 1024
+                if value == int(value):
+                    return f"{int(value)}k"
+                else:
+                    return f"{value:.1f}k"
+        return None
+
+    def get_reasoning_effort(self):
+        """Get reasoning effort value if available"""
+        if self.extra_params:
+            # Check for OpenRouter reasoning format
+            if self.name.startswith("openrouter/"):
+                if (
+                    "extra_body" in self.extra_params
+                    and "reasoning" in self.extra_params["extra_body"]
+                    and "effort" in self.extra_params["extra_body"]["reasoning"]
+                ):
+                    return self.extra_params["extra_body"]["reasoning"]["effort"]
+            # Check for standard reasoning_effort format (e.g. in extra_body)
+            elif (
+                "extra_body" in self.extra_params
+                and "reasoning_effort" in self.extra_params["extra_body"]
+            ):
+                return self.extra_params["extra_body"]["reasoning_effort"]
+        return None
+
+    def is_deepseek_r1(self):
+        name = self.name.lower()
+        if "deepseek" not in name:
+            return
+        return "r1" in name or "reasoner" in name
+
+    def is_ollama(self):
+        return self.name.startswith("ollama/") or self.name.startswith("ollama_chat/")
+
+    def github_copilot_token_to_open_ai_key(self, extra_headers):
+        # check to see if there's an openai api key
+        # If so, check to see if it's expired
+        openai_api_key = "OPENAI_API_KEY"
+
+        if openai_api_key not in os.environ or (
+            int(dict(x.split("=") for x in os.environ[openai_api_key].split(";"))["exp"])
+            < int(datetime.now().timestamp())
+        ):
+            import requests
+
+            class GitHubCopilotTokenError(Exception):
+                """Custom exception for GitHub Copilot token-related errors."""
+
+                pass
+
+            # Validate GitHub Copilot token exists
+            if "GITHUB_COPILOT_TOKEN" not in os.environ:
+                raise KeyError("GITHUB_COPILOT_TOKEN environment variable not found")
+
+            github_token = os.environ["GITHUB_COPILOT_TOKEN"]
+            if not github_token.strip():
+                raise KeyError("GITHUB_COPILOT_TOKEN environment variable is empty")
+
+            headers = {
+                "Authorization": f"Bearer {os.environ['GITHUB_COPILOT_TOKEN']}",
+                "Editor-Version": extra_headers["Editor-Version"],
+                "Copilot-Integration-Id": extra_headers["Copilot-Integration-Id"],
+                "Content-Type": "application/json",
+            }
+
+            url = "https://api.github.com/copilot_internal/v2/token"
+            res = requests.get(url, headers=headers)
+            if res.status_code != 200:
+                safe_headers = {k: v for k, v in headers.items() if k != "Authorization"}
+                token_preview = github_token[:5] + "..." if len(github_token) >= 5 else github_token
+                safe_headers["Authorization"] = f"Bearer {token_preview}"
+                raise GitHubCopilotTokenError(
+                    f"GitHub Copilot API request failed (Status: {res.status_code})\n"
+                    f"URL: {url}\n"
+                    f"Headers: {json.dumps(safe_headers, indent=2)}\n"
+                    f"JSON: {res.text}"
+                )
+
+            response_data = res.json()
+            token = response_data.get("token")
+            if not token:
+                raise GitHubCopilotTokenError("Response missing 'token' field")
+
+            os.environ[openai_api_key] = token
+
+    def send_completion(self, messages, functions, stream, temperature=None):
+        if os.environ.get("AIDER_SANITY_CHECK_TURNS"):
+            sanity_check_messages(messages)
+
+        if self.is_deepseek_r1():
+            messages = ensure_alternating_roles(messages)
+
+        kwargs = dict(
+            model=self.name,
+            stream=stream,
+        )
+
+        if self.use_temperature is not False:
+            if temperature is None:
+                if isinstance(self.use_temperature, bool):
+                    temperature = 0
+                else:
+                    temperature = float(self.use_temperature)
+
+            kwargs["temperature"] = temperature
+
+        if functions is not None:
+            function = functions[0]
+            kwargs["tools"] = [dict(type="function", function=function)]
+            kwargs["tool_choice"] = {"type": "function", "function": {"name": function["name"]}}
+        if self.extra_params:
+            kwargs.update(self.extra_params)
+        if self.is_ollama() and "num_ctx" not in kwargs:
+            num_ctx = int(self.token_count(messages) * 1.25) + 8192
+            kwargs["num_ctx"] = num_ctx
+        key = json.dumps(kwargs, sort_keys=True).encode()
+
+        # dump(kwargs)
+
+        hash_object = hashlib.sha1(key)
+        if "timeout" not in kwargs:
+            kwargs["timeout"] = request_timeout
+        if self.verbose:
+            dump(kwargs)
+        kwargs["messages"] = messages
+
+        # Are we using github copilot?
+        if "GITHUB_COPILOT_TOKEN" in os.environ:
+            if "extra_headers" not in kwargs:
+                kwargs["extra_headers"] = {
+                    "Editor-Version": f"aider/{__version__}",
+                    "Copilot-Integration-Id": "vscode-chat",
+                }
+
+            self.github_copilot_token_to_open_ai_key(kwargs["extra_headers"])
+
+        res = litellm.completion(**kwargs)
+        return hash_object, res
+
+    def simple_send_with_retries(self, messages):
+        from aider.exceptions import LiteLLMExceptions
+
+        litellm_ex = LiteLLMExceptions()
+        if "deepseek-reasoner" in self.name:
+            messages = ensure_alternating_roles(messages)
+        retry_delay = 0.125
+
+        if self.verbose:
+            dump(messages)
+
+        while True:
+            try:
+                kwargs = {
+                    "messages": messages,
+                    "functions": None,
+                    "stream": False,
+                }
+
+                _hash, response = self.send_completion(**kwargs)
+                if not response or not hasattr(response, "choices") or not response.choices:
+                    return None
+                res = response.choices[0].message.content
+                from aider.reasoning_tags import remove_reasoning_content
+
+                return remove_reasoning_content(res, self.reasoning_tag)
+
+            except litellm_ex.exceptions_tuple() as err:
+                ex_info = litellm_ex.get_ex_info(err)
+                print(str(err))
+                if ex_info.description:
+                    print(ex_info.description)
+                should_retry = ex_info.retry
+                if should_retry:
+                    retry_delay *= 2
+                    if retry_delay > RETRY_TIMEOUT:
+                        should_retry = False
+                if not should_retry:
+                    return None
+                print(f"Retrying in {retry_delay:.1f} seconds...")
+                time.sleep(retry_delay)
+                continue
+            except AttributeError:
+                return None
+
+
+def register_models(model_settings_fnames):
+    files_loaded = []
+    for model_settings_fname in model_settings_fnames:
+        if not os.path.exists(model_settings_fname):
+            continue
+
+        if not Path(model_settings_fname).read_text().strip():
+            continue
+
+        try:
+            with open(model_settings_fname, "r") as model_settings_file:
+                model_settings_list = yaml.safe_load(model_settings_file)
+
+            for model_settings_dict in model_settings_list:
+                model_settings = ModelSettings(**model_settings_dict)
+                existing_model_settings = next(
+                    (ms for ms in MODEL_SETTINGS if ms.name == model_settings.name), None
+                )
+
+                if existing_model_settings:
+                    MODEL_SETTINGS.remove(existing_model_settings)
+                MODEL_SETTINGS.append(model_settings)
+        except Exception as e:
+            raise Exception(f"Error loading model settings from {model_settings_fname}: {e}")
+        files_loaded.append(model_settings_fname)
+
+    return files_loaded
+
+
+def register_litellm_models(model_fnames):
+    files_loaded = []
+    for model_fname in model_fnames:
+        if not os.path.exists(model_fname):
+            continue
+
+        try:
+            data = Path(model_fname).read_text()
+            if not data.strip():
+                continue
+            model_def = json5.loads(data)
+            if not model_def:
+                continue
+
+            # Defer registration with litellm to faster path.
+            model_info_manager.local_model_metadata.update(model_def)
+        except Exception as e:
+            raise Exception(f"Error loading model definition from {model_fname}: {e}")
+
+        files_loaded.append(model_fname)
+
+    return files_loaded
+
+
+def validate_variables(vars):
+    missing = []
+    for var in vars:
+        if var not in os.environ:
+            missing.append(var)
+    if missing:
+        return dict(keys_in_environment=False, missing_keys=missing)
+    return dict(keys_in_environment=True, missing_keys=missing)
+
+
+def sanity_check_models(io, main_model):
+    problem_main = sanity_check_model(io, main_model)
+
+    problem_weak = None
+    if main_model.weak_model and main_model.weak_model is not main_model:
+        problem_weak = sanity_check_model(io, main_model.weak_model)
+
+    problem_editor = None
+    if (
+        main_model.editor_model
+        and main_model.editor_model is not main_model
+        and main_model.editor_model is not main_model.weak_model
+    ):
+        problem_editor = sanity_check_model(io, main_model.editor_model)
+
+    return problem_main or problem_weak or problem_editor
+
+
+def sanity_check_model(io, model):
+    show = False
+
+    if model.missing_keys:
+        show = True
+        io.tool_warning(f"Warning: {model} expects these environment variables")
+        for key in model.missing_keys:
+            value = os.environ.get(key, "")
+            status = "Set" if value else "Not set"
+            io.tool_output(f"- {key}: {status}")
+
+        if platform.system() == "Windows":
+            io.tool_output(
+                "Note: You may need to restart your terminal or command prompt for `setx` to take"
+                " effect."
+            )
+
+    elif not model.keys_in_environment:
+        show = True
+        io.tool_warning(f"Warning for {model}: Unknown which environment variables are required.")
+
+    # Check for model-specific dependencies
+    check_for_dependencies(io, model.name)
+
+    if not model.info:
+        show = True
+        io.tool_warning(
+            f"Warning for {model}: Unknown context window size and costs, using sane defaults."
+        )
+
+        possible_matches = fuzzy_match_models(model.name)
+        if possible_matches:
+            io.tool_output("Did you mean one of these?")
+            for match in possible_matches:
+                io.tool_output(f"- {match}")
+
+    return show
+
+
+def check_for_dependencies(io, model_name):
+    """
+    Check for model-specific dependencies and install them if needed.
+
+    Args:
+        io: The IO object for user interaction
+        model_name: The name of the model to check dependencies for
+    """
+    # Check if this is a Bedrock model and ensure boto3 is installed
+    if model_name.startswith("bedrock/"):
+        check_pip_install_extra(
+            io, "boto3", "AWS Bedrock models require the boto3 package.", ["boto3"]
+        )
+
+    # Check if this is a Vertex AI model and ensure google-cloud-aiplatform is installed
+    elif model_name.startswith("vertex_ai/"):
+        check_pip_install_extra(
+            io,
+            "google.cloud.aiplatform",
+            "Google Vertex AI models require the google-cloud-aiplatform package.",
+            ["google-cloud-aiplatform"],
+        )
+
+
+def fuzzy_match_models(name):
+    name = name.lower()
+
+    chat_models = set()
+    model_metadata = list(litellm.model_cost.items())
+    model_metadata += list(model_info_manager.local_model_metadata.items())
+
+    for orig_model, attrs in model_metadata:
+        model = orig_model.lower()
+        if attrs.get("mode") != "chat":
+            continue
+        provider = attrs.get("litellm_provider", "").lower()
+        if not provider:
+            continue
+        provider += "/"
+
+        if model.startswith(provider):
+            fq_model = orig_model
+        else:
+            fq_model = provider + orig_model
+
+        chat_models.add(fq_model)
+        chat_models.add(orig_model)
+
+    chat_models = sorted(chat_models)
+    # exactly matching model
+    # matching_models = [
+    #     (fq,m) for fq,m in chat_models
+    #     if name == fq or name == m
+    # ]
+    # if matching_models:
+    #     return matching_models
+
+    # Check for model names containing the name
+    matching_models = [m for m in chat_models if name in m]
+    if matching_models:
+        return sorted(set(matching_models))
+
+    # Check for slight misspellings
+    models = set(chat_models)
+    matching_models = difflib.get_close_matches(name, models, n=3, cutoff=0.8)
+
+    return sorted(set(matching_models))
+
+
+def print_matching_models(io, search):
+    matches = fuzzy_match_models(search)
+    if matches:
+        io.tool_output(f'Models which match "{search}":')
+        for model in matches:
+            io.tool_output(f"- {model}")
+    else:
+        io.tool_output(f'No models match "{search}".')
+
+
+def get_model_settings_as_yaml():
+    from dataclasses import fields
+
+    import yaml
+
+    model_settings_list = []
+    # Add default settings first with all field values
+    defaults = {}
+    for field in fields(ModelSettings):
+        defaults[field.name] = field.default
+    defaults["name"] = "(default values)"
+    model_settings_list.append(defaults)
+
+    # Sort model settings by name
+    for ms in sorted(MODEL_SETTINGS, key=lambda x: x.name):
+        # Create dict with explicit field order
+        model_settings_dict = {}
+        for field in fields(ModelSettings):
+            value = getattr(ms, field.name)
+            if value != field.default:
+                model_settings_dict[field.name] = value
+        model_settings_list.append(model_settings_dict)
+        # Add blank line between entries
+        model_settings_list.append(None)
+
+    # Filter out None values before dumping
+    yaml_str = yaml.dump(
+        [ms for ms in model_settings_list if ms is not None],
+        default_flow_style=False,
+        sort_keys=False,  # Preserve field order from dataclass
+    )
+    # Add actual blank lines between entries
+    return yaml_str.replace("\n- ", "\n\n- ")
+
+
+def main():
+    if len(sys.argv) < 2:
+        print("Usage: python models.py <model_name> or python models.py --yaml")
+        sys.exit(1)
+
+    if sys.argv[1] == "--yaml":
+        yaml_string = get_model_settings_as_yaml()
+        print(yaml_string)
+    else:
+        model_name = sys.argv[1]
+        matching_models = fuzzy_match_models(model_name)
+
+        if matching_models:
+            print(f"Matching models for '{model_name}':")
+            for model in matching_models:
+                print(model)
+        else:
+            print(f"No matching models found for '{model_name}'.")
+
+
+if __name__ == "__main__":
+    main()
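
For orientation, below is a minimal usage sketch (not shipped in the wheel) of how the alias table and token-budget helpers in this file behave. It assumes the package is installed so that `aider.models` is importable and that ANTHROPIC_API_KEY is set; constructing a `Model` may read environment variables and fetch model metadata over the network.

    # Illustrative sketch only; the surrounding setup is hypothetical.
    from aider.models import Model

    # __init__ maps aliases to canonical names before settings lookup
    m = Model("sonnet")
    print(m.name)  # anthropic/claude-sonnet-4-20250514

    # parse_token_value() treats "k" as 1024 and "M" as 1024 * 1024
    assert m.parse_token_value("8k") == 8192
    assert m.parse_token_value("0.5M") == 524288

    # set_thinking_tokens() stores the budget in extra_params;
    # get_thinking_tokens() formats it back for display
    m.set_thinking_tokens("8k")
    print(m.get_thinking_tokens())  # 8k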