code-muse 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (394)
  1. code_muse/__init__.py +26 -0
  2. code_muse/__main__.py +10 -0
  3. code_muse/agents/__init__.py +31 -0
  4. code_muse/agents/_builder.py +214 -0
  5. code_muse/agents/_compaction.py +506 -0
  6. code_muse/agents/_diagnostics.py +171 -0
  7. code_muse/agents/_history.py +382 -0
  8. code_muse/agents/_key_listeners.py +148 -0
  9. code_muse/agents/_non_streaming_render.py +148 -0
  10. code_muse/agents/_runtime.py +596 -0
  11. code_muse/agents/agent_creator_agent.py +603 -0
  12. code_muse/agents/agent_helios.py +47 -0
  13. code_muse/agents/agent_manager.py +740 -0
  14. code_muse/agents/agent_muse.py +78 -0
  15. code_muse/agents/agent_planning.py +44 -0
  16. code_muse/agents/agent_qa_melpomene.py +207 -0
  17. code_muse/agents/base_agent.py +194 -0
  18. code_muse/agents/event_stream_handler.py +361 -0
  19. code_muse/agents/json_agent.py +201 -0
  20. code_muse/agents/prompt_v3.py +521 -0
  21. code_muse/agents/subagent_stream_handler.py +273 -0
  22. code_muse/callbacks.py +941 -0
  23. code_muse/chatgpt_codex_client.py +333 -0
  24. code_muse/claude_cache_client.py +853 -0
  25. code_muse/cli_runner/__init__.py +319 -0
  26. code_muse/cli_runner/args.py +63 -0
  27. code_muse/cli_runner/loop.py +510 -0
  28. code_muse/cli_runner/resume.py +72 -0
  29. code_muse/cli_runner/runner.py +161 -0
  30. code_muse/command_line/__init__.py +1 -0
  31. code_muse/command_line/add_model_menu.py +1331 -0
  32. code_muse/command_line/agent_menu.py +674 -0
  33. code_muse/command_line/attachments.py +397 -0
  34. code_muse/command_line/autosave_menu.py +709 -0
  35. code_muse/command_line/clipboard.py +528 -0
  36. code_muse/command_line/colors_menu.py +530 -0
  37. code_muse/command_line/command_handler.py +262 -0
  38. code_muse/command_line/command_registry.py +150 -0
  39. code_muse/command_line/config_commands.py +711 -0
  40. code_muse/command_line/core_commands.py +740 -0
  41. code_muse/command_line/diff_menu.py +865 -0
  42. code_muse/command_line/file_path_completion.py +73 -0
  43. code_muse/command_line/load_context_completion.py +57 -0
  44. code_muse/command_line/model_picker_completion.py +512 -0
  45. code_muse/command_line/model_settings_menu.py +983 -0
  46. code_muse/command_line/onboarding_slides.py +162 -0
  47. code_muse/command_line/onboarding_wizard.py +337 -0
  48. code_muse/command_line/pagination.py +41 -0
  49. code_muse/command_line/pin_command_completion.py +329 -0
  50. code_muse/command_line/prompt_toolkit_completion.py +886 -0
  51. code_muse/command_line/session_commands.py +304 -0
  52. code_muse/command_line/shell_passthrough.py +145 -0
  53. code_muse/command_line/skills_completion.py +158 -0
  54. code_muse/command_line/types.py +18 -0
  55. code_muse/command_line/uc_menu.py +908 -0
  56. code_muse/command_line/utils.py +105 -0
  57. code_muse/command_line/wiggum_state.py +77 -0
  58. code_muse/config.py +1138 -0
  59. code_muse/config_agent.py +168 -0
  60. code_muse/config_appearance.py +241 -0
  61. code_muse/config_model.py +357 -0
  62. code_muse/config_security.py +73 -0
  63. code_muse/error_logging.py +132 -0
  64. code_muse/evals/__init__.py +35 -0
  65. code_muse/evals/eval_helpers.py +81 -0
  66. code_muse/evals/eval_runner.py +299 -0
  67. code_muse/evals/sample_evals/__init__.py +1 -0
  68. code_muse/evals/sample_evals/eval_frugal_reads.py +59 -0
  69. code_muse/evals/sample_evals/eval_memory_planning.py +31 -0
  70. code_muse/evals/sample_evals/eval_shell_efficiency.py +39 -0
  71. code_muse/evals/sample_evals/eval_tool_masking.py +33 -0
  72. code_muse/fs_scan_cache/__init__.py +31 -0
  73. code_muse/fs_scan_cache/invalidation_hooks.py +89 -0
  74. code_muse/fs_scan_cache/scan_cache_core.cpython-314-darwin.so +0 -0
  75. code_muse/fs_scan_cache/scan_cache_core.pyx +203 -0
  76. code_muse/fs_scan_cache/tool_integration.py +309 -0
  77. code_muse/fs_scan_cache/ttl_policy.py +44 -0
  78. code_muse/gemini_code_assist.py +383 -0
  79. code_muse/gemini_model.py +838 -0
  80. code_muse/hook_engine/README.md +105 -0
  81. code_muse/hook_engine/__init__.py +21 -0
  82. code_muse/hook_engine/aliases.py +153 -0
  83. code_muse/hook_engine/engine.py +221 -0
  84. code_muse/hook_engine/executor.py +347 -0
  85. code_muse/hook_engine/matcher.py +154 -0
  86. code_muse/hook_engine/models.py +245 -0
  87. code_muse/hook_engine/registry.py +114 -0
  88. code_muse/hook_engine/trust.py +268 -0
  89. code_muse/hook_engine/validator.py +144 -0
  90. code_muse/http_utils.py +360 -0
  91. code_muse/keymap.py +128 -0
  92. code_muse/list_filtering.py +26 -0
  93. code_muse/main.py +10 -0
  94. code_muse/messaging/__init__.py +259 -0
  95. code_muse/messaging/bus.py +621 -0
  96. code_muse/messaging/commands.py +166 -0
  97. code_muse/messaging/markdown_patches.py +57 -0
  98. code_muse/messaging/message_queue.py +397 -0
  99. code_muse/messaging/messages.py +591 -0
  100. code_muse/messaging/queue_console.py +269 -0
  101. code_muse/messaging/renderers.py +308 -0
  102. code_muse/messaging/rich_renderer.py +1158 -0
  103. code_muse/messaging/shimmer.py +154 -0
  104. code_muse/messaging/spinner/__init__.py +87 -0
  105. code_muse/messaging/spinner/console_spinner.py +250 -0
  106. code_muse/messaging/spinner/spinner_base.py +82 -0
  107. code_muse/messaging/subagent_console.py +458 -0
  108. code_muse/model_factory.py +1203 -0
  109. code_muse/model_switching.py +59 -0
  110. code_muse/model_utils.py +156 -0
  111. code_muse/models.json +66 -0
  112. code_muse/models_cache/__init__.py +26 -0
  113. code_muse/models_cache/blocking_lru_cache.py +98 -0
  114. code_muse/models_cache/cache_writer.py +86 -0
  115. code_muse/models_cache/sha256_hash.cpython-314-darwin.so +0 -0
  116. code_muse/models_cache/sha256_hash.pyx +34 -0
  117. code_muse/models_cache/startup_integration.py +75 -0
  118. code_muse/models_dev_api.json +1 -0
  119. code_muse/models_dev_parser.py +590 -0
  120. code_muse/motion.py +126 -0
  121. code_muse/plugins/__init__.py +471 -0
  122. code_muse/plugins/agent_skills/__init__.py +32 -0
  123. code_muse/plugins/agent_skills/config.py +176 -0
  124. code_muse/plugins/agent_skills/discovery.py +309 -0
  125. code_muse/plugins/agent_skills/downloader.py +389 -0
  126. code_muse/plugins/agent_skills/installer.py +19 -0
  127. code_muse/plugins/agent_skills/metadata.py +293 -0
  128. code_muse/plugins/agent_skills/prompt_builder.py +66 -0
  129. code_muse/plugins/agent_skills/register_callbacks.py +298 -0
  130. code_muse/plugins/agent_skills/remote_catalog.py +320 -0
  131. code_muse/plugins/agent_skills/skill_catalog.py +254 -0
  132. code_muse/plugins/agent_skills/skills_install_menu.py +690 -0
  133. code_muse/plugins/agent_skills/skills_menu.py +791 -0
  134. code_muse/plugins/autonomous_memory/__init__.py +39 -0
  135. code_muse/plugins/autonomous_memory/bm25_scorer.cpython-314-darwin.so +0 -0
  136. code_muse/plugins/autonomous_memory/bm25_scorer.cpython-314-x86_64-linux-gnu.so +0 -0
  137. code_muse/plugins/autonomous_memory/bm25_scorer.pyx +291 -0
  138. code_muse/plugins/autonomous_memory/consolidation.py +82 -0
  139. code_muse/plugins/autonomous_memory/extraction.py +382 -0
  140. code_muse/plugins/autonomous_memory/lease_lock.py +105 -0
  141. code_muse/plugins/autonomous_memory/memory_injection.py +59 -0
  142. code_muse/plugins/autonomous_memory/register_callbacks.py +268 -0
  143. code_muse/plugins/autonomous_memory/secret_scanner.py +62 -0
  144. code_muse/plugins/autonomous_memory/session_scanner.py +163 -0
  145. code_muse/plugins/aws_bedrock/__init__.py +14 -0
  146. code_muse/plugins/aws_bedrock/config.py +99 -0
  147. code_muse/plugins/aws_bedrock/register_callbacks.py +241 -0
  148. code_muse/plugins/aws_bedrock/utils.py +153 -0
  149. code_muse/plugins/azure_foundry/README.md +238 -0
  150. code_muse/plugins/azure_foundry/__init__.py +15 -0
  151. code_muse/plugins/azure_foundry/config.py +125 -0
  152. code_muse/plugins/azure_foundry/discovery.py +187 -0
  153. code_muse/plugins/azure_foundry/register_callbacks.py +495 -0
  154. code_muse/plugins/azure_foundry/token.py +180 -0
  155. code_muse/plugins/azure_foundry/utils.py +345 -0
  156. code_muse/plugins/build_filter/__init__.py +1 -0
  157. code_muse/plugins/build_filter/register_callbacks.py +201 -0
  158. code_muse/plugins/build_filter/strategies/__init__.py +1 -0
  159. code_muse/plugins/build_filter/strategies/build.py +397 -0
  160. code_muse/plugins/chatgpt_oauth/__init__.py +6 -0
  161. code_muse/plugins/chatgpt_oauth/config.py +52 -0
  162. code_muse/plugins/chatgpt_oauth/oauth_flow.py +338 -0
  163. code_muse/plugins/chatgpt_oauth/register_callbacks.py +172 -0
  164. code_muse/plugins/chatgpt_oauth/test_plugin.py +301 -0
  165. code_muse/plugins/chatgpt_oauth/utils.py +538 -0
  166. code_muse/plugins/checkpointing/__init__.py +29 -0
  167. code_muse/plugins/checkpointing/checkpoint_hook.py +51 -0
  168. code_muse/plugins/checkpointing/conversation_snapshots.py +117 -0
  169. code_muse/plugins/checkpointing/register_callbacks.py +51 -0
  170. code_muse/plugins/checkpointing/restore_command.py +263 -0
  171. code_muse/plugins/checkpointing/rewind_shortcut.py +88 -0
  172. code_muse/plugins/checkpointing/shadow_git.py +90 -0
  173. code_muse/plugins/claude_code_hooks/__init__.py +1 -0
  174. code_muse/plugins/claude_code_hooks/config.py +188 -0
  175. code_muse/plugins/claude_code_hooks/register_callbacks.py +208 -0
  176. code_muse/plugins/claude_code_oauth/README.md +167 -0
  177. code_muse/plugins/claude_code_oauth/SETUP.md +93 -0
  178. code_muse/plugins/claude_code_oauth/__init__.py +25 -0
  179. code_muse/plugins/claude_code_oauth/config.py +52 -0
  180. code_muse/plugins/claude_code_oauth/fast_mode.py +124 -0
  181. code_muse/plugins/claude_code_oauth/prompt_handler.py +63 -0
  182. code_muse/plugins/claude_code_oauth/register_callbacks.py +547 -0
  183. code_muse/plugins/claude_code_oauth/test_fast_mode.py +165 -0
  184. code_muse/plugins/claude_code_oauth/test_plugin.py +283 -0
  185. code_muse/plugins/claude_code_oauth/token_refresh_heartbeat.py +237 -0
  186. code_muse/plugins/claude_code_oauth/utils.py +664 -0
  187. code_muse/plugins/copilot_auth/__init__.py +11 -0
  188. code_muse/plugins/copilot_auth/config.py +91 -0
  189. code_muse/plugins/copilot_auth/reasoning_client.py +409 -0
  190. code_muse/plugins/copilot_auth/register_callbacks.py +461 -0
  191. code_muse/plugins/copilot_auth/utils.py +584 -0
  192. code_muse/plugins/custom_commands/__init__.py +14 -0
  193. code_muse/plugins/custom_commands/args_injection.py +82 -0
  194. code_muse/plugins/custom_commands/command_discovery.py +89 -0
  195. code_muse/plugins/custom_commands/command_toml_schema.py +71 -0
  196. code_muse/plugins/custom_commands/register_callbacks.py +176 -0
  197. code_muse/plugins/customizable_commands/__init__.py +0 -0
  198. code_muse/plugins/customizable_commands/register_callbacks.py +136 -0
  199. code_muse/plugins/destructive_command_guard/__init__.py +14 -0
  200. code_muse/plugins/destructive_command_guard/detector.py +375 -0
  201. code_muse/plugins/destructive_command_guard/register_callbacks.py +148 -0
  202. code_muse/plugins/example_custom_command/README.md +280 -0
  203. code_muse/plugins/example_custom_command/register_callbacks.py +51 -0
  204. code_muse/plugins/file_permission_handler/__init__.py +4 -0
  205. code_muse/plugins/file_permission_handler/register_callbacks.py +441 -0
  206. code_muse/plugins/filter_engine/__init__.py +30 -0
  207. code_muse/plugins/filter_engine/classifier.py +153 -0
  208. code_muse/plugins/filter_engine/content_detector.py +184 -0
  209. code_muse/plugins/filter_engine/dispatcher.py +244 -0
  210. code_muse/plugins/filter_engine/register_callbacks.py +188 -0
  211. code_muse/plugins/filter_engine/registry.py +279 -0
  212. code_muse/plugins/filter_engine/strategies/__init__.py +8 -0
  213. code_muse/plugins/filter_engine/strategies/ast_compressor.cpython-314-darwin.so +0 -0
  214. code_muse/plugins/filter_engine/strategies/ast_compressor.cpython-314-x86_64-linux-gnu.so +0 -0
  215. code_muse/plugins/filter_engine/strategies/ast_compressor.pyx +348 -0
  216. code_muse/plugins/filter_engine/strategies/ast_parser.py +167 -0
  217. code_muse/plugins/filter_engine/strategies/code.cpython-314-darwin.so +0 -0
  218. code_muse/plugins/filter_engine/strategies/code.cpython-314-x86_64-linux-gnu.so +0 -0
  219. code_muse/plugins/filter_engine/strategies/code.pyx +584 -0
  220. code_muse/plugins/filter_engine/strategies/git.cpython-314-darwin.so +0 -0
  221. code_muse/plugins/filter_engine/strategies/git.cpython-314-x86_64-linux-gnu.so +0 -0
  222. code_muse/plugins/filter_engine/strategies/git.pyx +438 -0
  223. code_muse/plugins/filter_engine/strategies/json_compressor.cpython-314-darwin.so +0 -0
  224. code_muse/plugins/filter_engine/strategies/json_compressor.pyx +253 -0
  225. code_muse/plugins/filter_engine/strategies/json_patterns.cpython-314-darwin.so +0 -0
  226. code_muse/plugins/filter_engine/strategies/json_patterns.pyx +178 -0
  227. code_muse/plugins/filter_engine/strategies/lint.cpython-314-darwin.so +0 -0
  228. code_muse/plugins/filter_engine/strategies/lint.cpython-314-x86_64-linux-gnu.so +0 -0
  229. code_muse/plugins/filter_engine/strategies/lint.pyx +626 -0
  230. code_muse/plugins/filter_engine/strategies/test.cpython-314-darwin.so +0 -0
  231. code_muse/plugins/filter_engine/strategies/test.cpython-314-x86_64-linux-gnu.so +0 -0
  232. code_muse/plugins/filter_engine/strategies/test.pyx +431 -0
  233. code_muse/plugins/filter_engine/verbosity.py +63 -0
  234. code_muse/plugins/force_push_guard/__init__.py +5 -0
  235. code_muse/plugins/force_push_guard/detector.py +96 -0
  236. code_muse/plugins/force_push_guard/register_callbacks.py +144 -0
  237. code_muse/plugins/force_push_guard/test_detector.py +143 -0
  238. code_muse/plugins/frontend_emitter/__init__.py +25 -0
  239. code_muse/plugins/frontend_emitter/emitter.py +121 -0
  240. code_muse/plugins/frontend_emitter/register_callbacks.py +259 -0
  241. code_muse/plugins/gac/__init__.py +4 -0
  242. code_muse/plugins/gac/git_ops.py +136 -0
  243. code_muse/plugins/gac/prompt.py +191 -0
  244. code_muse/plugins/gac/register_callbacks.py +82 -0
  245. code_muse/plugins/hook_creator/__init__.py +1 -0
  246. code_muse/plugins/hook_creator/register_callbacks.py +34 -0
  247. code_muse/plugins/hook_manager/__init__.py +1 -0
  248. code_muse/plugins/hook_manager/config.py +289 -0
  249. code_muse/plugins/hook_manager/hooks_menu.py +563 -0
  250. code_muse/plugins/hook_manager/register_callbacks.py +227 -0
  251. code_muse/plugins/hook_monitor/register_callbacks.py +36 -0
  252. code_muse/plugins/mindpack/__init__.py +0 -0
  253. code_muse/plugins/mindpack/factory.py +930 -0
  254. code_muse/plugins/mindpack/judge.py +573 -0
  255. code_muse/plugins/mindpack/memory.py +100 -0
  256. code_muse/plugins/mindpack/mindpack_menu.py +1552 -0
  257. code_muse/plugins/mindpack/orchestration.py +605 -0
  258. code_muse/plugins/mindpack/register_callbacks.py +175 -0
  259. code_muse/plugins/mindpack/schemas.py +358 -0
  260. code_muse/plugins/mindpack/tools.py +387 -0
  261. code_muse/plugins/oauth_muse_html.py +226 -0
  262. code_muse/plugins/ollama_setup/__init__.py +5 -0
  263. code_muse/plugins/ollama_setup/completer.py +36 -0
  264. code_muse/plugins/ollama_setup/register_callbacks.py +410 -0
  265. code_muse/plugins/plan_command/__init__.py +0 -0
  266. code_muse/plugins/plan_command/register_callbacks.py +206 -0
  267. code_muse/plugins/plan_mode/__init__.py +37 -0
  268. code_muse/plugins/plan_mode/mode_cycling.py +40 -0
  269. code_muse/plugins/plan_mode/plan_generation.py +68 -0
  270. code_muse/plugins/plan_mode/plan_hooks.py +74 -0
  271. code_muse/plugins/plan_mode/plan_mode_tools.py +138 -0
  272. code_muse/plugins/plan_mode/register_callbacks.py +121 -0
  273. code_muse/plugins/plugin_trust/register_callbacks.py +140 -0
  274. code_muse/plugins/policy_engine/__init__.py +46 -0
  275. code_muse/plugins/policy_engine/approval_flow_integration.py +59 -0
  276. code_muse/plugins/policy_engine/policy_evaluator.py +75 -0
  277. code_muse/plugins/policy_engine/policy_file_discovery.py +90 -0
  278. code_muse/plugins/policy_engine/policy_toml_schema.py +115 -0
  279. code_muse/plugins/policy_engine/register_callbacks.py +112 -0
  280. code_muse/plugins/pop_command/__init__.py +1 -0
  281. code_muse/plugins/pop_command/register_callbacks.py +189 -0
  282. code_muse/plugins/prompt_newline/__init__.py +13 -0
  283. code_muse/plugins/prompt_newline/config.py +19 -0
  284. code_muse/plugins/prompt_newline/register_callbacks.py +159 -0
  285. code_muse/plugins/safety_status/__init__.py +0 -0
  286. code_muse/plugins/safety_status/register_callbacks.py +113 -0
  287. code_muse/plugins/semantic_compression/__init__.py +6 -0
  288. code_muse/plugins/semantic_compression/compressor.py +295 -0
  289. code_muse/plugins/semantic_compression/config.py +123 -0
  290. code_muse/plugins/semantic_compression/register_callbacks.py +320 -0
  291. code_muse/plugins/shell_minimizer/__init__.py +50 -0
  292. code_muse/plugins/shell_minimizer/builtin_filters.toml +393 -0
  293. code_muse/plugins/shell_minimizer/pipeline.py +556 -0
  294. code_muse/plugins/shell_minimizer/primitives.py +482 -0
  295. code_muse/plugins/shell_minimizer/register_callbacks.py +276 -0
  296. code_muse/plugins/shell_safety/__init__.py +6 -0
  297. code_muse/plugins/shell_safety/agent_shell_safety.py +69 -0
  298. code_muse/plugins/shell_safety/command_cache.py +149 -0
  299. code_muse/plugins/shell_safety/register_callbacks.py +202 -0
  300. code_muse/plugins/synthetic_status/__init__.py +1 -0
  301. code_muse/plugins/synthetic_status/register_callbacks.py +128 -0
  302. code_muse/plugins/synthetic_status/status_api.py +145 -0
  303. code_muse/plugins/token_caching/__init__.py +21 -0
  304. code_muse/plugins/token_caching/cache_hit_tracking.py +128 -0
  305. code_muse/plugins/token_caching/cacheable_prefix_detection.py +28 -0
  306. code_muse/plugins/token_caching/register_callbacks.py +54 -0
  307. code_muse/plugins/token_caching/stats_display.py +35 -0
  308. code_muse/plugins/token_tracking/__init__.py +26 -0
  309. code_muse/plugins/token_tracking/database.py +381 -0
  310. code_muse/plugins/token_tracking/edit_analyzer.py +97 -0
  311. code_muse/plugins/token_tracking/record.py +55 -0
  312. code_muse/plugins/token_tracking/register_callbacks.py +277 -0
  313. code_muse/plugins/token_tracking/reports.py +329 -0
  314. code_muse/plugins/universal_constructor/__init__.py +13 -0
  315. code_muse/plugins/universal_constructor/models.py +136 -0
  316. code_muse/plugins/universal_constructor/register_callbacks.py +47 -0
  317. code_muse/plugins/universal_constructor/registry.py +390 -0
  318. code_muse/plugins/universal_constructor/runner.py +474 -0
  319. code_muse/plugins/universal_constructor/safety.py +440 -0
  320. code_muse/plugins/universal_constructor/sandbox.py +584 -0
  321. code_muse/provider_identity.py +105 -0
  322. code_muse/pydantic_patches.py +410 -0
  323. code_muse/reopenable_async_client.py +233 -0
  324. code_muse/round_robin_model.py +151 -0
  325. code_muse/secret_storage.py +74 -0
  326. code_muse/security/__init__.py +1 -0
  327. code_muse/security/redaction.cpython-314-darwin.so +0 -0
  328. code_muse/security/redaction.cpython-314-x86_64-linux-gnu.so +0 -0
  329. code_muse/security/redaction.pyx +135 -0
  330. code_muse/session_storage.py +565 -0
  331. code_muse/status_display.py +261 -0
  332. code_muse/stream_parser/__init__.py +76 -0
  333. code_muse/stream_parser/assistant_text_parser.py +90 -0
  334. code_muse/stream_parser/citation_parser.py +76 -0
  335. code_muse/stream_parser/inline_hidden_tag_parser.py +236 -0
  336. code_muse/stream_parser/proposed_plan_parser.py +158 -0
  337. code_muse/stream_parser/stream_text_chunk.py +23 -0
  338. code_muse/stream_parser/stream_text_parser.py +27 -0
  339. code_muse/stream_parser/tagged_line_parser.cpython-314-darwin.so +0 -0
  340. code_muse/stream_parser/tagged_line_parser.pyx +251 -0
  341. code_muse/stream_parser/utf8_stream_parser.cpython-314-darwin.so +0 -0
  342. code_muse/stream_parser/utf8_stream_parser.pyx +206 -0
  343. code_muse/summarization_agent.py +308 -0
  344. code_muse/terminal_utils.cpython-314-darwin.so +0 -0
  345. code_muse/terminal_utils.cpython-314-x86_64-linux-gnu.so +0 -0
  346. code_muse/terminal_utils.pyx +483 -0
  347. code_muse/tools/__init__.py +459 -0
  348. code_muse/tools/agent_tools.py +613 -0
  349. code_muse/tools/ask_user_question/__init__.py +26 -0
  350. code_muse/tools/ask_user_question/constants.py +73 -0
  351. code_muse/tools/ask_user_question/demo_tui.py +55 -0
  352. code_muse/tools/ask_user_question/handler.py +232 -0
  353. code_muse/tools/ask_user_question/models.py +302 -0
  354. code_muse/tools/ask_user_question/registration.py +37 -0
  355. code_muse/tools/ask_user_question/renderers.py +336 -0
  356. code_muse/tools/ask_user_question/terminal_ui.py +327 -0
  357. code_muse/tools/ask_user_question/theme.py +156 -0
  358. code_muse/tools/ask_user_question/tui_loop.py +422 -0
  359. code_muse/tools/background_jobs.py +99 -0
  360. code_muse/tools/browser/__init__.py +37 -0
  361. code_muse/tools/browser/browser_control.py +289 -0
  362. code_muse/tools/browser/browser_interactions.py +545 -0
  363. code_muse/tools/browser/browser_locators.py +640 -0
  364. code_muse/tools/browser/browser_manager.py +376 -0
  365. code_muse/tools/browser/browser_navigation.py +251 -0
  366. code_muse/tools/browser/browser_screenshot.py +180 -0
  367. code_muse/tools/browser/browser_scripts.py +462 -0
  368. code_muse/tools/browser/browser_workflows.py +222 -0
  369. code_muse/tools/chrome_cdp/__init__.py +1070 -0
  370. code_muse/tools/chrome_cdp/register_callbacks.py +61 -0
  371. code_muse/tools/command_runner.py +1401 -0
  372. code_muse/tools/common.py +1407 -0
  373. code_muse/tools/display.py +87 -0
  374. code_muse/tools/file_modifications.py +1099 -0
  375. code_muse/tools/file_operations.py +860 -0
  376. code_muse/tools/image_tools.py +185 -0
  377. code_muse/tools/meetin_proxy/__init__.py +243 -0
  378. code_muse/tools/meetin_proxy/capture_addon.py +82 -0
  379. code_muse/tools/meetin_proxy/proxy_manager.py +326 -0
  380. code_muse/tools/meetin_proxy/register_callbacks.py +45 -0
  381. code_muse/tools/path_policy.py +219 -0
  382. code_muse/tools/skills_tools.py +586 -0
  383. code_muse/tools/subagent_context.py +158 -0
  384. code_muse/tools/tools_content.py +50 -0
  385. code_muse/tools/universal_constructor.py +965 -0
  386. code_muse/uvx_detection.py +241 -0
  387. code_muse/version_checker.py +86 -0
  388. code_muse-0.0.1.data/data/code_muse/models.json +66 -0
  389. code_muse-0.0.1.data/data/code_muse/models_dev_api.json +1 -0
  390. code_muse-0.0.1.dist-info/METADATA +845 -0
  391. code_muse-0.0.1.dist-info/RECORD +394 -0
  392. code_muse-0.0.1.dist-info/WHEEL +4 -0
  393. code_muse-0.0.1.dist-info/entry_points.txt +2 -0
  394. code_muse-0.0.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1203 @@
1
+ import json
2
+ import logging
3
+ import os
4
+ import pathlib
5
+ from typing import Any
6
+
7
+ import httpx
8
+ from anthropic import AsyncAnthropic
9
+ from openai import AsyncAzureOpenAI
10
+ from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings
11
+ from pydantic_ai.models.openai import (
12
+ OpenAIChatModel,
13
+ OpenAIChatModelSettings,
14
+ OpenAIResponsesModel,
15
+ OpenAIResponsesModelSettings,
16
+ )
17
+ from pydantic_ai.profiles import ModelProfile
18
+ from pydantic_ai.providers.cerebras import CerebrasProvider
19
+ from pydantic_ai.providers.openrouter import OpenRouterProvider
20
+ from pydantic_ai.settings import ModelSettings
21
+
22
+ from code_muse.gemini_model import GeminiModel
23
+ from code_muse.messaging import emit_warning
24
+
25
+ from . import callbacks
26
+ from .claude_cache_client import ClaudeCacheAsyncClient, patch_anthropic_client_messages
27
+ from .config import EXTRA_MODELS_FILE, MODELS_FILE, get_value
28
+ from .http_utils import create_async_client, get_cert_bundle_path, get_http2
29
+ from .provider_identity import (
30
+ make_anthropic_provider,
31
+ make_openai_provider,
32
+ resolve_provider_identity,
33
+ )
34
+ from .round_robin_model import RoundRobinModel
35
+
36
+ logger = logging.getLogger(__name__)
37
+
38
+ # Registry for custom model provider classes from plugins
39
+ _CUSTOM_MODEL_PROVIDERS: dict[str, type] = {}
40
+
41
+ # ---------------------------------------------------------------------------
42
+ # PERF-06: Mtime-based config cache to avoid re-reading JSON files on every
43
+ # ModelFactory.load_config() call. Mirrors the pattern already used in
44
+ # summarization_agent.get_cached_models_config(). Invalidation: any source
45
+ # file's mtime changes, or invalidate_models_config_cache() is called
46
+ # explicitly (e.g. after /set commands).
47
+ # ---------------------------------------------------------------------------
48
+ import hashlib as _hashlib # noqa: E402
49
+ import threading as _threading # noqa: E402
50
+
51
+ _models_config_cache: tuple[dict[str, Any] | None, tuple[float, str] | None] = (
52
+ None,
53
+ None,
54
+ )
55
+ # FREE-THREADED: _models_config_lock guards sync-only cache access.
56
+ # All callers are sync; keep as threading.Lock.
57
+ _models_config_lock = _threading.Lock()
58
+
59
+
60
+ def _models_config_fingerprint() -> tuple[float, str]:
61
+ """Compute a lightweight fingerprint of all model config source files.
62
+
63
+ Returns (max_mtime, content_hash) — if either changes, the cached
64
+ config is stale and must be reloaded.
65
+ """
66
+ source_paths: list[pathlib.Path] = []
67
+
68
+ bundled = pathlib.Path(__file__).parent / "models.json"
69
+ source_paths.append(bundled)
70
+
71
+ try:
72
+ from code_muse.config import (
73
+ CHATGPT_MODELS_FILE,
74
+ CLAUDE_MODELS_FILE,
75
+ COPILOT_MODELS_FILE,
76
+ EXTRA_MODELS_FILE,
77
+ GEMINI_MODELS_FILE,
78
+ MODELS_FILE,
79
+ )
80
+
81
+ for p in (
82
+ MODELS_FILE,
83
+ EXTRA_MODELS_FILE,
84
+ CHATGPT_MODELS_FILE,
85
+ CLAUDE_MODELS_FILE,
86
+ GEMINI_MODELS_FILE,
87
+ COPILOT_MODELS_FILE,
88
+ ):
89
+ source_paths.append(pathlib.Path(p))
90
+ except Exception:
91
+ pass
92
+
93
+ max_mtime = 0.0
94
+ hasher = _hashlib.md5(usedforsecurity=False)
95
+ for sp in source_paths:
96
+ try:
97
+ if sp.exists():
98
+ stat = sp.stat()
99
+ mtime = stat.st_mtime
100
+ if isinstance(mtime, (int, float)):
101
+ max_mtime = max(max_mtime, mtime)
102
+ hasher.update(f"{sp}:{stat.st_size}:{mtime}".encode())
103
+ else:
104
+ # Mocked stat objects in tests — force cache miss
105
+ max_mtime = float("inf")
106
+ except OSError:
107
+ pass
108
+
109
+ return max_mtime, hasher.hexdigest()
110
+
111
+
112
def invalidate_models_config_cache() -> None:
    """Drop the cached models config so the next load re-reads from disk.

    Invoke this whenever settings or model files are known to have changed
    (e.g. after a ``/set`` command that touches model configuration); the
    following ``ModelFactory.load_config()`` then rebuilds the cache.
    """
    global _models_config_cache
    # Clear both the cached payload and its fingerprint atomically under
    # the lock so a concurrent reader never sees a half-reset entry.
    with _models_config_lock:
        _models_config_cache = (None, None)
121
+
122
+
123
def _load_plugin_model_providers():
    """Merge plugin-registered model provider classes into the registry.

    Best-effort: any failure (missing callbacks module, a plugin raising)
    is logged as a warning and swallowed so a broken plugin cannot stop
    this module from importing.
    """
    global _CUSTOM_MODEL_PROVIDERS
    try:
        from code_muse.callbacks import on_register_model_providers

        for mapping in on_register_model_providers():
            # Plugins may return arbitrary objects; only dicts are merged.
            if isinstance(mapping, dict):
                _CUSTOM_MODEL_PROVIDERS.update(mapping)
    except Exception as exc:
        logger.warning("Failed to load plugin model providers: %s", exc)
135
+
136
+
137
+ # Load plugin model providers at module initialization
138
+ _load_plugin_model_providers()
139
+
140
+
141
+ # Anthropic beta header required for 1M context window support.
142
+ CONTEXT_1M_BETA = "context-1m-2025-08-07"
143
+
144
+
145
+ def _build_anthropic_beta_header(
146
+ model_config: dict,
147
+ *,
148
+ interleaved_thinking: bool = False,
149
+ ) -> str | None:
150
+ """Build the anthropic-beta header value for an Anthropic model.
151
+
152
+ Combines beta flags based on model capabilities:
153
+ - interleaved-thinking-2025-05-14 (when interleaved_thinking is enabled)
154
+ - context-1m-2025-08-07 (when context_length >= 1_000_000)
155
+
156
+ Returns None if no beta flags are needed.
157
+ """
158
+ parts: list[str] = []
159
+ if interleaved_thinking:
160
+ parts.append("interleaved-thinking-2025-05-14")
161
+ if model_config.get("context_length", 0) >= 1_000_000:
162
+ parts.append(CONTEXT_1M_BETA)
163
+ return ",".join(parts) if parts else None
164
+
165
+
166
def get_api_key(env_var_name: str) -> str | None:
    """Look up an API key, preferring config over the environment.

    Users may set keys via ``/set KIMI_API_KEY=xxx`` (stored in config
    under the lowercased name) in addition to exporting them as
    environment variables.

    Args:
        env_var_name: Environment-variable-style key name
            (e.g., "OPENAI_API_KEY").

    Returns:
        The API key value, or None when neither source provides one.
    """
    # Config keys are stored lowercased; a truthy config value wins,
    # otherwise fall through to the process environment.
    return get_value(env_var_name.lower()) or os.environ.get(env_var_name)
185
+
186
+
187
+ # Model types that use the Anthropic Messages API under the hood.
188
+ # These all need Anthropic-specific settings (thinking, effort, etc.).
189
+ _ANTHROPIC_MODEL_TYPES = frozenset(
190
+ {"anthropic", "aws_bedrock", "azure_foundry", "claude_code"}
191
+ )
192
+
193
+
194
+ def _is_anthropic_model(model_name: str, model_config: dict[str, Any]) -> bool:
195
+ """Check if a model uses the Anthropic API (by name prefix or config type)."""
196
+ if model_name.startswith("claude-") or model_name.startswith("anthropic-"):
197
+ return True
198
+ return model_config.get("type") in _ANTHROPIC_MODEL_TYPES
199
+
200
+
201
def make_model_settings(
    model_name: str, max_tokens: int | None = None
) -> ModelSettings:
    """Create appropriate ModelSettings for a given model.

    This handles model-specific settings:
    - GPT-5 models: reasoning_effort and verbosity (non-codex only)
    - Claude/Anthropic models: extended_thinking and budget_tokens
    - Automatic max_tokens calculation based on model context length

    Branch order matters: Copilot-wrapped models are handled first (they use
    the OpenAI wire format even for Claude backends), then GPT-5, then native
    Anthropic models, and finally a generic "thinking_level" pass that can
    rebuild the settings object.

    Args:
        model_name: The name of the model to create settings for.
        max_tokens: Optional max tokens limit. If None, automatically calculated
            as: max(2048, min(15% of context_length, 65536))

    Returns:
        Appropriate ModelSettings subclass instance for the model.
    """
    # Imported lazily to avoid import cycles with code_muse.config.
    from code_muse.config import (
        get_effective_model_settings,
        get_openai_reasoning_effort,
        get_openai_reasoning_summary,
        get_openai_verbosity,
        model_supports_setting,
    )

    # Accumulates keyword args; the concrete ModelSettings subclass is chosen
    # (and constructed from this dict) per provider branch below.
    model_settings_dict: dict = {}

    # Calculate max_tokens if not explicitly provided
    model_config: dict[str, Any] = {}
    if max_tokens is None:
        # Load model config to get context length
        try:
            models_config = ModelFactory.load_config()
            model_config = models_config.get(model_name, {})
            context_length = model_config.get("context_length", 128000)
        except Exception:
            # Fallback if config loading fails (e.g., in CI environments)
            context_length = 128000
        # min 2048, 15% of context, max 65536
        max_tokens = max(2048, min(int(0.15 * context_length), 65536))
    elif not model_config:
        # NOTE(review): model_config was just initialized to {} above, so this
        # condition is always true when max_tokens was provided explicitly —
        # it simply ensures model_config gets loaded in that path too.
        try:
            model_config = ModelFactory.load_config().get(model_name, {})
        except Exception:
            model_config = {}

    model_settings_dict["max_tokens"] = max_tokens
    # User/profile overrides are merged in last so they win over defaults.
    effective_settings = get_effective_model_settings(model_name)
    model_settings_dict.update(effective_settings)

    # Parallel tool calls are always enabled. Read-only tools (file reads,
    # greps, list_files, etc.) are safe to run concurrently without user
    # review. Destructive tools still go through the approval loop before
    # execution, and the CLI user can cancel individual calls.

    # Default to clear_thinking=False for GLM-4.7 and GLM-5 models (preserved thinking)
    if "glm-4.7" in model_name.lower() or "glm-5" in model_name.lower():
        clear_thinking = effective_settings.get("clear_thinking", False)
        model_settings_dict["thinking"] = {
            "type": "enabled",
            "clear_thinking": clear_thinking,
        }

    # Generic fallback; provider branches below may replace this instance.
    model_settings: ModelSettings = ModelSettings(**model_settings_dict)

    # Copilot models use OpenAI-compatible format even for Claude backends.
    # Claude thinking translates to reasoning_effort; GPT models get the
    # standard OpenAI reasoning settings.
    model_type = model_config.get("type")
    is_copilot = model_type == "copilot"
    copilot_underlying = model_config.get("name", "").lower() if is_copilot else ""

    if is_copilot and copilot_underlying.startswith("claude-"):
        # Copilot wraps Claude behind an OpenAI-compatible API.
        # Translate extended_thinking / effort into reasoning_effort.
        from code_muse.model_utils import get_default_extended_thinking

        default_thinking = get_default_extended_thinking(copilot_underlying)
        extended_thinking = effective_settings.get(
            "extended_thinking", default_thinking
        )
        # Legacy boolean compat
        if extended_thinking is True:
            extended_thinking = "enabled"
        elif extended_thinking is False:
            extended_thinking = "off"

        if extended_thinking in ("enabled", "adaptive"):
            # Map effort setting to reasoning_effort for the OpenAI format
            effort = effective_settings.get("effort", "high")
            model_settings_dict["openai_reasoning_effort"] = effort

        # Strip Anthropic-only keys that leaked from effective_settings
        for key in ("extended_thinking", "budget_tokens", "interleaved_thinking"):
            model_settings_dict.pop(key, None)

        model_settings = OpenAIChatModelSettings(**model_settings_dict)

    elif is_copilot and (
        copilot_underlying.startswith("gpt-")
        or copilot_underlying.startswith("o3")
        or copilot_underlying.startswith("o4")
    ):
        # Copilot GPT/O-series — the Copilot API currently does NOT
        # support reasoning_effort for GPT models (400 Bad Request).
        # Just use plain OpenAIChatModelSettings without reasoning params.
        model_settings = OpenAIChatModelSettings(**model_settings_dict)

    elif "gpt-5" in model_name:
        model_settings_dict["openai_reasoning_effort"] = get_openai_reasoning_effort()

        # Responses API vs Chat Completions: OAuth/Foundry always use it;
        # plain OpenAI / custom endpoints only for codex variants.
        uses_responses_api = (
            model_type == "chatgpt_oauth"
            or model_type == "azure_foundry_openai"
            or (model_type == "openai" and "codex" in model_name)
            or (model_type == "custom_openai" and "codex" in model_name)
        )

        if uses_responses_api:
            model_settings_dict["openai_reasoning_summary"] = (
                get_openai_reasoning_summary()
            )
            if "codex" not in model_name:
                model_settings_dict["openai_text_verbosity"] = get_openai_verbosity()
            model_settings = OpenAIResponsesModelSettings(**model_settings_dict)
        else:
            # Chat Completions models don't support configurable reasoning summaries.
            # Keep the old verbosity injection path for non-Responses GPT-5 models.
            if "codex" not in model_name:
                verbosity = get_openai_verbosity()
                model_settings_dict["extra_body"] = {"verbosity": verbosity}
            model_settings = OpenAIChatModelSettings(**model_settings_dict)
    elif _is_anthropic_model(model_name, model_config):
        # Handle Anthropic extended thinking settings
        # Remove top_p as Anthropic doesn't support it with extended thinking
        model_settings_dict.pop("top_p", None)

        # Claude extended thinking requires temperature=1.0 (API restriction)
        # Default to 1.0 if not explicitly set by user
        if model_settings_dict.get("temperature") is None:
            model_settings_dict["temperature"] = 1.0

        from code_muse.model_utils import (
            get_default_extended_thinking,
            should_use_anthropic_thinking_summary,
        )

        # The config's "name" is the real API model id; fall back to the
        # user-facing key when absent.
        actual_model_id = model_config.get("name", model_name)
        default_thinking = get_default_extended_thinking(model_name, actual_model_id)
        extended_thinking = effective_settings.get(
            "extended_thinking", default_thinking
        )
        # Backwards compat: handle legacy boolean values
        if extended_thinking is True:
            extended_thinking = "enabled"
        elif extended_thinking is False:
            extended_thinking = "off"

        budget_tokens = effective_settings.get("budget_tokens", 10000)
        if extended_thinking in ("enabled", "adaptive"):
            model_settings_dict["anthropic_thinking"] = {
                "type": extended_thinking,
            }
            if (
                extended_thinking == "adaptive"
                and should_use_anthropic_thinking_summary(model_name, actual_model_id)
            ):
                model_settings_dict["anthropic_thinking"]["display"] = "summarized"
            # Only send budget_tokens for classic "enabled" mode
            if extended_thinking == "enabled" and budget_tokens:
                model_settings_dict["anthropic_thinking"]["budget_tokens"] = (
                    budget_tokens
                )

        # Opus 4-6 models support the `effort` setting via output_config.
        # pydantic-ai doesn't have a native field for output_config yet,
        # so we inject it through extra_body which gets merged into the
        # HTTP request body.
        # NOTE: effort/output_config only applies to adaptive thinking.
        # With standard "enabled" thinking, budget_tokens controls depth.
        if (
            model_supports_setting(model_name, "effort")
            and extended_thinking == "adaptive"
        ):
            effort = effective_settings.get(
                "effort", model_config.get("default_effort", "high")
            )
            if "anthropic_thinking" in model_settings_dict:
                extra_body = model_settings_dict.get("extra_body") or {}
                extra_body["output_config"] = {"effort": effort}
                model_settings_dict["extra_body"] = extra_body

        model_settings = AnthropicModelSettings(**model_settings_dict)

    # Handle thinking models
    # Check if model supports thinking settings and apply defaults
    # NOTE(review): this runs after the provider branches and rebuilds a plain
    # ModelSettings from the dict, replacing any subclass chosen above if a
    # model ever supported both — presumably only Gemini-style models hit this;
    # confirm against model_supports_setting's config.
    if model_supports_setting(model_name, "thinking_level"):
        # Apply defaults if not explicitly set by user
        # Default: thinking_enabled=True, thinking_level="low"
        if "thinking_enabled" not in model_settings_dict:
            model_settings_dict["thinking_enabled"] = True
        if "thinking_level" not in model_settings_dict:
            model_settings_dict["thinking_level"] = "low"
        # Recreate settings with Gemini thinking config
        model_settings = ModelSettings(**model_settings_dict)

    return model_settings
409
+
410
+
411
class ZaiChatModel(OpenAIChatModel):
    """OpenAIChatModel variant for Z.AI endpoints.

    Normalizes the response's ``object`` field to ``"chat.completion"``
    before the stock OpenAI-format processing runs — presumably because
    the Z.AI API returns a non-standard value there (confirm against the
    Z.AI API); everything else is inherited unchanged.
    """

    def _process_response(self, response):
        # Force the canonical discriminator, then delegate to the parent.
        response.object = "chat.completion"
        return super()._process_response(response)
415
+
416
+
417
def _resolve_header_value(key: str, value: str) -> str:
    """Resolve ``$ENV_VAR`` references inside a custom-endpoint header value.

    A value that starts with ``$`` is treated as one variable reference
    (everything after the ``$``, including spaces, is the variable name).
    Otherwise, any space-separated token starting with ``$`` is resolved
    individually (e.g. ``"Bearer $TOKEN"``). A missing variable emits a
    warning and resolves to the empty string.
    """
    if value.startswith("$"):
        env_var_name = value[1:]
        resolved_value = get_api_key(env_var_name)
        if resolved_value is None:
            emit_warning(
                f"'{env_var_name}' is not set (check config or environment) for custom endpoint header '{key}'. Proceeding with empty value."
            )
            resolved_value = ""
        return resolved_value
    if "$" in value:
        # Mixed value such as "Bearer $TOKEN": resolve token-by-token,
        # reusing the single-reference path above for each $-token.
        return " ".join(
            _resolve_header_value(key, token) if token.startswith("$") else token
            for token in value.split(" ")
        )
    return value


def _validate_timeout(timeout):
    """Validate and normalize a custom-endpoint timeout value.

    Returns None (meaning "use the default") or a positive number; numeric
    strings are coerced to float. Raises ValueError for booleans,
    non-numeric values, and values <= 0.
    """
    if timeout is None:
        return None
    # bool is a subclass of int, so reject it explicitly first.
    if isinstance(timeout, bool):
        raise ValueError("Custom endpoint timeout must be a number")
    if isinstance(timeout, str):
        try:
            timeout = float(timeout)
        except ValueError as exc:
            raise ValueError("Custom endpoint timeout must be a number") from exc
    if not isinstance(timeout, (int, float)):
        raise ValueError("Custom endpoint timeout must be a number")
    if timeout <= 0:
        raise ValueError("Custom endpoint timeout must be greater than zero")
    return timeout


def get_custom_config(model_config):
    """Extract connection settings from a model's ``custom_endpoint`` config.

    Args:
        model_config: Model configuration dict containing a ``custom_endpoint``
            mapping with at least a ``url``; optional ``headers``, ``api_key``,
            ``ca_certs_path`` and ``timeout`` (a top-level ``timeout`` on
            model_config takes precedence over the endpoint-level one).

    Returns:
        Tuple ``(url, headers, verify, api_key, timeout)`` where ``verify``
        is a CA-bundle path or None, ``api_key`` may be None (with a warning
        emitted if a referenced env var is unset), and ``timeout`` is a
        positive number or None.

    Raises:
        ValueError: If ``custom_endpoint`` or its ``url`` is missing, or the
            timeout is not a positive number.
    """
    custom_config = model_config.get("custom_endpoint", {})
    if not custom_config:
        raise ValueError("Custom model requires 'custom_endpoint' configuration")

    url = custom_config.get("url")
    if not url:
        raise ValueError("Custom endpoint requires 'url' field")

    # Headers may embed $ENV_VAR references; resolve them all up front.
    headers = {
        key: _resolve_header_value(key, value)
        for key, value in custom_config.get("headers", {}).items()
    }

    api_key = None
    if "api_key" in custom_config:
        if custom_config["api_key"].startswith("$"):
            env_var_name = custom_config["api_key"][1:]
            api_key = get_api_key(env_var_name)
            if api_key is None:
                emit_warning(
                    f"API key '{env_var_name}' is not set (checked config and environment); proceeding without API key."
                )
        else:
            # Raw (non-reference) key, used as-is.
            api_key = custom_config["api_key"]

    # A configured CA bundle path becomes the TLS verify target; None means
    # "caller decides" (e.g. fall back to the global cert bundle).
    verify = custom_config.get("ca_certs_path")

    timeout = _validate_timeout(
        model_config.get("timeout", custom_config.get("timeout"))
    )

    return url, headers, verify, api_key, timeout
486
+
487
+
488
+ class ModelFactory:
489
+ """A factory for creating and managing different AI models."""
490
+
491
    @staticmethod
    def load_config() -> dict[str, Any]:
        """Load the merged models configuration, with fingerprint-based caching.

        Overlay order (later layers override earlier ones):
        1. A registered ``load_model_config`` callback, or the bundled
           models.json shipped with the package.
        2. The user-level MODELS_FILE.
        3. Extra sources: extra models, ChatGPT/Claude/Gemini OAuth models,
           and Copilot models (Claude uses a filtered loader when available).
        4. Plugin-provided configs via the ``load_models_config`` hook.

        Returns:
            The merged model-name -> model-config mapping.
        """
        global _models_config_cache
        # PERF-06: Return cached config when source files haven't changed.
        fingerprint = _models_config_fingerprint()
        with _models_config_lock:
            cached_config, cached_fp = _models_config_cache
            if cached_config is not None and cached_fp == fingerprint:
                return cached_config

        # --- Original loading logic (cache miss) ---
        # A plugin may fully replace the base config; only the first
        # registered callback is honored.
        load_model_config_callbacks = callbacks.get_callbacks("load_model_config")
        if len(load_model_config_callbacks) > 0:
            if len(load_model_config_callbacks) > 1:
                logging.getLogger(__name__).warning(
                    "Multiple load_model_config callbacks registered, using the first"
                )
            config = callbacks.on_load_model_config()[0]
        else:
            # Always load from the bundled models.json so upstream
            # updates propagate automatically. User additions belong
            # in extra_models.json (overlay loaded below).
            bundled_models = pathlib.Path(__file__).parent / "models.json"
            with open(bundled_models) as f:
                config = json.load(f)

        # User-level models.json overrides bundled config
        user_models = pathlib.Path(MODELS_FILE)
        if user_models.exists():
            try:
                with open(user_models) as f:
                    config.update(json.load(f))
            except json.JSONDecodeError as exc:
                logging.getLogger(__name__).warning(
                    f"Failed to load user models config from {user_models}: Invalid JSON - {exc}"
                )
            except Exception as exc:
                logging.getLogger(__name__).warning(
                    f"Failed to load user models config from {user_models}: {exc}"
                )

        # Import OAuth model file paths from main config
        from code_muse.config import (
            CHATGPT_MODELS_FILE,
            CLAUDE_MODELS_FILE,
            COPILOT_MODELS_FILE,
            GEMINI_MODELS_FILE,
        )

        # Build list of extra model sources (user models handled above).
        # Tuple layout: (path, human-readable label, use filtered loader?).
        extra_sources: list[tuple[pathlib.Path, str, bool]] = [
            (pathlib.Path(EXTRA_MODELS_FILE), "extra models", False),
            (pathlib.Path(CHATGPT_MODELS_FILE), "ChatGPT OAuth models", False),
            (pathlib.Path(CLAUDE_MODELS_FILE), "Claude Code OAuth models", True),
            (pathlib.Path(GEMINI_MODELS_FILE), "Gemini OAuth models", False),
            (pathlib.Path(COPILOT_MODELS_FILE), "Copilot models", False),
        ]

        for source_path, label, use_filtered in extra_sources:
            if not source_path.exists():
                continue
            try:
                # Use filtered loading for Claude Code OAuth models to show only latest versions
                if use_filtered:
                    try:
                        from code_muse.plugins.claude_code_oauth.utils import (
                            load_claude_models_filtered,
                        )

                        extra_config = load_claude_models_filtered()
                    except ImportError:
                        # Plugin not available, fall back to standard JSON loading
                        logging.getLogger(__name__).debug(
                            f"claude_code_oauth plugin not available, loading {label} as plain JSON"
                        )
                        with open(source_path) as f:
                            extra_config = json.load(f)
                else:
                    with open(source_path) as f:
                        extra_config = json.load(f)
                config.update(extra_config)
            except json.JSONDecodeError as exc:
                logging.getLogger(__name__).warning(
                    f"Failed to load {label} config from {source_path}: Invalid JSON - {exc}"
                )
            except Exception as exc:
                logging.getLogger(__name__).warning(
                    f"Failed to load {label} config from {source_path}: {exc}"
                )

        # Let plugins add/override models via load_models_config hook
        try:
            from code_muse.callbacks import on_load_models_config

            results = on_load_models_config()
            for result in results:
                if isinstance(result, dict):
                    config.update(result)  # Plugin models override built-in
        except Exception as exc:
            logging.getLogger(__name__).debug(
                f"Failed to load plugin models config: {exc}"
            )

        # --- End original loading logic ---

        # Store in cache. NOTE(review): check and store take the lock
        # separately, so two threads missing simultaneously both load and the
        # last writer wins — benign for an idempotent load, but worth knowing.
        with _models_config_lock:
            _models_config_cache = (config, fingerprint)
        return config
600
+
601
+ @staticmethod
602
+ def get_model(model_name: str, config: dict[str, Any]) -> Any:
603
+ """Returns a configured model instance based on the provided name and config.
604
+
605
+ API key validation happens naturally within each model type's initialization,
606
+ which emits warnings and returns None if keys are missing.
607
+ """
608
+ model_config = config.get(model_name)
609
+ if not model_config:
610
+ raise ValueError(f"Model '{model_name}' not found in configuration.")
611
+
612
+ model_type = model_config.get("type")
613
+ provider_identity = resolve_provider_identity(model_name, model_config)
614
+
615
+ # Check for plugin-registered model provider classes first
616
+ if model_type in _CUSTOM_MODEL_PROVIDERS:
617
+ provider_class = _CUSTOM_MODEL_PROVIDERS[model_type]
618
+ try:
619
+ return provider_class(
620
+ model_name=model_name, model_config=model_config, config=config
621
+ )
622
+ except Exception as e:
623
+ logger.error(f"Custom model provider '{model_type}' failed: {e}")
624
+ return None
625
+
626
+ if model_type == "gemini":
627
+ api_key = get_api_key("GEMINI_API_KEY")
628
+ if not api_key:
629
+ emit_warning(
630
+ f"GEMINI_API_KEY is not set (check config or environment); skipping Gemini model '{model_config.get('name')}'."
631
+ )
632
+ return None
633
+
634
+ model = GeminiModel(model_name=model_config["name"], api_key=api_key)
635
+ return model
636
+
637
+ elif model_type == "openai":
638
+ api_key = get_api_key("OPENAI_API_KEY")
639
+ if not api_key:
640
+ emit_warning(
641
+ f"OPENAI_API_KEY is not set (check config or environment); skipping OpenAI model '{model_config.get('name')}'."
642
+ )
643
+ return None
644
+
645
+ provider = make_openai_provider(provider_identity, api_key=api_key)
646
+ model = OpenAIChatModel(model_name=model_config["name"], provider=provider)
647
+ if "codex" in model_name:
648
+ model = OpenAIResponsesModel(
649
+ model_name=model_config["name"], provider=provider
650
+ )
651
+ return model
652
+
653
+ elif model_type == "anthropic":
654
+ api_key = get_api_key("ANTHROPIC_API_KEY")
655
+ if not api_key:
656
+ emit_warning(
657
+ f"ANTHROPIC_API_KEY is not set (check config or environment); skipping Anthropic model '{model_config.get('name')}'."
658
+ )
659
+ return None
660
+
661
+ # Use the same caching client as claude_code models
662
+ verify = get_cert_bundle_path()
663
+ http2_enabled = get_http2()
664
+
665
+ client = ClaudeCacheAsyncClient(
666
+ verify=verify,
667
+ timeout=180,
668
+ http2=http2_enabled,
669
+ )
670
+
671
+ # Check if interleaved thinking is enabled for this model
672
+ # Only applies to Claude 4 models (Opus 4.5, Opus 4.1, Opus 4, Sonnet 4)
673
+ from code_muse.config import get_effective_model_settings
674
+
675
+ effective_settings = get_effective_model_settings(model_name)
676
+ interleaved_thinking = effective_settings.get("interleaved_thinking", False)
677
+
678
+ beta_header = _build_anthropic_beta_header(
679
+ model_config, interleaved_thinking=interleaved_thinking
680
+ )
681
+ default_headers = {}
682
+ if beta_header:
683
+ default_headers["anthropic-beta"] = beta_header
684
+
685
+ anthropic_client = AsyncAnthropic(
686
+ api_key=api_key,
687
+ http_client=client,
688
+ default_headers=default_headers if default_headers else None,
689
+ )
690
+
691
+ # Ensure cache_control is injected at the Anthropic SDK layer
692
+ patch_anthropic_client_messages(anthropic_client)
693
+
694
+ provider = make_anthropic_provider(
695
+ provider_identity, anthropic_client=anthropic_client
696
+ )
697
+ return AnthropicModel(model_name=model_config["name"], provider=provider)
698
+
699
+ elif model_type == "custom_anthropic":
700
+ url, headers, verify, api_key, timeout = get_custom_config(model_config)
701
+ if not api_key:
702
+ emit_warning(
703
+ f"API key is not set for custom Anthropic endpoint; skipping model '{model_config.get('name')}'."
704
+ )
705
+ return None
706
+
707
+ # Use the same caching client as claude_code models
708
+ if verify is None:
709
+ verify = get_cert_bundle_path()
710
+
711
+ http2_enabled = get_http2()
712
+
713
+ client = ClaudeCacheAsyncClient(
714
+ headers=headers,
715
+ verify=verify,
716
+ timeout=timeout if timeout is not None else 180,
717
+ http2=http2_enabled,
718
+ )
719
+
720
+ # Check if interleaved thinking is enabled for this model
721
+ from code_muse.config import get_effective_model_settings
722
+
723
+ effective_settings = get_effective_model_settings(model_name)
724
+ interleaved_thinking = effective_settings.get("interleaved_thinking", False)
725
+
726
+ beta_header = _build_anthropic_beta_header(
727
+ model_config, interleaved_thinking=interleaved_thinking
728
+ )
729
+ default_headers = {}
730
+ if beta_header:
731
+ default_headers["anthropic-beta"] = beta_header
732
+
733
+ anthropic_client = AsyncAnthropic(
734
+ base_url=url,
735
+ http_client=client,
736
+ api_key=api_key,
737
+ default_headers=default_headers if default_headers else None,
738
+ )
739
+
740
+ # Ensure cache_control is injected at the Anthropic SDK layer
741
+ patch_anthropic_client_messages(anthropic_client)
742
+
743
+ provider = make_anthropic_provider(
744
+ provider_identity, anthropic_client=anthropic_client
745
+ )
746
+ return AnthropicModel(model_name=model_config["name"], provider=provider)
747
+ # NOTE: 'claude_code' model type is now handled by the claude_code_oauth plugin
748
+ # via the register_model_type callback. See plugins/claude_code_oauth/register_callbacks.py
749
+
750
+ elif model_type == "azure_openai":
751
+ azure_endpoint_config = model_config.get("azure_endpoint")
752
+ if not azure_endpoint_config:
753
+ raise ValueError(
754
+ "Azure OpenAI model type requires 'azure_endpoint' in its configuration."
755
+ )
756
+ azure_endpoint = azure_endpoint_config
757
+ if azure_endpoint_config.startswith("$"):
758
+ azure_endpoint = get_api_key(azure_endpoint_config[1:])
759
+ if not azure_endpoint:
760
+ emit_warning(
761
+ f"Azure OpenAI endpoint '{azure_endpoint_config[1:] if azure_endpoint_config.startswith('$') else azure_endpoint_config}' not found (check config or environment); skipping model '{model_config.get('name')}'."
762
+ )
763
+ return None
764
+
765
+ api_version_config = model_config.get("api_version")
766
+ if not api_version_config:
767
+ raise ValueError(
768
+ "Azure OpenAI model type requires 'api_version' in its configuration."
769
+ )
770
+ api_version = api_version_config
771
+ if api_version_config.startswith("$"):
772
+ api_version = get_api_key(api_version_config[1:])
773
+ if not api_version:
774
+ emit_warning(
775
+ f"Azure OpenAI API version '{api_version_config[1:] if api_version_config.startswith('$') else api_version_config}' not found (check config or environment); skipping model '{model_config.get('name')}'."
776
+ )
777
+ return None
778
+
779
+ api_key_config = model_config.get("api_key")
780
+ if not api_key_config:
781
+ raise ValueError(
782
+ "Azure OpenAI model type requires 'api_key' in its configuration."
783
+ )
784
+ api_key = api_key_config
785
+ if api_key_config.startswith("$"):
786
+ api_key = get_api_key(api_key_config[1:])
787
+ if not api_key:
788
+ emit_warning(
789
+ f"Azure OpenAI API key '{api_key_config[1:] if api_key_config.startswith('$') else api_key_config}' not found (check config or environment); skipping model '{model_config.get('name')}'."
790
+ )
791
+ return None
792
+
793
+ # Configure max_retries for the Azure client, defaulting if not specified in config
794
+ azure_max_retries = model_config.get("max_retries", 2)
795
+
796
+ azure_client = AsyncAzureOpenAI(
797
+ azure_endpoint=azure_endpoint,
798
+ api_version=api_version,
799
+ api_key=api_key,
800
+ max_retries=azure_max_retries,
801
+ )
802
+ provider = make_openai_provider(
803
+ provider_identity, openai_client=azure_client
804
+ )
805
+ return OpenAIChatModel(model_name=model_config["name"], provider=provider)
806
+
807
+ elif model_type == "custom_openai":
808
+ url, headers, verify, api_key, timeout = get_custom_config(model_config)
809
+ client = create_async_client(
810
+ headers=headers,
811
+ verify=verify,
812
+ timeout=timeout if timeout is not None else 180,
813
+ )
814
+ provider_args = {"base_url": url}
815
+ if isinstance(client, httpx.AsyncClient):
816
+ provider_args["http_client"] = client
817
+ if api_key:
818
+ provider_args["api_key"] = api_key
819
+ provider = make_openai_provider(provider_identity, **provider_args)
820
+
821
+ # Provider-specific compatibility shims.
822
+ #
823
+ # - Some providers (e.g. crof.ai / kimi) don't support OpenAI's
824
+ # ``strict: true`` on tool schemas.
825
+ # - Some providers appear to accept tool *calls* in assistant
826
+ # messages but error when the next request includes tool *results*
827
+ # with ``role='tool'``. For these providers we degrade tool results
828
+ # into regular user messages (the model still sees the output and
829
+ # can continue, but we avoid a hard API failure).
830
+ provider_name = str(model_config.get("provider") or "")
831
+ strip_strict_tools = model_config.get("strict_tools") is False
832
+ tool_results_as_user = provider_name == "crof"
833
+ flatten_tool_calls = provider_name == "crof"
834
+
835
+ if strip_strict_tools or tool_results_as_user or flatten_tool_calls:
836
+
837
+ class _CompatChatModel(OpenAIChatModel):
838
+ """OpenAIChatModel with provider compatibility patches."""
839
+
840
+ def _map_tool_definition(self, f): # type: ignore[override]
841
+ tool_param = super()._map_tool_definition(f)
842
+ if strip_strict_tools:
843
+ tool_param["function"].pop("strict", None)
844
+ return tool_param
845
+
846
+ def _map_model_response(self, message): # type: ignore[override]
847
+ """Optionally flatten tool calls into assistant text.
848
+
849
+ Some OpenAI-compatible providers accept tool calling in
850
+ responses but error on tool call/result message wiring
851
+ in subsequent requests. For these providers we avoid
852
+ sending `tool_calls` in assistant messages at all and
853
+ instead embed a human-readable representation in the
854
+ assistant content.
855
+ """
856
+ if not flatten_tool_calls:
857
+ return super()._map_model_response(message)
858
+
859
+ import json
860
+
861
+ from openai.types.chat import (
862
+ ChatCompletionAssistantMessageParam,
863
+ )
864
+ from pydantic_ai.messages import TextPart, ToolCallPart
865
+
866
+ chunks: list[str] = []
867
+ for part in message.parts:
868
+ if isinstance(part, TextPart):
869
+ if part.content:
870
+ chunks.append(part.content)
871
+ elif isinstance(part, ToolCallPart):
872
+ args = part.args
873
+ if isinstance(args, dict):
874
+ args_str = json.dumps(args, sort_keys=True)
875
+ else:
876
+ args_str = "" if args is None else str(args)
877
+ chunks.append(
878
+ f"TOOL CALL ({part.tool_name}, id={part.tool_call_id}): {args_str}"
879
+ )
880
+ else:
881
+ # Ignore other part kinds (thinking, builtin, etc.) for provider safety.
882
+ continue
883
+
884
+ content = "\n\n".join([c for c in chunks if c is not None])
885
+ return ChatCompletionAssistantMessageParam(
886
+ role="assistant",
887
+ content=content or None,
888
+ )
889
+
890
+ async def _map_user_message(self, message): # type: ignore[override]
891
+ # Import locally to keep import-time cost down.
892
+ from openai.types.chat import (
893
+ ChatCompletionDeveloperMessageParam,
894
+ ChatCompletionSystemMessageParam,
895
+ ChatCompletionToolMessageParam,
896
+ ChatCompletionUserMessageParam,
897
+ )
898
+ from pydantic_ai._utils import (
899
+ guard_tool_call_id as _guard_tool_call_id,
900
+ )
901
+ from pydantic_ai.messages import (
902
+ RetryPromptPart,
903
+ SystemPromptPart,
904
+ ToolReturnPart,
905
+ UserPromptPart,
906
+ )
907
+ from pydantic_ai.profiles.openai import OpenAIModelProfile
908
+
909
+ for part in message.parts:
910
+ if isinstance(part, SystemPromptPart):
911
+ system_prompt_role = OpenAIModelProfile.from_profile(
912
+ self.profile
913
+ ).openai_system_prompt_role
914
+ if system_prompt_role == "developer":
915
+ yield ChatCompletionDeveloperMessageParam(
916
+ role="developer", content=part.content
917
+ )
918
+ elif system_prompt_role == "user":
919
+ yield ChatCompletionUserMessageParam(
920
+ role="user", content=part.content
921
+ )
922
+ else:
923
+ yield ChatCompletionSystemMessageParam(
924
+ role="system", content=part.content
925
+ )
926
+ elif isinstance(part, UserPromptPart):
927
+ yield await super()._map_user_prompt(part)
928
+ elif isinstance(part, ToolReturnPart):
929
+ if tool_results_as_user:
930
+ yield ChatCompletionUserMessageParam(
931
+ role="user",
932
+ content=(
933
+ f"TOOL RESULT ({part.tool_name}, id={part.tool_call_id}):\n"
934
+ f"{part.model_response_str()}"
935
+ ),
936
+ )
937
+ else:
938
+ yield ChatCompletionToolMessageParam(
939
+ role="tool",
940
+ tool_call_id=_guard_tool_call_id(t=part),
941
+ content=part.model_response_str(),
942
+ )
943
+ elif isinstance(part, RetryPromptPart):
944
+ if part.tool_name is None:
945
+ yield ChatCompletionUserMessageParam(
946
+ role="user", content=part.model_response()
947
+ )
948
+ else:
949
+ if tool_results_as_user:
950
+ yield ChatCompletionUserMessageParam(
951
+ role="user",
952
+ content=(
953
+ f"TOOL RESULT ({part.tool_name}, id={part.tool_call_id}):\n"
954
+ f"{part.model_response()}"
955
+ ),
956
+ )
957
+ else:
958
+ yield ChatCompletionToolMessageParam(
959
+ role="tool",
960
+ tool_call_id=_guard_tool_call_id(t=part),
961
+ content=part.model_response(),
962
+ )
963
+ else:
964
+ raise TypeError(
965
+ f"Unsupported request part type: {type(part).__name__}"
966
+ )
967
+
968
+ model = _CompatChatModel(
969
+ model_name=model_config["name"], provider=provider
970
+ )
971
+ else:
972
+ model = OpenAIChatModel(
973
+ model_name=model_config["name"], provider=provider
974
+ )
975
+ if model_name == "chatgpt-gpt-5-codex":
976
+ model = OpenAIResponsesModel(model_config["name"], provider=provider)
977
+ return model
978
+ elif model_type == "zai_coding":
979
+ api_key = get_api_key("ZAI_API_KEY")
980
+ if not api_key:
981
+ emit_warning(
982
+ f"ZAI_API_KEY is not set (check config or environment); skipping ZAI coding model '{model_config.get('name')}'."
983
+ )
984
+ return None
985
+ provider = make_openai_provider(
986
+ provider_identity,
987
+ api_key=api_key,
988
+ base_url="https://api.z.ai/api/coding/paas/v4",
989
+ )
990
+ return ZaiChatModel(
991
+ model_name=model_config["name"],
992
+ provider=provider,
993
+ )
994
+ elif model_type == "zai_api":
995
+ api_key = get_api_key("ZAI_API_KEY")
996
+ if not api_key:
997
+ emit_warning(
998
+ f"ZAI_API_KEY is not set (check config or environment); skipping ZAI API model '{model_config.get('name')}'."
999
+ )
1000
+ return None
1001
+ provider = make_openai_provider(
1002
+ provider_identity,
1003
+ api_key=api_key,
1004
+ base_url="https://api.z.ai/api/paas/v4/",
1005
+ )
1006
+ return ZaiChatModel(
1007
+ model_name=model_config["name"],
1008
+ provider=provider,
1009
+ )
1010
+
1011
+ elif model_type == "custom_gemini":
1012
+ url, headers, verify, api_key, timeout = get_custom_config(model_config)
1013
+ if not api_key:
1014
+ emit_warning(
1015
+ f"API key is not set for custom Gemini endpoint; skipping model '{model_config.get('name')}'."
1016
+ )
1017
+ return None
1018
+
1019
+ client = create_async_client(
1020
+ headers=headers,
1021
+ verify=verify,
1022
+ timeout=timeout if timeout is not None else 180,
1023
+ )
1024
+ model = GeminiModel(
1025
+ model_name=model_config["name"],
1026
+ api_key=api_key,
1027
+ base_url=url,
1028
+ http_client=client,
1029
+ )
1030
+ return model
1031
+ elif model_type == "cerebras":
1032
+
1033
+ class ZaiCerebrasProvider(CerebrasProvider):
1034
+ def model_profile(self, model_name: str) -> ModelProfile | None:
1035
+ profile = super().model_profile(model_name)
1036
+ if model_name.startswith("zai"):
1037
+ from pydantic_ai.profiles.qwen import qwen_model_profile
1038
+
1039
+ profile = profile.update(qwen_model_profile("qwen-3-coder"))
1040
+ return profile
1041
+
1042
+ url, headers, verify, api_key, timeout = get_custom_config(model_config)
1043
+ if not api_key:
1044
+ emit_warning(
1045
+ f"API key is not set for Cerebras endpoint; skipping model '{model_config.get('name')}'."
1046
+ )
1047
+ return None
1048
+ # Add Cerebras 3rd party integration header
1049
+ headers["X-Cerebras-3rd-Party-Integration"] = "muse"
1050
+ # Pass "cerebras" so RetryingAsyncClient knows to ignore Cerebras's
1051
+ # absurdly aggressive Retry-After headers (they send 60s!)
1052
+ # Note: model_config["name"] is the model's internal name, not the provider
1053
+ client = create_async_client(
1054
+ headers=headers,
1055
+ verify=verify,
1056
+ model_name="cerebras",
1057
+ timeout=timeout if timeout is not None else 180,
1058
+ )
1059
+ provider_args = dict(
1060
+ api_key=api_key,
1061
+ http_client=client,
1062
+ )
1063
+ provider = ZaiCerebrasProvider(**provider_args)
1064
+
1065
+ return OpenAIChatModel(model_name=model_config["name"], provider=provider)
1066
+
1067
+ elif model_type == "openrouter":
1068
+ # Get API key from config, which can be an environment variable reference or raw value
1069
+ api_key_config = model_config.get("api_key")
1070
+ api_key = None
1071
+
1072
+ if api_key_config:
1073
+ if api_key_config.startswith("$"):
1074
+ # It's an environment variable reference
1075
+ env_var_name = api_key_config[1:] # Remove the $ prefix
1076
+ api_key = get_api_key(env_var_name)
1077
+ if api_key is None:
1078
+ emit_warning(
1079
+ f"OpenRouter API key '{env_var_name}' not found (check config or environment); skipping model '{model_config.get('name')}'."
1080
+ )
1081
+ return None
1082
+ else:
1083
+ # It's a raw API key value
1084
+ api_key = api_key_config
1085
+ else:
1086
+ # No API key in config, try to get it from config or the default environment variable
1087
+ api_key = get_api_key("OPENROUTER_API_KEY")
1088
+ if api_key is None:
1089
+ emit_warning(
1090
+ f"OPENROUTER_API_KEY is not set (check config or environment); skipping OpenRouter model '{model_config.get('name')}'."
1091
+ )
1092
+ return None
1093
+
1094
+ provider = OpenRouterProvider(api_key=api_key)
1095
+
1096
+ return OpenAIChatModel(model_name=model_config["name"], provider=provider)
1097
+
1098
+ elif model_type == "gemini_oauth":
1099
+ # Gemini OAuth models use the Code Assist API (cloudcode-pa.googleapis.com)
1100
+ # This is a different API than the standard Generative Language API
1101
+ try:
1102
+ # Try user plugin first, then built-in plugin
1103
+ try:
1104
+ from gemini_oauth.config import GEMINI_OAUTH_CONFIG
1105
+ from gemini_oauth.utils import (
1106
+ get_project_id,
1107
+ get_valid_access_token,
1108
+ )
1109
+ except ImportError:
1110
+ from code_muse.plugins.gemini_oauth.config import (
1111
+ GEMINI_OAUTH_CONFIG,
1112
+ )
1113
+ from code_muse.plugins.gemini_oauth.utils import (
1114
+ get_project_id,
1115
+ get_valid_access_token,
1116
+ )
1117
+ except ImportError as exc:
1118
+ emit_warning(
1119
+ f"Gemini OAuth plugin not available; skipping model '{model_config.get('name')}'. "
1120
+ f"Error: {exc}"
1121
+ )
1122
+ return None
1123
+
1124
+ # Get a valid access token (refreshing if needed)
1125
+ access_token = get_valid_access_token()
1126
+ if not access_token:
1127
+ emit_warning(
1128
+ f"Failed to get valid Gemini OAuth token; skipping model '{model_config.get('name')}'. "
1129
+ "Run /gemini-auth to re-authenticate."
1130
+ )
1131
+ return None
1132
+
1133
+ # Get project ID from stored tokens
1134
+ project_id = get_project_id()
1135
+ if not project_id:
1136
+ emit_warning(
1137
+ f"No Code Assist project ID found; skipping model '{model_config.get('name')}'. "
1138
+ "Run /gemini-auth to re-authenticate."
1139
+ )
1140
+ return None
1141
+
1142
+ # Import the Code Assist model wrapper
1143
+ from code_muse.gemini_code_assist import GeminiCodeAssistModel
1144
+
1145
+ # Create the Code Assist model
1146
+ model = GeminiCodeAssistModel(
1147
+ model_name=model_config["name"],
1148
+ access_token=access_token,
1149
+ project_id=project_id,
1150
+ api_base_url=GEMINI_OAUTH_CONFIG["api_base_url"],
1151
+ api_version=GEMINI_OAUTH_CONFIG["api_version"],
1152
+ )
1153
+ return model
1154
+
1155
+ # NOTE: 'chatgpt_oauth' model type is now handled by the chatgpt_oauth plugin
1156
+ # via the register_model_type callback. See plugins/chatgpt_oauth/register_callbacks.py
1157
+
1158
+ elif model_type == "round_robin":
1159
+ # Get the list of model names to use in the round-robin
1160
+ model_names = model_config.get("models")
1161
+ if not model_names or not isinstance(model_names, list):
1162
+ raise ValueError(
1163
+ f"Round-robin model '{model_name}' requires a 'models' list in its configuration."
1164
+ )
1165
+
1166
+ # Get the rotate_every parameter (default: 1)
1167
+ rotate_every = model_config.get("rotate_every", 1)
1168
+
1169
+ # Resolve each model name to an actual model instance
1170
+ models = []
1171
+ for name in model_names:
1172
+ # Recursively get each model using the factory
1173
+ model = ModelFactory.get_model(name, config)
1174
+ models.append(model)
1175
+
1176
+ # Create and return the round-robin model
1177
+ return RoundRobinModel(*models, rotate_every=rotate_every)
1178
+
1179
+ else:
1180
+ # Check for plugin-registered model type handlers
1181
+ registered_handlers = callbacks.on_register_model_types()
1182
+ for handler_info in registered_handlers:
1183
+ # Handler info can be a list of dicts or a single dict
1184
+ if isinstance(handler_info, list):
1185
+ handlers = handler_info
1186
+ else:
1187
+ handlers = [handler_info] if handler_info else []
1188
+
1189
+ for handler_entry in handlers:
1190
+ if not isinstance(handler_entry, dict):
1191
+ continue
1192
+ if handler_entry.get("type") == model_type:
1193
+ handler = handler_entry.get("handler")
1194
+ if callable(handler):
1195
+ try:
1196
+ return handler(model_name, model_config, config)
1197
+ except Exception as e:
1198
+ logger.error(
1199
+ f"Plugin handler for model type '{model_type}' failed: {e}"
1200
+ )
1201
+ return None
1202
+
1203
+ raise ValueError(f"Unsupported model type: {model_type}")