code-puppy 0.0.379__py3-none-any.whl → 0.0.381__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,8 +3,11 @@
 import asyncio
 import json
 import math
+import pathlib
 import signal
 import threading
+import time
+import traceback
 import uuid
 from abc import ABC, abstractmethod
 from typing import (
@@ -37,6 +40,7 @@ from pydantic_ai.durable_exec.dbos import DBOSAgent
 from pydantic_ai.messages import (
     ModelMessage,
     ModelRequest,
+    ModelResponse,
     TextPart,
     ThinkingPart,
     ToolCallPart,
@@ -88,6 +92,49 @@ _delayed_compaction_requested = False
 _reload_count = 0
 
 
+def _log_error_to_file(exc: Exception) -> Optional[str]:
+    """Log detailed error information to ~/.code_puppy/error_logs/log_{timestamp}.txt.
+
+    Args:
+        exc: The exception to log.
+
+    Returns:
+        The path to the log file if successful, None otherwise.
+    """
+    try:
+        error_logs_dir = pathlib.Path.home() / ".code_puppy" / "error_logs"
+        error_logs_dir.mkdir(parents=True, exist_ok=True)
+
+        timestamp = time.strftime("%Y%m%d_%H%M%S")
+        log_file = error_logs_dir / f"log_{timestamp}.txt"
+
+        with open(log_file, "w", encoding="utf-8") as f:
+            f.write(f"Timestamp: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
+            f.write(f"Exception Type: {type(exc).__name__}\n")
+            f.write(f"Exception Message: {str(exc)}\n")
+            f.write(f"Exception Args: {exc.args}\n")
+            f.write("\n--- Full Traceback ---\n")
+            f.write(traceback.format_exc())
+            f.write("\n--- Exception Chain ---\n")
+            # Walk the exception chain for chained exceptions
+            current = exc
+            chain_depth = 0
+            while current is not None and chain_depth < 10:
+                f.write(
+                    f"\n[Cause {chain_depth}] {type(current).__name__}: {current}\n"
+                )
+                f.write("".join(traceback.format_tb(current.__traceback__)))
+                current = (
+                    current.__cause__ if current.__cause__ else current.__context__
+                )
+                chain_depth += 1
+
+        return str(log_file)
+    except Exception:
+        # Don't let logging errors break the main flow
+        return None
+
+
 class BaseAgent(ABC):
     """Base class for all agent configurations."""
 
@@ -264,6 +311,33 @@ class BaseAgent(ABC):
             cleaned.append(message)
         return cleaned
 
+    def ensure_history_ends_with_request(
+        self, messages: List[ModelMessage]
+    ) -> List[ModelMessage]:
+        """Ensure message history ends with a ModelRequest.
+
+        pydantic_ai requires that processed message history ends with a ModelRequest.
+        This can fail when swapping models mid-conversation if the history ends with
+        a ModelResponse from the previous model.
+
+        This method trims trailing ModelResponse messages to ensure compatibility.
+
+        Args:
+            messages: List of messages to validate/fix.
+
+        Returns:
+            List of messages guaranteed to end with ModelRequest, or empty list
+            if no ModelRequest is found.
+        """
+        if not messages:
+            return messages
+
+        # Trim trailing ModelResponse messages
+        while messages and isinstance(messages[-1], ModelResponse):
+            messages = messages[:-1]
+
+        return messages
+
     # Message history processing methods (moved from state_management.py and message_history_processor.py)
     def _stringify_part(self, part: Any) -> str:
         """Create a stable string representation for a message part.
@@ -372,10 +446,10 @@ class BaseAgent(ABC):
 
     def estimate_token_count(self, text: str) -> int:
         """
-        Simple token estimation using len(message) / 3.
+        Simple token estimation using len(message) / 2.5.
         This replaces tiktoken with a much simpler approach.
         """
-        return max(1, math.floor((len(text) / 3)))
+        return max(1, math.floor((len(text) / 2.5)))
 
     def estimate_tokens_for_message(self, message: ModelMessage) -> int:
         """
code_puppy/callbacks.py CHANGED
@@ -13,6 +13,7 @@ PhaseType = Literal[
     "delete_file",
     "run_shell_command",
     "load_model_config",
+    "load_models_config",
     "load_prompt",
     "agent_reload",
     "custom_command",
@@ -27,6 +28,10 @@ PhaseType = Literal[
     "get_model_system_prompt",
     "agent_run_start",
     "agent_run_end",
+    "register_mcp_catalog_servers",
+    "register_browser_types",
+    "get_motd",
+    "register_model_providers",
 ]
 CallbackFunc = Callable[..., Any]
 
@@ -40,6 +45,7 @@ _callbacks: Dict[PhaseType, List[CallbackFunc]] = {
     "delete_file": [],
     "run_shell_command": [],
     "load_model_config": [],
+    "load_models_config": [],
     "load_prompt": [],
     "agent_reload": [],
     "custom_command": [],
@@ -54,6 +60,10 @@ _callbacks: Dict[PhaseType, List[CallbackFunc]] = {
     "get_model_system_prompt": [],
     "agent_run_start": [],
     "agent_run_end": [],
+    "register_mcp_catalog_servers": [],
+    "register_browser_types": [],
+    "get_motd": [],
+    "register_model_providers": [],
 }
 
 logger = logging.getLogger(__name__)
@@ -204,6 +214,19 @@ def on_load_model_config(*args, **kwargs) -> List[Any]:
     return _trigger_callbacks_sync("load_model_config", *args, **kwargs)
 
 
+def on_load_models_config() -> List[Any]:
+    """Trigger callbacks to load additional model configurations.
+
+    Plugins can register callbacks that return a dict of model configurations
+    to be merged with the built-in models.json. Plugin models override built-in
+    models with the same name.
+
+    Returns:
+        List of model config dicts from all registered callbacks.
+    """
+    return _trigger_callbacks_sync("load_models_config")
+
+
 def on_edit_file(*args, **kwargs) -> Any:
     return _trigger_callbacks_sync("edit_file", *args, **kwargs)
 
@@ -517,3 +540,64 @@ async def on_agent_run_end(
         response_text,
         metadata,
     )
+
+
+def on_register_mcp_catalog_servers() -> List[Any]:
+    """Trigger callbacks to register additional MCP catalog servers.
+
+    Plugins can register callbacks that return List[MCPServerTemplate] to add
+    servers to the MCP catalog/marketplace.
+
+    Returns:
+        List of results from all registered callbacks (each should be a list of MCPServerTemplate).
+    """
+    return _trigger_callbacks_sync("register_mcp_catalog_servers")
+
+
+def on_register_browser_types() -> List[Any]:
+    """Trigger callbacks to register custom browser types/providers.
+
+    Plugins can register callbacks that return a dict mapping browser type names
+    to initialization functions. This allows plugins to provide custom browser
+    implementations (like Camoufox for stealth browsing).
+
+    Each callback should return a dict with:
+    - key: str - the browser type name (e.g., "camoufox", "firefox-stealth")
+    - value: callable - async initialization function that takes (manager, **kwargs)
+      and sets up the browser on the manager instance
+
+    Example callback:
+        def register_my_browser_types():
+            return {
+                "camoufox": initialize_camoufox,
+                "my-stealth-browser": initialize_my_stealth,
+            }
+
+    Returns:
+        List of dicts from all registered callbacks.
+    """
+    return _trigger_callbacks_sync("register_browser_types")
+
+
+def on_get_motd() -> List[Any]:
+    """Trigger callbacks to get custom MOTD content.
+
+    Plugins can register callbacks that return a tuple of (message, version).
+    The last non-None result will be used as the MOTD.
+
+    Returns:
+        List of (message, version) tuples from registered callbacks.
+    """
+    return _trigger_callbacks_sync("get_motd")
+
+
+def on_register_model_providers() -> List[Any]:
+    """Trigger callbacks to register custom model provider classes.
+
+    Plugins can register callbacks that return a dict mapping provider names
+    to model classes. Example: {"walmart_gemini": WalmartGeminiModel}
+
+    Returns:
+        List of dicts from all registered callbacks.
+    """
+    return _trigger_callbacks_sync("register_model_providers")
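
All of the new phases above are dispatched through _trigger_callbacks_sync, so a plugin only needs to supply a callable whose return value matches the documented shape. A minimal plugin-side sketch with hypothetical names; how callbacks get registered is not shown in this diff, so the registration lines are left as comments:

    # Hypothetical plugin callbacks; return shapes follow the docstrings above.

    def my_models_config() -> dict:
        # Merged into the built-in models.json; same-name entries override built-ins.
        return {"my-local-model": {"type": "my_provider"}}

    def my_motd() -> tuple[str, str]:
        # (message, version); the last non-None tuple from any callback wins.
        return ("# Hello from a plugin MOTD", "plugin-motd-1")

    # register_callback("load_models_config", my_models_config)  # hypothetical registration helper
    # register_callback("get_motd", my_motd)                      # hypothetical registration helper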
code_puppy/cli_runner.py CHANGED
@@ -551,7 +551,13 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non
             reset_windows_terminal_full()
             from code_puppy.messaging import emit_warning
 
-            emit_warning("\nInput cancelled")
+            # Stop wiggum mode on Ctrl+C
+            from code_puppy.command_line.wiggum_state import is_wiggum_active, stop_wiggum
+            if is_wiggum_active():
+                stop_wiggum()
+                emit_warning("\n🍩 Wiggum loop stopped!")
+            else:
+                emit_warning("\nInput cancelled")
             continue
 
         # Check for exit commands (plain text or command form)
@@ -718,6 +724,12 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non
                 ensure_ctrl_c_disabled()
             except ImportError:
                 pass
+            # Stop wiggum mode on cancellation
+            from code_puppy.command_line.wiggum_state import is_wiggum_active, stop_wiggum
+            if is_wiggum_active():
+                stop_wiggum()
+                from code_puppy.messaging import emit_warning
+                emit_warning("🍩 Wiggum loop stopped due to cancellation")
             continue
         # Get the structured response
         agent_response = result.output
@@ -758,6 +770,86 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> Non
 
         auto_save_session_if_enabled()
 
+        # ================================================================
+        # WIGGUM LOOP: Re-run prompt if wiggum mode is active
+        # ================================================================
+        from code_puppy.command_line.wiggum_state import (
+            get_wiggum_prompt,
+            get_wiggum_count,
+            increment_wiggum_count,
+            is_wiggum_active,
+            stop_wiggum,
+        )
+
+        while is_wiggum_active():
+            wiggum_prompt = get_wiggum_prompt()
+            if not wiggum_prompt:
+                stop_wiggum()
+                break
+
+            # Increment and show debug message
+            loop_num = increment_wiggum_count()
+            from code_puppy.messaging import emit_warning, emit_system_message
+
+            emit_warning(f"\n🍩 WIGGUM RELOOPING! (Loop #{loop_num})")
+            emit_system_message(f"Re-running prompt: {wiggum_prompt}")
+
+            # Reset context/history for fresh start
+            new_session_id = finalize_autosave_session()
+            current_agent.clear_message_history()
+            emit_system_message(f"Context cleared. Session rotated to: {new_session_id}")
+
+            # Small delay to let user see the debug message
+            import time
+            time.sleep(0.5)
+
+            try:
+                # Re-run the wiggum prompt
+                result, current_agent_task = await run_prompt_with_attachments(
+                    current_agent,
+                    wiggum_prompt,
+                    spinner_console=message_renderer.console,
+                )
+
+                if result is None:
+                    # Cancelled - stop wiggum mode
+                    emit_warning("Wiggum loop cancelled by user")
+                    stop_wiggum()
+                    break
+
+                # Get the structured response
+                agent_response = result.output
+
+                # Emit structured message for proper markdown rendering
+                response_msg = AgentResponseMessage(
+                    content=agent_response,
+                    is_markdown=True,
+                )
+                get_message_bus().emit(response_msg)
+
+                # Update message history
+                if hasattr(result, "all_messages"):
+                    current_agent.set_message_history(list(result.all_messages()))
+
+                # Flush console
+                display_console.file.flush() if hasattr(
+                    display_console.file, "flush"
+                ) else None
+                time.sleep(0.1)
+
+                # Auto-save
+                auto_save_session_if_enabled()
+
+            except KeyboardInterrupt:
+                emit_warning("\n🍩 Wiggum loop interrupted by Ctrl+C")
+                stop_wiggum()
+                break
+            except Exception as e:
+                from code_puppy.messaging import emit_error
+                emit_error(f"Wiggum loop error: {e}")
+                stop_wiggum()
+                break
+
         # Re-disable Ctrl+C if needed (uvx mode) - must be done after
         # each iteration as various operations may restore console mode
         try:
@@ -790,3 +790,64 @@ def handle_generate_pr_description_command(command: str) -> str:
 
     # Return the prompt to be processed by the main chat system
     return pr_prompt
+
+
+@register_command(
+    name="wiggum",
+    description="Loop mode: re-run the same prompt when agent finishes (like Wiggum chasing donuts 🍩)",
+    usage="/wiggum <prompt>",
+    category="core",
+)
+def handle_wiggum_command(command: str) -> str | bool:
+    """Start wiggum loop mode.
+
+    When active, the agent will automatically re-run the same prompt
+    after completing, resetting context each time. Use Ctrl+C to stop.
+
+    Example:
+        /wiggum say hello world
+    """
+    from code_puppy.command_line.wiggum_state import start_wiggum, stop_wiggum
+    from code_puppy.messaging import emit_info, emit_success, emit_warning
+
+    # Extract the prompt after /wiggum
+    parts = command.split(maxsplit=1)
+    if len(parts) < 2 or not parts[1].strip():
+        emit_warning("Usage: /wiggum <prompt>")
+        emit_info("Example: /wiggum say hello world")
+        emit_info("This will repeatedly run 'say hello world' after each completion.")
+        emit_info("Press Ctrl+C to stop the loop.")
+        return True
+
+    prompt = parts[1].strip()
+
+    # Start wiggum mode
+    start_wiggum(prompt)
+    emit_success(f"🍩 WIGGUM MODE ACTIVATED!")
+    emit_info(f"Prompt: {prompt}")
+    emit_info("The agent will re-loop this prompt after each completion.")
+    emit_info("Press Ctrl+C to stop the wiggum loop.")
+
+    # Return the prompt to execute immediately
+    return prompt
+
+
+@register_command(
+    name="wiggum_stop",
+    description="Stop wiggum loop mode",
+    usage="/wiggum_stop",
+    aliases=["stopwiggum", "ws"],
+    category="core",
+)
+def handle_wiggum_stop_command(command: str) -> bool:
+    """Stop wiggum loop mode."""
+    from code_puppy.command_line.wiggum_state import is_wiggum_active, stop_wiggum
+    from code_puppy.messaging import emit_info, emit_success
+
+    if is_wiggum_active():
+        stop_wiggum()
+        emit_success("🍩 Wiggum mode stopped!")
+    else:
+        emit_info("Wiggum mode is not active.")
+
+    return True
@@ -25,6 +25,28 @@ Reminder that Code Puppy supports three different OAuth subscriptions:
 MOTD_TRACK_FILE = os.path.join(CONFIG_DIR, "motd.txt")
 
 
+def get_motd_content() -> tuple[str, str]:
+    """Get MOTD content, checking plugins first.
+
+    Returns:
+        Tuple of (message, version) - either from plugin or built-in.
+    """
+    # Check if plugins want to override MOTD
+    try:
+        from code_puppy.callbacks import on_get_motd
+
+        results = on_get_motd()
+        # Use the last non-None result
+        for result in reversed(results):
+            if result is not None and isinstance(result, tuple) and len(result) == 2:
+                return result
+    except Exception:
+        pass
+
+    # Fall back to built-in MOTD
+    return (MOTD_MESSAGE, MOTD_VERSION)
+
+
 def has_seen_motd(version: str) -> bool: # 🐕 Check if puppy has seen this MOTD!
     if not os.path.exists(MOTD_TRACK_FILE):
         return False
@@ -62,12 +84,13 @@ def print_motd(
     Returns:
         True if the MOTD was printed, False otherwise 🐾
     """
-    if force or not has_seen_motd(MOTD_VERSION):
+    message, version = get_motd_content()
+    if force or not has_seen_motd(version):
         # Create a Rich Markdown object for proper rendering 🎨🐶
         from rich.markdown import Markdown
 
-        markdown_content = Markdown(MOTD_MESSAGE)
+        markdown_content = Markdown(message)
         emit_info(markdown_content)
-        mark_motd_seen(MOTD_VERSION)
+        mark_motd_seen(version)
         return True
     return False
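
The selection rule in get_motd_content above is: the last well-formed (message, version) tuple returned by a plugin wins, otherwise fall back to the built-in MOTD. A standalone sketch of that rule with made-up values:

    MOTD_MESSAGE, MOTD_VERSION = "built-in message", "1.0"

    def pick_motd(results):
        # Mirrors get_motd_content: scan plugin results from last to first,
        # keep the first well-formed (message, version) tuple, else fall back.
        for result in reversed(results):
            if result is not None and isinstance(result, tuple) and len(result) == 2:
                return result
        return (MOTD_MESSAGE, MOTD_VERSION)

    print(pick_motd([None, ("plugin says hi", "2.0")]))  # ('plugin says hi', '2.0')
    print(pick_motd([]))                                 # ('built-in message', '1.0')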
@@ -0,0 +1,78 @@
+"""Wiggum loop state management.
+
+This module tracks the state for the /wiggum command, which causes
+the agent to automatically re-run the same prompt after completing,
+like Chief Wiggum chasing donuts in circles. 🍩
+
+Usage:
+    /wiggum <prompt> - Start looping with the given prompt
+    Ctrl+C - Stop the wiggum loop
+"""
+
+from dataclasses import dataclass, field
+from typing import Optional
+
+
+@dataclass
+class WiggumState:
+    """State container for wiggum loop mode."""
+
+    active: bool = False
+    prompt: Optional[str] = None
+    loop_count: int = 0
+
+    def start(self, prompt: str) -> None:
+        """Start wiggum mode with the given prompt."""
+        self.active = True
+        self.prompt = prompt
+        self.loop_count = 0
+
+    def stop(self) -> None:
+        """Stop wiggum mode."""
+        self.active = False
+        self.prompt = None
+        self.loop_count = 0
+
+    def increment(self) -> int:
+        """Increment and return the loop count."""
+        self.loop_count += 1
+        return self.loop_count
+
+
+# Global singleton for wiggum state
+_wiggum_state = WiggumState()
+
+
+def get_wiggum_state() -> WiggumState:
+    """Get the global wiggum state."""
+    return _wiggum_state
+
+
+def is_wiggum_active() -> bool:
+    """Check if wiggum mode is currently active."""
+    return _wiggum_state.active
+
+
+def get_wiggum_prompt() -> Optional[str]:
+    """Get the current wiggum prompt, if active."""
+    return _wiggum_state.prompt if _wiggum_state.active else None
+
+
+def start_wiggum(prompt: str) -> None:
+    """Start wiggum mode with the given prompt."""
+    _wiggum_state.start(prompt)
+
+
+def stop_wiggum() -> None:
+    """Stop wiggum mode."""
+    _wiggum_state.stop()
+
+
+def increment_wiggum_count() -> int:
+    """Increment wiggum loop count and return the new value."""
+    return _wiggum_state.increment()
+
+
+def get_wiggum_count() -> int:
+    """Get the current wiggum loop count."""
+    return _wiggum_state.loop_count
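
A quick usage sketch of the new state module, assuming the code_puppy.command_line.wiggum_state path used by the imports elsewhere in this diff:

    from code_puppy.command_line.wiggum_state import (
        get_wiggum_count,
        get_wiggum_prompt,
        increment_wiggum_count,
        is_wiggum_active,
        start_wiggum,
        stop_wiggum,
    )

    start_wiggum("say hello world")
    assert is_wiggum_active()
    assert get_wiggum_prompt() == "say hello world"
    assert increment_wiggum_count() == 1

    stop_wiggum()                        # resets active, prompt, and count
    assert not is_wiggum_active()
    assert get_wiggum_prompt() is None
    assert get_wiggum_count() == 0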
code_puppy/config.py CHANGED
@@ -144,6 +144,19 @@ def set_universal_constructor_enabled(enabled: bool) -> None:
     set_value("enable_universal_constructor", "true" if enabled else "false")
 
 
+def get_enable_streaming() -> bool:
+    """
+    Get the enable_streaming configuration value.
+    Controls whether streaming (SSE) is used for model responses.
+    Returns True if streaming is enabled, False otherwise.
+    Defaults to True.
+    """
+    val = get_value("enable_streaming")
+    if val is None:
+        return True  # Default to True for better UX
+    return str(val).lower() in ("1", "true", "yes", "on")
+
+
 DEFAULT_SECTION = "puppy"
 REQUIRED_KEYS = ["puppy_name", "owner_name"]
 
@@ -289,6 +302,8 @@ def get_config_keys():
     default_keys.append("enable_pack_agents")
     # Add universal constructor control key
     default_keys.append("enable_universal_constructor")
+    # Add streaming control key
+    default_keys.append("enable_streaming")
     # Add cancel agent key configuration
     default_keys.append("cancel_agent_key")
     # Add banner color keys
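
The new enable_streaming flag defaults to on and accepts the usual truthy spellings. A standalone sketch of the parsing rule used by get_enable_streaming, with the config lookup stubbed out:

    def parse_enable_streaming(val) -> bool:
        # Mirrors get_enable_streaming: unset defaults to True; otherwise only
        # "1", "true", "yes", "on" (case-insensitive) count as enabled.
        if val is None:
            return True
        return str(val).lower() in ("1", "true", "yes", "on")

    assert parse_enable_streaming(None) is True
    assert parse_enable_streaming("Yes") is True
    assert parse_enable_streaming("0") is False
    assert parse_enable_streaming("off") is False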
@@ -1025,7 +1025,20 @@ class MCPServerCatalog:
     """Catalog for searching and managing pre-configured MCP servers."""
 
     def __init__(self):
-        self.servers = MCP_SERVER_REGISTRY
+        # Start with built-in servers
+        self.servers = list(MCP_SERVER_REGISTRY)
+
+        # Let plugins add their own catalog entries
+        try:
+            from code_puppy.callbacks import on_register_mcp_catalog_servers
+
+            plugin_results = on_register_mcp_catalog_servers()
+            for result in plugin_results:
+                if isinstance(result, list):
+                    self.servers.extend(result)
+        except Exception:
+            pass  # Don't break catalog if plugins fail
+
         self._build_index()
 
     def _build_index(self):
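
The catalog merge above is deliberately defensive: only plugin results that are lists are extended into a copy of the built-in registry, and any plugin failure is swallowed. A standalone sketch with placeholder strings standing in for MCPServerTemplate entries:

    builtin = ["server-a", "server-b"]            # stand-ins for MCP_SERVER_REGISTRY entries
    plugin_results = [["plugin-server"], None, "not-a-list"]

    servers = list(builtin)                       # copy so the registry itself is untouched
    for result in plugin_results:
        if isinstance(result, list):              # non-list results are ignored
            servers.extend(result)

    print(servers)  # ['server-a', 'server-b', 'plugin-server']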
@@ -30,6 +30,26 @@ from .round_robin_model import RoundRobinModel
 
 logger = logging.getLogger(__name__)
 
+# Registry for custom model provider classes from plugins
+_CUSTOM_MODEL_PROVIDERS: Dict[str, type] = {}
+
+
+def _load_plugin_model_providers():
+    """Load custom model providers from plugins."""
+    global _CUSTOM_MODEL_PROVIDERS
+    try:
+        from code_puppy.callbacks import on_register_model_providers
+        results = on_register_model_providers()
+        for result in results:
+            if isinstance(result, dict):
+                _CUSTOM_MODEL_PROVIDERS.update(result)
+    except Exception:
+        pass  # Don't break if plugins fail
+
+
+# Load plugin model providers at module initialization
+_load_plugin_model_providers()
+
 
 def get_api_key(env_var_name: str) -> str | None:
     """Get an API key from config first, then fall back to environment variable.
@@ -279,6 +299,20 @@ class ModelFactory:
                 logging.getLogger(__name__).warning(
                     f"Failed to load {label} config from {source_path}: {exc}"
                 )
+
+        # Let plugins add/override models via load_models_config hook
+        try:
+            from code_puppy.callbacks import on_load_models_config
+
+            results = on_load_models_config()
+            for result in results:
+                if isinstance(result, dict):
+                    config.update(result)  # Plugin models override built-in
+        except Exception as exc:
+            logging.getLogger(__name__).debug(
+                f"Failed to load plugin models config: {exc}"
+            )
+
         return config
 
     @staticmethod
@@ -294,6 +328,15 @@ class ModelFactory:
 
 
         model_type = model_config.get("type")
+
+        # Check for plugin-registered model provider classes first
+        if model_type in _CUSTOM_MODEL_PROVIDERS:
+            provider_class = _CUSTOM_MODEL_PROVIDERS[model_type]
+            try:
+                return provider_class(model_name=model_name, model_config=model_config, config=config)
+            except Exception as e:
+                logger.error(f"Custom model provider '{model_type}' failed: {e}")
+                return None
+
         if model_type == "gemini":
             api_key = get_api_key("GEMINI_API_KEY")
             if not api_key:
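
For the provider hook, the only contract visible in this diff is that the registered class is instantiated as provider_class(model_name=..., model_config=..., config=...) when a model's "type" matches its registry key; {"walmart_gemini": WalmartGeminiModel} in the docstring is just an example name. A hypothetical plugin-side sketch under that assumption:

    class MyCustomModel:
        # Hypothetical provider class; only the constructor keywords are taken from this diff.
        def __init__(self, model_name: str, model_config: dict, config: dict):
            self.model_name = model_name
            self.model_config = model_config
            self.config = config

    def register_my_providers() -> dict:
        # Returned dict is merged into _CUSTOM_MODEL_PROVIDERS: {"type name": provider class}.
        return {"my_custom_provider": MyCustomModel}

    # register_callback("register_model_providers", register_my_providers)  # hypothetical registration helper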