vibecore 0.3.0b1__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -0,0 +1,131 @@
+ """Token management for OAuth authentication."""
+
+ import asyncio
+ import time
+
+ import httpx
+
+ from vibecore.auth.config import ANTHROPIC_CONFIG
+ from vibecore.auth.models import OAuthCredentials
+ from vibecore.auth.storage import SecureAuthStorage
+
+
+ class TokenRefreshManager:
+     """Manages OAuth token refresh with automatic renewal."""
+
+     def __init__(self, storage: SecureAuthStorage):
+         """
+         Initialize token refresh manager.
+
+         Args:
+             storage: Secure storage for credentials.
+         """
+         self.storage = storage
+         self.refresh_lock = asyncio.Lock()
+         self.refresh_task: asyncio.Task | None = None
+
+     async def get_valid_token(self) -> str:
+         """
+         Get a valid access token, refreshing if necessary.
+
+         Returns:
+             Valid access token.
+
+         Raises:
+             ValueError: If not authenticated or refresh fails.
+         """
+         auth = await self.storage.load("anthropic")
+
+         if not auth:
+             raise ValueError("Not authenticated")
+
+         if auth.type == "api":  # API key auth
+             return auth.key  # type: ignore
+
+         # OAuth auth - check if token needs refresh
+         now = int(time.time() * 1000)
+         buffer_ms = ANTHROPIC_CONFIG.TOKEN_REFRESH_BUFFER_SECONDS * 1000
+         needs_refresh = not auth.access or auth.expires <= now + buffer_ms  # type: ignore
+
+         if not needs_refresh:
+             return auth.access  # type: ignore
+
+         # Refresh token with lock to prevent concurrent refreshes
+         async with self.refresh_lock:
+             # Re-check after acquiring lock
+             auth = await self.storage.load("anthropic")
+             if auth and auth.type == "oauth":
+                 now = int(time.time() * 1000)
+                 if auth.access and auth.expires > now + buffer_ms:  # type: ignore
+                     return auth.access  # type: ignore
+
+             # Perform refresh
+             if auth and auth.type == "oauth":
+                 return await self._refresh_token(auth.refresh)  # type: ignore
+             else:
+                 raise ValueError("Cannot refresh non-OAuth credentials")
+
+     async def _refresh_token(self, refresh_token: str) -> str:
+         """
+         Refresh the access token.
+
+         Args:
+             refresh_token: Refresh token.
+
+         Returns:
+             New access token.
+
+         Raises:
+             httpx.HTTPError: If refresh fails after retries.
+         """
+         last_error: Exception | None = None
+
+         # Retry logic
+         for attempt in range(ANTHROPIC_CONFIG.TOKEN_MAX_RETRY_ATTEMPTS):
+             try:
+                 # Exponential backoff for retries
+                 if attempt > 0:
+                     delay = ANTHROPIC_CONFIG.TOKEN_RETRY_DELAY_MS * (2 ** (attempt - 1)) / 1000
+                     await asyncio.sleep(delay)
+
+                 # Make refresh request
+                 async with httpx.AsyncClient() as client:
+                     response = await client.post(
+                         ANTHROPIC_CONFIG.TOKEN_EXCHANGE,
+                         headers={
+                             "Content-Type": "application/json",
+                             "Accept": "application/json",
+                         },
+                         json={
+                             "grant_type": "refresh_token",
+                             "refresh_token": refresh_token,
+                             "client_id": ANTHROPIC_CONFIG.OAUTH_CLIENT_ID,
+                         },
+                         timeout=30.0,
+                     )
+
+                 if response.status_code != 200:
+                     error_text = response.text
+                     raise httpx.HTTPError(f"Token refresh failed: {response.status_code} - {error_text}")
+
+                 tokens_data = response.json()
+
+                 # Update stored credentials
+                 new_credentials = OAuthCredentials(
+                     type="oauth",
+                     refresh=tokens_data.get("refresh_token", refresh_token),
+                     access=tokens_data["access_token"],
+                     expires=int(time.time() * 1000) + tokens_data["expires_in"] * 1000,
+                 )
+
+                 await self.storage.save("anthropic", new_credentials)
+                 return tokens_data["access_token"]
+
+             except Exception as error:
+                 last_error = error
+                 print(f"Token refresh attempt {attempt + 1} failed: {error}")
+
+         # All retries failed
+         raise ValueError(
+             f"Token refresh failed after {ANTHROPIC_CONFIG.TOKEN_MAX_RETRY_ATTEMPTS} attempts: {last_error}"
+         )
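Callers are expected to go through get_valid_token() rather than refreshing explicitly: it returns a stored API key unchanged and only refreshes OAuth tokens that expire within the configured buffer. A minimal usage sketch; the module path of TokenRefreshManager is not shown in this diff, so that import is an assumption:

    import asyncio

    from vibecore.auth.storage import SecureAuthStorage
    from vibecore.auth.token import TokenRefreshManager  # assumed module path; not shown in this diff


    async def fetch_token() -> str:
        storage = SecureAuthStorage()
        manager = TokenRefreshManager(storage)
        # Raises ValueError if no credentials are stored or the refresh ultimately fails.
        return await manager.get_valid_token()


    token = asyncio.run(fetch_token())
    print(f"Authorization: Bearer {token[:10]}...")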
vibecore/cli.py CHANGED
@@ -1,5 +1,6 @@
  """Vibecore CLI interface using typer."""

+ import asyncio
  import logging
  from importlib.metadata import version
  from pathlib import Path
@@ -15,6 +16,10 @@ from vibecore.settings import settings

  app = typer.Typer()

+ # Create auth subcommand group
+ auth_app = typer.Typer(help="Manage Anthropic authentication")
+ app.add_typer(auth_app, name="auth")
+

  def version_callback(value: bool):
      """Handle --version flag."""
@@ -57,9 +62,15 @@ def find_latest_session(project_path: Path | None = None, base_dir: Path | None
      return session_files[0].stem


- @app.command()
- def run(
-     prompt: str | None = typer.Argument(None, help="Prompt text (requires -p flag)"),
+ @app.callback(invoke_without_command=True)
+ def main(
+     ctx: typer.Context,
+     prompt: str | None = typer.Option(
+         None,
+         "--prompt",
+         "-p",
+         help="Initial prompt to send to the agent (reads from stdin if -p is used without argument)",
+     ),
      continue_session: bool = typer.Option(
          False,
          "--continue",
@@ -75,7 +86,6 @@ def run(
      print_mode: bool = typer.Option(
          False,
          "--print",
-         "-p",
          help="Print response and exit (useful for pipes)",
      ),
      version: bool | None = typer.Option(
@@ -87,6 +97,10 @@ def run(
      ),
  ):
      """Run the Vibecore TUI application."""
+     # If a subcommand was invoked, don't run the main app
+     if ctx.invoked_subcommand is not None:
+         return
+
      # Set up logging
      logging.basicConfig(
          level="WARNING",
@@ -97,14 +111,14 @@ def run(
      logger.addHandler(TextualHandler())

      # Create context
-     ctx = VibecoreContext()
+     vibecore_ctx = VibecoreContext()

      # Initialize MCP manager if configured
      mcp_servers = []
      if settings.mcp_servers:
          # Create MCP manager
          mcp_manager = MCPManager(settings.mcp_servers)
-         ctx.mcp_manager = mcp_manager
+         vibecore_ctx.mcp_manager = mcp_manager

          # Get the MCP servers from the manager
          mcp_servers = mcp_manager.servers
@@ -125,7 +139,7 @@ def run(
          typer.echo(f"Loading session: {session_to_load}")

      # Create app
-     app_instance = VibecoreApp(ctx, agent, session_id=session_to_load, print_mode=print_mode)
+     app_instance = VibecoreApp(vibecore_ctx, agent, session_id=session_to_load, print_mode=print_mode)

      if print_mode:
          # Run in print mode
@@ -141,10 +155,85 @@ def run(
          app_instance.run()


- def main():
+ @auth_app.command("login")
+ def auth_login(
+     provider: str = typer.Argument("anthropic", help="Authentication provider (currently only 'anthropic')"),
+     api_key: str = typer.Option(None, "--api-key", "-k", help="Use API key instead of OAuth"),
+     mode: str = typer.Option(
+         "max", "--mode", "-m", help="OAuth mode: 'max' for claude.ai, 'console' for console.anthropic.com"
+     ),
+ ):
+     """Authenticate with Anthropic Pro/Max or API key."""
+     if provider.lower() != "anthropic":
+         typer.echo(f"❌ Provider '{provider}' not supported. Currently only 'anthropic' is supported.")
+         raise typer.Exit(1)
+
+     from vibecore.auth.manager import AnthropicAuthManager
+
+     auth_manager = AnthropicAuthManager()
+
+     if api_key:
+         # API key authentication
+         success = asyncio.run(auth_manager.authenticate_with_api_key(api_key))
+         if not success:
+             raise typer.Exit(1)
+     else:
+         # OAuth Pro/Max authentication
+         success = asyncio.run(auth_manager.authenticate_pro_max(mode))
+         if not success:
+             raise typer.Exit(1)
+
+
+ @auth_app.command("logout")
+ def auth_logout(
+     provider: str = typer.Argument("anthropic", help="Authentication provider"),
+ ):
+     """Remove stored authentication."""
+     if provider.lower() != "anthropic":
+         typer.echo(f"❌ Provider '{provider}' not supported. Currently only 'anthropic' is supported.")
+         raise typer.Exit(1)
+
+     from vibecore.auth.manager import AnthropicAuthManager
+
+     auth_manager = AnthropicAuthManager()
+     asyncio.run(auth_manager.logout())
+
+
+ @auth_app.command("status")
+ def auth_status():
+     """Check authentication status."""
+     from vibecore.auth.manager import AnthropicAuthManager
+
+     auth_manager = AnthropicAuthManager()
+
+     if asyncio.run(auth_manager.is_authenticated()):
+         auth_type = asyncio.run(auth_manager.get_auth_type())
+         if auth_type == "oauth":
+             typer.echo("✅ Authenticated with Anthropic Pro/Max (OAuth)")
+         else:
+             typer.echo("✅ Authenticated with Anthropic API key")
+     else:
+         typer.echo("❌ Not authenticated with Anthropic")
+
+
+ @auth_app.command("test")
+ def auth_test():
+     """Test authentication by making a simple API call."""
+     from vibecore.auth.manager import AnthropicAuthManager
+
+     auth_manager = AnthropicAuthManager()
+
+     typer.echo("🔍 Testing authentication...")
+     success = asyncio.run(auth_manager.test_connection())
+
+     if not success:
+         raise typer.Exit(1)
+
+
+ def cli_main():
      """Entry point for the CLI."""
      app()


  if __name__ == "__main__":
-     main()
+     cli_main()
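Since run() was replaced by an app-level callback plus an auth sub-app, the CLI can be exercised programmatically with Typer's test runner; a small sketch (the API-key value is a placeholder, and the OAuth login path still requires a real browser flow):

    from typer.testing import CliRunner

    from vibecore.cli import app

    runner = CliRunner()

    # Show whether any Anthropic credentials are currently stored.
    print(runner.invoke(app, ["auth", "status"]).output)

    # Store an API key instead of going through the OAuth Pro/Max flow.
    result = runner.invoke(app, ["auth", "login", "anthropic", "--api-key", "sk-ant-..."])
    print(result.exit_code)

    # Remove the stored credentials again.
    runner.invoke(app, ["auth", "logout"])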
@@ -15,6 +15,7 @@ from agents (
      ToolCallOutputItem,
  )
  from openai.types.responses import (
+     ResponseCompletedEvent,
      ResponseFunctionToolCall,
      ResponseOutputItemAddedEvent,
      ResponseOutputItemDoneEvent,
@@ -123,6 +124,7 @@ class AgentStreamHandler:
          """
          match event:
              case RawResponsesStreamEvent(data=data):
+                 # log(f"RawResponsesStreamEvent data: {data.type}")
                  match data:
                      case ResponseOutputItemAddedEvent(item=ResponseReasoningItem() as item):
                          reasoning_id = item.id
@@ -180,7 +182,15 @@ class AgentStreamHandler:
                          else:
                              await self.handle_tool_call(tool_name, arguments, call_id)

+                     case ResponseCompletedEvent():
+                         # When in agent handoff or stop at tool situations, the tools should be in executing status.
+                         # We find all the executing status tool messages and mark them as success.
+                         for tool_message in self.tool_messages.values():
+                             if tool_message.status == MessageStatus.EXECUTING:
+                                 tool_message.status = MessageStatus.SUCCESS
+
              case RunItemStreamEvent(item=item):
+                 # log(f"RunItemStreamEvent item: {item.type}")
                  match item:
                      case ToolCallItem():
                          pass
@@ -196,6 +206,7 @@ class AgentStreamHandler:
                          await self.handle_message_complete()

              case AgentUpdatedStreamEvent(new_agent=new_agent):
+                 # log(f"AgentUpdatedStreamEvent new_agent: {new_agent.name}")
                  await self.message_handler.handle_agent_update(new_agent)

      async def handle_task_tool_event(self, tool_name: str, tool_call_id: str, event: StreamEvent) -> None:
@@ -0,0 +1,226 @@
+ """Anthropic model with Pro/Max authentication support."""
+
+ import logging
+ from typing import Any, Literal, overload
+
+ import litellm
+ from agents.agent_output import AgentOutputSchemaBase
+ from agents.handoffs import Handoff
+ from agents.items import TResponseInputItem
+ from agents.model_settings import ModelSettings
+ from agents.models.interface import ModelTracing
+ from agents.tool import Tool
+ from agents.tracing.span_data import GenerationSpanData
+ from agents.tracing.spans import Span
+ from openai import AsyncStream
+ from openai.types.chat import ChatCompletionChunk
+ from openai.types.responses import Response
+
+ from vibecore.auth.config import ANTHROPIC_CONFIG
+ from vibecore.auth.interceptor import AnthropicRequestInterceptor
+ from vibecore.auth.manager import AnthropicAuthManager
+ from vibecore.auth.storage import SecureAuthStorage
+ from vibecore.models.anthropic import AnthropicModel, _transform_messages_for_cache
+
+ logger = logging.getLogger(__name__)
+
+
+ class AnthropicProMaxModel(AnthropicModel):
+     """Anthropic model with Pro/Max authentication and Claude Code spoofing."""
+
+     def __init__(self, model_name: str, base_url: str | None = None, api_key: str | None = None, use_auth: bool = True):
+         """
+         Initialize AnthropicProMaxModel.
+
+         Args:
+             model_name: Name of the model.
+             base_url: Optional base URL override.
+             api_key: Optional API key (ignored if Pro/Max auth is active).
+             use_auth: Whether to use Pro/Max authentication.
+         """
+         super().__init__(model_name, base_url, api_key)
+         self.use_auth = use_auth
+         self.auth_manager: AnthropicAuthManager | None = None
+         self.interceptor: AnthropicRequestInterceptor | None = None
+
+         if self.use_auth:
+             self._initialize_auth()
+
+     def _initialize_auth(self) -> None:
+         """Initialize authentication components."""
+         storage = SecureAuthStorage()
+         self.auth_manager = AnthropicAuthManager()
+         self.interceptor = AnthropicRequestInterceptor(storage)
+
+         # Check if authenticated (we'll check async later when needed)
+         logger.info("AnthropicProMaxModel initialized with authentication support")
+
+     async def _inject_system_prompt(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+         """
+         Inject Claude Code identity into system messages.
+
+         Args:
+             messages: Original messages.
+
+         Returns:
+             Messages with Claude Code identity injected.
+         """
+         if not self.use_auth or not self.interceptor:
+             return messages
+
+         # Check if using Pro/Max auth
+         storage = SecureAuthStorage()
+         auth = await storage.load("anthropic")
+         if not auth or auth.type != "oauth":
+             return messages  # Only inject for Pro/Max users
+
+         # Find or create system message
+         messages_copy = messages.copy()
+         system_index = next((i for i, msg in enumerate(messages_copy) if msg.get("role") == "system"), None)
+
+         if system_index is not None:
+             # Prepend Claude Code identity to existing system message
+             current_content = messages_copy[system_index].get("content", "")
+             messages_copy[system_index]["content"] = f"{ANTHROPIC_CONFIG.CLAUDE_CODE_IDENTITY}\n\n{current_content}"
+         else:
+             # Add new system message at the beginning
+             messages_copy.insert(0, {"role": "system", "content": ANTHROPIC_CONFIG.CLAUDE_CODE_IDENTITY})
+
+         return messages_copy
+
+     async def _apply_auth_headers(self, headers: dict[str, str]) -> dict[str, str]:
+         """
+         Apply authentication and Claude Code headers.
+
+         Args:
+             headers: Original headers.
+
+         Returns:
+             Modified headers with auth and Claude Code spoofing.
+         """
+         if not self.use_auth or not self.interceptor:
+             return headers
+
+         # Use interceptor to apply auth and Claude Code headers
+         modified_request = await self.interceptor.intercept_request(url="https://api.anthropic.com", headers=headers)
+
+         return modified_request.get("headers", headers)
+
+     @overload
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchemaBase | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: Literal[True],
+         prompt: Any | None = None,
+     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
+
+     @overload
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchemaBase | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: Literal[False],
+         prompt: Any | None = None,
+     ) -> Any: ...
+
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchemaBase | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: bool = False,
+         prompt: Any | None = None,
+     ) -> Any | tuple[Response, AsyncStream[ChatCompletionChunk]]:
+         """Override _fetch_response to add auth and Claude Code support."""
+         # Store the original litellm.acompletion function
+         original_acompletion = litellm.acompletion
+
+         async def _intercepting_acompletion(*args, **kwargs):
+             """Intercept litellm.acompletion calls to transform messages and headers."""
+             # Only transform for this Anthropic model
+             if kwargs.get("model") == self.model:
+                 if "messages" in kwargs:
+                     messages = kwargs["messages"]
+                     logger.debug(f"Intercepting Anthropic API call with {len(messages)} messages")
+
+                     # Add Claude Code identity to system prompt
+                     messages = await self._inject_system_prompt(messages)
+
+                     # Transform messages to add cache_control
+                     messages = _transform_messages_for_cache(messages)
+                     kwargs["messages"] = messages
+
+                 # Apply auth headers if available
+                 if self.use_auth and self.interceptor:
+                     # Get existing headers or create new dict
+                     headers = kwargs.get("extra_headers", {})
+
+                     # Apply auth and Claude Code headers
+                     headers = await self._apply_auth_headers(headers)
+
+                     # Update kwargs
+                     kwargs["extra_headers"] = headers
+
+                     # For Pro/Max users, prevent API key from being added
+                     storage = SecureAuthStorage()
+                     auth = await storage.load("anthropic")
+                     if auth and auth.type == "oauth":
+                         # CRITICAL: Set api_key to None to prevent litellm from adding x-api-key header
+                         # when using OAuth authentication
+                         kwargs["api_key"] = None
+
+             # Call the original function with transformed kwargs
+             return await original_acompletion(*args, **kwargs)
+
+         try:
+             # Temporarily replace litellm.acompletion
+             litellm.acompletion = _intercepting_acompletion
+
+             # Call the parent's implementation
+             if stream:
+                 return await super()._fetch_response(
+                     system_instructions=system_instructions,
+                     input=input,
+                     model_settings=model_settings,
+                     tools=tools,
+                     output_schema=output_schema,
+                     handoffs=handoffs,
+                     span=span,
+                     tracing=tracing,
+                     stream=True,
+                     prompt=prompt,
+                 )
+             else:
+                 return await super()._fetch_response(
+                     system_instructions=system_instructions,
+                     input=input,
+                     model_settings=model_settings,
+                     tools=tools,
+                     output_schema=output_schema,
+                     handoffs=handoffs,
+                     span=span,
+                     tracing=tracing,
+                     stream=False,
+                     prompt=prompt,
+                 )
+         finally:
+             # Always restore the original function
+             litellm.acompletion = original_acompletion
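The override works by temporarily swapping out litellm.acompletion for the duration of a single _fetch_response call, so normal code never constructs this class directly; it is returned from Settings.model (see the settings.py change below). A direct construction would look roughly like this, with the module path taken from the import in settings.py and the model name purely illustrative:

    from vibecore.models.anthropic_auth import AnthropicProMaxModel

    # use_auth=True wires up SecureAuthStorage and AnthropicRequestInterceptor so that
    # OAuth headers and the Claude Code identity are injected into each request.
    model = AnthropicProMaxModel("anthropic/claude-sonnet-4-0", use_auth=True)

    # With use_auth=False the class behaves like the plain AnthropicModel parent.
    plain = AnthropicProMaxModel("anthropic/claude-sonnet-4-0", use_auth=False)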
vibecore/settings.py CHANGED
@@ -13,6 +13,19 @@ from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, Settings
  from vibecore.models import AnthropicModel


+ class AuthSettings(BaseModel):
+     """Configuration for authentication."""
+
+     use_pro_max: bool = Field(
+         default=False,
+         description="Use Anthropic Pro/Max authentication if available",
+     )
+     auto_refresh: bool = Field(
+         default=True,
+         description="Automatically refresh OAuth tokens",
+     )
+
+
  class SessionSettings(BaseModel):
      """Configuration for session storage."""

@@ -124,6 +137,12 @@ class Settings(BaseSettings):
              return None
          return v

+     # Authentication configuration
+     auth: AuthSettings = Field(
+         default_factory=AuthSettings,
+         description="Authentication configuration",
+     )
+
      # Session configuration
      session: SessionSettings = Field(
          default_factory=SessionSettings,
@@ -136,17 +155,29 @@ class Settings(BaseSettings):
          description="List of MCP servers to connect to",
      )

+     rich_tool_names: list[str] = Field(
+         default_factory=list,
+         description="List of tools to render with RichToolMessage (temporary settings)",
+     )
+
      @property
      def model(self) -> str | Model:
          """Get the configured model.

-         Returns an AnthropicModel instance if the model name starts with 'anthropic/',
+         Returns an AnthropicProMaxModel instance if auth is enabled and model is Anthropic,
+         returns an AnthropicModel instance if the model name starts with 'anthropic/',
          returns a OpenAIChatCompletionsModel instance if there is a custom base URL set,
          otherwise returns the model name as a plain string (for OpenAI/LiteLLM models).
          """
          custom_base = "OPENAI_BASE_URL" in os.environ
          if self.default_model.startswith("anthropic/"):
-             return AnthropicModel(self.default_model)
+             # Check if Pro/Max auth should be used
+             if self.auth.use_pro_max:
+                 from vibecore.models.anthropic_auth import AnthropicProMaxModel
+
+                 return AnthropicProMaxModel(self.default_model, use_auth=True)
+             else:
+                 return AnthropicModel(self.default_model)
          elif custom_base:
              openai_provider = MultiProvider().openai_provider
              return OpenAIChatCompletionsModel(self.default_model, openai_provider._get_client())
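A quick sketch of how the new auth settings change model selection; the field names come from the diff above, the model name is only an example, and actually using the returned model still requires stored credentials:

    from vibecore.settings import AuthSettings, Settings

    # With use_pro_max enabled and an anthropic/ model, Settings.model returns an
    # AnthropicProMaxModel; with the default (False) it falls back to AnthropicModel.
    settings = Settings(
        default_model="anthropic/claude-sonnet-4-0",
        auth=AuthSettings(use_pro_max=True),
    )
    print(type(settings.model).__name__)  # AnthropicProMaxModel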
@@ -9,12 +9,14 @@ import contextlib
  import json
  from typing import Any

+ from vibecore.settings import settings
  from vibecore.widgets.messages import MessageStatus
  from vibecore.widgets.tool_messages import (
      BaseToolMessage,
      MCPToolMessage,
      PythonToolMessage,
      ReadToolMessage,
+     RichToolMessage,
      TaskToolMessage,
      TodoWriteToolMessage,
      ToolMessage,
@@ -129,6 +131,12 @@ def create_tool_message(
          else:
              return WebFetchToolMessage(url=url, status=status)

+     elif tool_name in settings.rich_tool_names:
+         if output is not None:
+             return RichToolMessage(tool_name=tool_name, arguments=arguments, output=output, status=status)
+         else:
+             return RichToolMessage(tool_name=tool_name, arguments=arguments, status=status)
+
      # Default to generic ToolMessage for all other tools
      else:
          if output is not None:
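Together with the rich_tool_names setting above, the factory now routes any listed tool to RichToolMessage while unlisted tools still fall through to the generic ToolMessage. A hedged sketch of the behavior; the module that defines create_tool_message is not named in this diff, so its import path and the exact keyword arguments are assumptions based on the hunk above:

    from vibecore.widgets.messages import MessageStatus
    from vibecore.handlers.tool_message_factory import create_tool_message  # assumed module path

    # Assuming settings.rich_tool_names contains "my_tool", this returns a RichToolMessage.
    widget = create_tool_message(
        tool_name="my_tool",
        arguments='{"query": "hello"}',
        output="result text",
        status=MessageStatus.SUCCESS,
    )
    print(type(widget).__name__)  # RichToolMessage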