abstractcore: 2.9.1-py3-none-any.whl → 2.11.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/__init__.py +7 -27
- abstractcore/apps/extractor.py +33 -100
- abstractcore/apps/intent.py +19 -0
- abstractcore/apps/judge.py +20 -1
- abstractcore/apps/summarizer.py +20 -1
- abstractcore/architectures/detection.py +34 -1
- abstractcore/architectures/response_postprocessing.py +313 -0
- abstractcore/assets/architecture_formats.json +38 -8
- abstractcore/assets/model_capabilities.json +781 -160
- abstractcore/compression/__init__.py +1 -2
- abstractcore/compression/glyph_processor.py +6 -4
- abstractcore/config/main.py +31 -19
- abstractcore/config/manager.py +389 -11
- abstractcore/config/vision_config.py +5 -5
- abstractcore/core/interface.py +151 -3
- abstractcore/core/session.py +16 -10
- abstractcore/download.py +1 -1
- abstractcore/embeddings/manager.py +20 -6
- abstractcore/endpoint/__init__.py +2 -0
- abstractcore/endpoint/app.py +458 -0
- abstractcore/mcp/client.py +3 -1
- abstractcore/media/__init__.py +52 -17
- abstractcore/media/auto_handler.py +42 -22
- abstractcore/media/base.py +44 -1
- abstractcore/media/capabilities.py +12 -33
- abstractcore/media/enrichment.py +105 -0
- abstractcore/media/handlers/anthropic_handler.py +19 -28
- abstractcore/media/handlers/local_handler.py +124 -70
- abstractcore/media/handlers/openai_handler.py +19 -31
- abstractcore/media/processors/__init__.py +4 -2
- abstractcore/media/processors/audio_processor.py +57 -0
- abstractcore/media/processors/office_processor.py +8 -3
- abstractcore/media/processors/pdf_processor.py +46 -3
- abstractcore/media/processors/text_processor.py +22 -24
- abstractcore/media/processors/video_processor.py +58 -0
- abstractcore/media/types.py +97 -4
- abstractcore/media/utils/image_scaler.py +20 -2
- abstractcore/media/utils/video_frames.py +219 -0
- abstractcore/media/vision_fallback.py +136 -22
- abstractcore/processing/__init__.py +32 -3
- abstractcore/processing/basic_deepsearch.py +15 -10
- abstractcore/processing/basic_intent.py +3 -2
- abstractcore/processing/basic_judge.py +3 -2
- abstractcore/processing/basic_summarizer.py +1 -1
- abstractcore/providers/__init__.py +3 -1
- abstractcore/providers/anthropic_provider.py +95 -8
- abstractcore/providers/base.py +1516 -81
- abstractcore/providers/huggingface_provider.py +546 -69
- abstractcore/providers/lmstudio_provider.py +35 -923
- abstractcore/providers/mlx_provider.py +382 -35
- abstractcore/providers/model_capabilities.py +5 -1
- abstractcore/providers/ollama_provider.py +99 -15
- abstractcore/providers/openai_compatible_provider.py +406 -180
- abstractcore/providers/openai_provider.py +188 -44
- abstractcore/providers/openrouter_provider.py +76 -0
- abstractcore/providers/registry.py +61 -5
- abstractcore/providers/streaming.py +138 -33
- abstractcore/providers/vllm_provider.py +92 -817
- abstractcore/server/app.py +461 -13
- abstractcore/server/audio_endpoints.py +139 -0
- abstractcore/server/vision_endpoints.py +1319 -0
- abstractcore/structured/handler.py +316 -41
- abstractcore/tools/common_tools.py +5501 -2012
- abstractcore/tools/comms_tools.py +1641 -0
- abstractcore/tools/core.py +37 -7
- abstractcore/tools/handler.py +4 -9
- abstractcore/tools/parser.py +49 -2
- abstractcore/tools/tag_rewriter.py +2 -1
- abstractcore/tools/telegram_tdlib.py +407 -0
- abstractcore/tools/telegram_tools.py +261 -0
- abstractcore/utils/cli.py +1085 -72
- abstractcore/utils/token_utils.py +2 -0
- abstractcore/utils/truncation.py +29 -0
- abstractcore/utils/version.py +3 -4
- abstractcore/utils/vlm_token_calculator.py +12 -2
- abstractcore-2.11.2.dist-info/METADATA +562 -0
- abstractcore-2.11.2.dist-info/RECORD +133 -0
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/WHEEL +1 -1
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/entry_points.txt +1 -0
- abstractcore-2.9.1.dist-info/METADATA +0 -1190
- abstractcore-2.9.1.dist-info/RECORD +0 -119
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.9.1.dist-info → abstractcore-2.11.2.dist-info}/top_level.txt +0 -0
--- a/abstractcore/providers/openai_provider.py
+++ b/abstractcore/providers/openai_provider.py
@@ -5,6 +5,7 @@ OpenAI provider implementation.
 import os
 import json
 import time
+import warnings
 from typing import List, Dict, Any, Optional, Union, Iterator, AsyncIterator, Type
 
 try:
@@ -15,7 +16,6 @@ except ImportError:
     BaseModel = None
 from .base import BaseProvider
 from ..core.types import GenerateResponse
-from ..media import MediaHandler
 from ..exceptions import AuthenticationError, ProviderAPIError, ModelNotFoundError, format_model_error, format_auth_error
 from ..tools import UniversalToolHandler, execute_tools
 from ..events import EventType
@@ -106,25 +106,64 @@ class OpenAIProvider(BaseProvider):
                 "content": msg["content"]
             })
 
- ... 19 removed lines (old 109-127) not rendered in the source diff view ...
+        media_enrichment = None
+
+        # Handle media content regardless of prompt (media can be used with messages too)
+        if media:
+            # Get the last user message content to combine with media
+            user_message_text = prompt.strip() if prompt else ""
+            if not user_message_text and api_messages:
+                # If no prompt, try to get text from the last user message
+                for msg in reversed(api_messages):
+                    if msg.get("role") == "user" and msg.get("content"):
+                        user_message_text = msg["content"]
+                        break
+
+            replace_last_user = False
+            if api_messages and api_messages[-1].get("role") == "user":
+                last_content = api_messages[-1].get("content")
+                # Only replace the last user message when prompt is empty (prompt already in messages)
+                # or when the prompt is the same as the last user content (avoid duplication).
+                if (not prompt.strip()) or (last_content == user_message_text):
+                    replace_last_user = True
+
+            try:
+                # Process media files into MediaContent objects first
+                processed_media = self._process_media_content(media)
+
+                # Use capability-based media handler selection (vision vs fallback)
+                media_handler = self._get_media_handler_for_model(self.model)
+
+                # Create multimodal message combining text and processed media
+                multimodal_message = media_handler.create_multimodal_message(user_message_text, processed_media)
+                media_enrichment = getattr(media_handler, "media_enrichment", None)
+
+                if isinstance(multimodal_message, str):
+                    if replace_last_user:
+                        api_messages[-1]["content"] = multimodal_message
+                    else:
+                        api_messages.append({"role": "user", "content": multimodal_message})
+                else:
+                    if replace_last_user:
+                        api_messages[-1] = multimodal_message
+                    else:
+                        api_messages.append(multimodal_message)
+            except ImportError:
+                self.logger.warning("Media processing not available. Install with: pip install \"abstractcore[media]\"")
+                if user_message_text and not replace_last_user:
+                    api_messages.append({"role": "user", "content": user_message_text})
+            except Exception as e:
+                # Do not silently drop user-supplied media. Fail loudly so callers can
+                # choose an explicit fallback policy (e.g. audio_policy='speech_to_text').
+                from ..exceptions import UnsupportedFeatureError
+
+                raise UnsupportedFeatureError(
+                    f"OpenAI provider could not format attached media for model '{self.model}': {e}"
+                ) from e
+
+        # Add prompt as separate message if provided (for backward compatibility)
+        elif prompt and prompt not in [msg.get("content") for msg in (messages or [])]:
+            api_messages.append({"role": "user", "content": prompt})
 
         # Prepare API call parameters using unified system
         generation_kwargs = self._prepare_generation_kwargs(**kwargs)
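The subtle part of the hunk above is the replace-vs-append rule: an existing trailing user message is only overwritten when the prompt is empty (its text already lives in the messages) or duplicates that message. A minimal, self-contained sketch of just that decision; the `decide_media_merge` helper is written here for illustration and is not part of abstractcore:

```python
# Hypothetical sketch of the replace-vs-append decision from the hunk above.
def decide_media_merge(prompt, api_messages):
    user_message_text = prompt.strip() if prompt else ""
    if not user_message_text and api_messages:
        # No explicit prompt: reuse the text of the last user message.
        for msg in reversed(api_messages):
            if msg.get("role") == "user" and msg.get("content"):
                user_message_text = msg["content"]
                break

    replace_last_user = False
    if api_messages and api_messages[-1].get("role") == "user":
        last_content = api_messages[-1].get("content")
        # Replace only when the prompt is empty (already present in messages)
        # or duplicates the last user message.
        if (not prompt.strip()) or (last_content == user_message_text):
            replace_last_user = True
    return user_message_text, replace_last_user


# With an empty prompt, the last user message is upgraded in place:
assert decide_media_merge("", [{"role": "user", "content": "Describe this image"}]) == (
    "Describe this image",
    True,
)
# With a fresh prompt, a new multimodal user message is appended instead:
assert decide_media_merge("What changed?", [{"role": "user", "content": "Hi"}]) == (
    "What changed?",
    False,
)
```

Keeping the decision a pure function of `prompt` and `api_messages` makes the duplication guard easy to unit-test in isolation.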
@@ -136,18 +175,38 @@ class OpenAIProvider(BaseProvider):
             "stream": stream
         }
 
+        # Prompt caching (OpenAI): best-effort pass-through via `prompt_cache_key`.
+        prompt_cache_key = kwargs.get("prompt_cache_key")
+        if isinstance(prompt_cache_key, str) and prompt_cache_key.strip():
+            call_params["prompt_cache_key"] = prompt_cache_key.strip()
+
         # Add parameters that are supported by this model
         if not self._is_reasoning_model():
             # Reasoning models (o1, gpt-5) don't support many parameters
-            call_params["temperature"] =
+            call_params["temperature"] = generation_kwargs.get("temperature", self.temperature)
             call_params["top_p"] = kwargs.get("top_p", self.top_p)
             call_params["frequency_penalty"] = kwargs.get("frequency_penalty", self.frequency_penalty)
             call_params["presence_penalty"] = kwargs.get("presence_penalty", self.presence_penalty)
 
             # Add seed if provided (OpenAI supports seed for deterministic outputs)
-            seed_value =
+            seed_value = generation_kwargs.get("seed")
             if seed_value is not None:
                 call_params["seed"] = seed_value
+        else:
+            # Best-effort: expose a warning when a caller requests params that are ignored.
+            seed_value = generation_kwargs.get("seed")
+            if seed_value is not None:
+                warnings.warn(
+                    f"Seed parameter ({seed_value}) requested but not supported by OpenAI reasoning models ({self.model}).",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+            if ("temperature" in kwargs) or (getattr(self, "temperature", 0.7) != 0.7):
+                warnings.warn(
+                    f"Temperature parameter requested but not supported by OpenAI reasoning models ({self.model}).",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
 
         # Handle different token parameter names for different model families
         if self._uses_max_completion_tokens():
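Because the new `else` branch only warns (`RuntimeWarning`, `stacklevel=2`) instead of raising, ignored parameters are easy to miss outside of tests. A sketch of escalating them; the constructor arguments here are assumptions, since the diff only shows the generation paths:

```python
import warnings

from abstractcore.providers.openai_provider import OpenAIProvider

# Sketch: turn the "not supported by OpenAI reasoning models" RuntimeWarnings
# into hard errors so tests catch silently-dropped parameters.
llm = OpenAIProvider(model="o1-mini")  # a reasoning model, per the comments above

with warnings.catch_warnings():
    warnings.simplefilter("error", RuntimeWarning)
    try:
        llm.generate("hello", seed=42)  # seed is dropped for reasoning models
    except RuntimeWarning as exc:
        print(f"Ignored parameter surfaced as an error: {exc}")
```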
@@ -199,6 +258,10 @@ class OpenAIProvider(BaseProvider):
         # Runtime observability: capture the exact client payload we sent.
         formatted.metadata = dict(formatted.metadata or {})
         formatted.metadata["_provider_request"] = {"call_params": call_params}
+        if media_enrichment:
+            from ..media.enrichment import merge_enrichment_metadata
+
+            formatted.metadata = merge_enrichment_metadata(formatted.metadata, media_enrichment)
 
         # Handle tool execution for OpenAI native responses
         if tools and formatted.has_tool_calls():
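`merge_enrichment_metadata` comes from the new `abstractcore/media/enrichment.py` (listed at the top of this diff); its merge semantics are not visible here. As a reading aid only, a hypothetical non-destructive merge might look like:

```python
from typing import Any, Dict, Optional

# Hypothetical sketch only -- the real merge_enrichment_metadata lives in
# abstractcore/media/enrichment.py and is not shown in this diff. The point
# it illustrates: response metadata is copied rather than mutated in place,
# and enrichment data lands under its own key instead of clobbering others.
def merge_enrichment_metadata_sketch(
    metadata: Optional[Dict[str, Any]],
    enrichment: Dict[str, Any],
) -> Dict[str, Any]:
    merged = dict(metadata or {})
    existing = dict(merged.get("media_enrichment") or {})
    existing.update(enrichment)
    merged["media_enrichment"] = existing
    return merged
```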
@@ -237,25 +300,56 @@ class OpenAIProvider(BaseProvider):
                 "content": msg["content"]
             })
 
- ... 19 removed lines (old 240-258) not rendered in the source diff view ...
+        media_enrichment = None
+
+        # Handle media content regardless of prompt (media can be used with messages too)
+        if media:
+            # Get the last user message content to combine with media
+            user_message_text = prompt.strip() if prompt else ""
+            if not user_message_text and api_messages:
+                # If no prompt, try to get text from the last user message
+                for msg in reversed(api_messages):
+                    if msg.get("role") == "user" and msg.get("content"):
+                        user_message_text = msg["content"]
+                        break
+
+            replace_last_user = False
+            if api_messages and api_messages[-1].get("role") == "user":
+                last_content = api_messages[-1].get("content")
+                if (not prompt.strip()) or (last_content == user_message_text):
+                    replace_last_user = True
+
+            try:
+                processed_media = self._process_media_content(media)
+                media_handler = self._get_media_handler_for_model(self.model)
+                multimodal_message = media_handler.create_multimodal_message(user_message_text, processed_media)
+                media_enrichment = getattr(media_handler, "media_enrichment", None)
+
+                if isinstance(multimodal_message, str):
+                    if replace_last_user:
+                        api_messages[-1]["content"] = multimodal_message
+                    else:
+                        api_messages.append({"role": "user", "content": multimodal_message})
+                else:
+                    if replace_last_user:
+                        api_messages[-1] = multimodal_message
+                    else:
+                        api_messages.append(multimodal_message)
+
+            except ImportError:
+                self.logger.warning("Media processing not available. Install with: pip install \"abstractcore[media]\"")
+                if user_message_text and not replace_last_user:
+                    api_messages.append({"role": "user", "content": user_message_text})
+            except Exception as e:
+                from ..exceptions import UnsupportedFeatureError
+
+                raise UnsupportedFeatureError(
+                    f"OpenAI provider could not format attached media for model '{self.model}': {e}"
+                ) from e
+
+        # Add prompt as separate message if provided (for backward compatibility)
+        elif prompt and prompt not in [msg.get("content") for msg in (messages or [])]:
+            api_messages.append({"role": "user", "content": prompt})
 
         # Prepare API call parameters using unified system (same logic as sync)
         generation_kwargs = self._prepare_generation_kwargs(**kwargs)
@@ -267,18 +361,37 @@ class OpenAIProvider(BaseProvider):
             "stream": stream
         }
 
+        # Prompt caching (OpenAI): best-effort pass-through via `prompt_cache_key`.
+        prompt_cache_key = kwargs.get("prompt_cache_key")
+        if isinstance(prompt_cache_key, str) and prompt_cache_key.strip():
+            call_params["prompt_cache_key"] = prompt_cache_key.strip()
+
         # Add parameters that are supported by this model
         if not self._is_reasoning_model():
             # Reasoning models (o1, gpt-5) don't support many parameters
-            call_params["temperature"] =
+            call_params["temperature"] = generation_kwargs.get("temperature", self.temperature)
             call_params["top_p"] = kwargs.get("top_p", self.top_p)
             call_params["frequency_penalty"] = kwargs.get("frequency_penalty", self.frequency_penalty)
             call_params["presence_penalty"] = kwargs.get("presence_penalty", self.presence_penalty)
 
             # Add seed if provided (OpenAI supports seed for deterministic outputs)
-            seed_value =
+            seed_value = generation_kwargs.get("seed")
             if seed_value is not None:
                 call_params["seed"] = seed_value
+        else:
+            seed_value = generation_kwargs.get("seed")
+            if seed_value is not None:
+                warnings.warn(
+                    f"Seed parameter ({seed_value}) requested but not supported by OpenAI reasoning models ({self.model}).",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+            if ("temperature" in kwargs) or (getattr(self, "temperature", 0.7) != 0.7):
+                warnings.warn(
+                    f"Temperature parameter requested but not supported by OpenAI reasoning models ({self.model}).",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
 
         # Handle different token parameter names for different model families
         if self._uses_max_completion_tokens():
@@ -329,6 +442,10 @@ class OpenAIProvider(BaseProvider):
         formatted.gen_time = gen_time
         formatted.metadata = dict(formatted.metadata or {})
         formatted.metadata["_provider_request"] = {"call_params": call_params}
+        if media_enrichment:
+            from ..media.enrichment import merge_enrichment_metadata
+
+            formatted.metadata = merge_enrichment_metadata(formatted.metadata, media_enrichment)
 
         # Handle tool execution for OpenAI native responses
         if tools and formatted.has_tool_calls():
@@ -609,6 +726,33 @@ class OpenAIProvider(BaseProvider):
         else:
             yield chunk_response
 
+    def _get_media_handler_for_model(self, model_name: str):
+        """Get appropriate media handler based on model vision capabilities."""
+        from ..media.handlers import OpenAIMediaHandler, LocalMediaHandler
+
+        # Determine if model supports vision
+        try:
+            from ..architectures.detection import supports_vision
+
+            use_vision_handler = supports_vision(model_name)
+        except Exception as e:
+            self.logger.debug(f"Vision detection failed: {e}, defaulting to LocalMediaHandler")
+            use_vision_handler = False
+
+        # Create appropriate handler
+        if use_vision_handler:
+            handler = OpenAIMediaHandler(self.model_capabilities, model_name=model_name)
+            self.logger.debug(f"Using OpenAIMediaHandler for vision model: {model_name}")
+        else:
+            handler = LocalMediaHandler(self.provider, self.model_capabilities, model_name=model_name)
+            self.logger.debug(f"Using LocalMediaHandler for model: {model_name}")
+
+        return handler
+
+    def supports_prompt_cache(self) -> bool:
+        """OpenAI supports prompt caching via `prompt_cache_key` (server-managed)."""
+        return True
+
     def get_capabilities(self) -> List[str]:
         """Get list of capabilities supported by this provider"""
         capabilities = [
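With `supports_prompt_cache()` now advertised alongside the `prompt_cache_key` pass-through added earlier in this file, callers can feature-detect before opting in. A sketch, with assumed constructor arguments and a caller-chosen key name:

```python
from abstractcore.providers.openai_provider import OpenAIProvider

# Sketch: feature-detect prompt caching, then reuse a stable caller-chosen key
# across requests sharing a long common prefix. The provider forwards the key
# to the API only when it is a non-empty string (see the hunks above).
llm = OpenAIProvider(model="gpt-4o-mini")

if llm.supports_prompt_cache():
    response = llm.generate(
        "Summarize the attached policy document.",
        prompt_cache_key="policy-summarizer-v1",  # hypothetical cache key
    )
```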
@@ -634,7 +778,7 @@ class OpenAIProvider(BaseProvider):
             return False
         return True
 
-    def
+    def unload_model(self, model_name: str) -> None:
         """Close async client if it was created."""
         if self._async_client is not None:
             import asyncio
--- /dev/null
+++ b/abstractcore/providers/openrouter_provider.py
@@ -0,0 +1,76 @@
+"""
+OpenRouter provider (OpenAI-compatible API).
+
+OpenRouter exposes an OpenAI-compatible API at `https://openrouter.ai/api/v1`.
+This provider subclasses `OpenAICompatibleProvider` and adds:
+- API key support via `OPENROUTER_API_KEY` (or AbstractCore config fallback)
+- Optional OpenRouter metadata headers (`HTTP-Referer`, `X-Title`)
+"""
+
+import os
+from typing import Optional, Dict
+
+from .openai_compatible_provider import OpenAICompatibleProvider
+
+
+class OpenRouterProvider(OpenAICompatibleProvider):
+    """OpenRouter provider using OpenAI-compatible API."""
+
+    PROVIDER_ID = "openrouter"
+    PROVIDER_DISPLAY_NAME = "OpenRouter"
+    BASE_URL_ENV_VAR = "OPENROUTER_BASE_URL"
+    API_KEY_ENV_VAR = "OPENROUTER_API_KEY"
+    DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
+
+    def __init__(
+        self,
+        model: str = "openai/gpt-4o-mini",
+        base_url: Optional[str] = None,
+        api_key: Optional[str] = None,
+        **kwargs,
+    ):
+        super().__init__(model=model, base_url=base_url, api_key=api_key, **kwargs)
+
+        if not self._has_api_key():
+            raise ValueError(
+                "OpenRouter API key required. Set OPENROUTER_API_KEY or configure via "
+                "`abstractcore --set-api-key openrouter <key>`."
+            )
+
+    def _has_api_key(self) -> bool:
+        if self.api_key is None:
+            return False
+        key = str(self.api_key).strip()
+        if not key:
+            return False
+        return key.upper() != "EMPTY"
+
+    def _get_api_key_from_config(self) -> Optional[str]:
+        try:
+            from ..config.manager import get_config_manager
+
+            cfg = get_config_manager()
+            return getattr(cfg.config.api_keys, "openrouter", None)
+        except Exception:
+            return None
+
+    def _validate_model(self):
+        # Avoid unauthenticated network calls on init; OpenRouter generally requires a key.
+        if not self._has_api_key():
+            return
+        return super()._validate_model()
+
+    def _get_headers(self) -> Dict[str, str]:
+        headers = super()._get_headers()
+
+        # OpenRouter recommends sending these for better analytics / abuse prevention.
+        site_url = os.getenv("OPENROUTER_SITE_URL")
+        if isinstance(site_url, str) and site_url.strip():
+            headers["HTTP-Referer"] = site_url.strip()
+
+        app_name = os.getenv("OPENROUTER_APP_NAME")
+        if isinstance(app_name, str) and app_name.strip():
+            headers["X-Title"] = app_name.strip()
+
+        return headers
+
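A usage sketch for the new provider; the environment variable names come from the module above, while the placeholder key and URLs are illustrative:

```python
import os

from abstractcore.providers.openrouter_provider import OpenRouterProvider

# Required: __init__ raises ValueError without a resolvable key
# (picked up via the API_KEY_ENV_VAR class attribute, per the docstring).
os.environ["OPENROUTER_API_KEY"] = "sk-or-..."  # placeholder value
# Optional: forwarded as HTTP-Referer / X-Title headers by _get_headers().
os.environ["OPENROUTER_SITE_URL"] = "https://example.com"
os.environ["OPENROUTER_APP_NAME"] = "my-app"

llm = OpenRouterProvider(model="openai/gpt-4o-mini")
# generate() is inherited from the OpenAI-compatible base provider.
response = llm.generate("Hello from OpenRouter!")
```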
--- a/abstractcore/providers/registry.py
+++ b/abstractcore/providers/registry.py
@@ -6,6 +6,7 @@ eliminating the need for manual synchronization across factory.py, server/app.py
 and __init__.py files.
 """
 
+import os
 from typing import List, Dict, Any, Optional, Type, Callable
 from dataclasses import dataclass, field
 from abc import ABC
@@ -49,6 +50,30 @@ class ProviderRegistry:
         self._logger = get_logger("ProviderRegistry")
         self._register_all_providers()
 
+    def _has_configured_base_url(self, provider_name: str, *, env_var: str) -> bool:
+        """Return True if a provider has an explicit base_url configured.
+
+        We intentionally treat providers like `openai-compatible` / `vllm` as *opt-in* for
+        model probing: their defaults (8080/8000) are not universally valid and can cause
+        noisy timeouts in multi-service dev environments.
+        """
+        if isinstance(env_var, str) and env_var.strip():
+            env_val = os.getenv(env_var)
+            if isinstance(env_val, str) and env_val.strip():
+                return True
+
+        try:
+            from ..config import get_provider_config
+
+            cfg = get_provider_config(provider_name)
+            base_url = cfg.get("base_url")
+            if isinstance(base_url, str) and base_url.strip():
+                return True
+        except Exception:
+            pass
+
+        return False
+
     def _register_all_providers(self):
         """Register all available providers with their metadata."""
 
@@ -71,8 +96,8 @@ class ProviderRegistry:
                 name="anthropic",
                 display_name="Anthropic",
                 provider_class=None,
-                description="Commercial API with Claude
-                default_model="claude-
+                description="Commercial API with Claude models",
+                default_model="claude-haiku-4-5",
                 supported_features=["chat", "completion", "native_tools", "streaming", "structured_output"],
                 authentication_required=True,
                 local_provider=False,
@@ -166,6 +191,20 @@ class ProviderRegistry:
             import_path="..providers.openai_compatible_provider"
         ))
 
+        # OpenRouter Provider
+        self.register_provider(ProviderInfo(
+            name="openrouter",
+            display_name="OpenRouter",
+            provider_class=None,
+            description="OpenAI-compatible aggregator API (multi-provider routing and unified billing)",
+            default_model="openai/gpt-4o-mini",
+            supported_features=["chat", "completion", "embeddings", "native_tools", "streaming", "structured_output"],
+            authentication_required=True,
+            local_provider=False,
+            installation_extras=None,
+            import_path="..providers.openrouter_provider"
+        ))
+
 
     def register_provider(self, provider_info: ProviderInfo):
         """Register a provider in the registry."""
@@ -223,13 +262,16 @@ class ProviderRegistry:
             elif provider_info.name == "openai-compatible":
                 from ..providers.openai_compatible_provider import OpenAICompatibleProvider
                 return OpenAICompatibleProvider
+            elif provider_info.name == "openrouter":
+                from ..providers.openrouter_provider import OpenRouterProvider
+                return OpenRouterProvider
             else:
                 raise ImportError(f"No import logic for provider: {provider_info.name}")
         except ImportError as e:
             self._logger.warning(f"Failed to load provider {provider_info.name}: {e}")
             raise ImportError(
                 f"{provider_info.display_name} dependencies not installed. "
-                f"Install with: pip install abstractcore[{provider_info.installation_extras}]"
+                f"Install with: pip install \"abstractcore[{provider_info.installation_extras}]\""
             ) from e
 
     def get_available_models(self, provider_name: str, **kwargs) -> List[str]:
@@ -248,6 +290,20 @@ class ProviderRegistry:
         List of available model names, optionally filtered by capabilities
         """
         try:
+            provider_name_norm = str(provider_name or "").strip().lower()
+            base_url_override = kwargs.get("base_url")
+
+            # Avoid probing generic OpenAI-compatible endpoints unless explicitly configured.
+            # (Default ports like :8080/:8000 are often wrong in real setups.)
+            if provider_name_norm == "openai-compatible" and not (
+                isinstance(base_url_override, str) and base_url_override.strip()
+            ):
+                if not self._has_configured_base_url(provider_name_norm, env_var="OPENAI_COMPATIBLE_BASE_URL"):
+                    return []
+            if provider_name_norm == "vllm" and not (isinstance(base_url_override, str) and base_url_override.strip()):
+                if not self._has_configured_base_url(provider_name_norm, env_var="VLLM_BASE_URL"):
+                    return []
+
             provider_class = self.get_provider_class(provider_name)
 
             # Handle providers that need instance for model listing
@@ -413,7 +469,7 @@ class ProviderRegistry:
             if provider_info.installation_extras:
                 raise ImportError(
                     f"{provider_info.display_name} dependencies not installed. "
-                    f"Install with: pip install abstractcore[{provider_info.installation_extras}]"
+                    f"Install with: pip install \"abstractcore[{provider_info.installation_extras}]\""
                 ) from e
             else:
                 raise ImportError(f"{provider_info.display_name} provider not available") from e
@@ -503,4 +559,4 @@ def get_available_models_for_provider(provider_name: str, **kwargs) -> List[str]
     Returns:
         List of available model names, optionally filtered by capabilities
     """
-    return get_provider_registry().get_available_models(provider_name, **kwargs)
\ No newline at end of file
+    return get_provider_registry().get_available_models(provider_name, **kwargs)
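Combined with `_has_configured_base_url` above, model discovery for the generic endpoints is now opt-in. A sketch of the three ways this plays out; the localhost URLs are stand-ins, and the empty-list result assumes no base_url is present in the AbstractCore config file:

```python
import os

from abstractcore.providers.registry import get_available_models_for_provider

# With no env var, no config-file base_url, and no per-call override, the
# registry skips probing and returns [] instead of timing out on :8080/:8000.
print(get_available_models_for_provider("openai-compatible"))  # -> []

# Opt in per call with an explicit base_url override...
models = get_available_models_for_provider(
    "openai-compatible", base_url="http://localhost:8080/v1"  # stand-in URL
)

# ...or via the environment variables checked by _has_configured_base_url.
os.environ["VLLM_BASE_URL"] = "http://localhost:8000/v1"  # stand-in URL
vllm_models = get_available_models_for_provider("vllm")
```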