abstractcore-2.5.2-py3-none-any.whl → abstractcore-2.5.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/__init__.py +12 -0
- abstractcore/architectures/detection.py +250 -4
- abstractcore/assets/architecture_formats.json +14 -1
- abstractcore/assets/model_capabilities.json +533 -10
- abstractcore/compression/__init__.py +29 -0
- abstractcore/compression/analytics.py +420 -0
- abstractcore/compression/cache.py +250 -0
- abstractcore/compression/config.py +279 -0
- abstractcore/compression/exceptions.py +30 -0
- abstractcore/compression/glyph_processor.py +381 -0
- abstractcore/compression/optimizer.py +388 -0
- abstractcore/compression/orchestrator.py +380 -0
- abstractcore/compression/pil_text_renderer.py +818 -0
- abstractcore/compression/quality.py +226 -0
- abstractcore/compression/text_formatter.py +666 -0
- abstractcore/compression/vision_compressor.py +371 -0
- abstractcore/config/main.py +64 -0
- abstractcore/config/manager.py +100 -5
- abstractcore/core/session.py +61 -6
- abstractcore/events/__init__.py +1 -1
- abstractcore/media/auto_handler.py +312 -18
- abstractcore/media/handlers/local_handler.py +14 -2
- abstractcore/media/handlers/openai_handler.py +62 -3
- abstractcore/media/processors/__init__.py +11 -1
- abstractcore/media/processors/direct_pdf_processor.py +210 -0
- abstractcore/media/processors/glyph_pdf_processor.py +227 -0
- abstractcore/media/processors/image_processor.py +7 -1
- abstractcore/media/processors/text_processor.py +18 -3
- abstractcore/media/types.py +164 -7
- abstractcore/providers/__init__.py +18 -0
- abstractcore/providers/anthropic_provider.py +28 -2
- abstractcore/providers/base.py +278 -6
- abstractcore/providers/huggingface_provider.py +563 -23
- abstractcore/providers/lmstudio_provider.py +38 -2
- abstractcore/providers/mlx_provider.py +27 -2
- abstractcore/providers/model_capabilities.py +352 -0
- abstractcore/providers/ollama_provider.py +38 -4
- abstractcore/providers/openai_provider.py +28 -2
- abstractcore/providers/registry.py +85 -13
- abstractcore/server/app.py +91 -81
- abstractcore/utils/__init__.py +4 -1
- abstractcore/utils/trace_export.py +287 -0
- abstractcore/utils/version.py +1 -1
- abstractcore/utils/vlm_token_calculator.py +655 -0
- {abstractcore-2.5.2.dist-info → abstractcore-2.5.3.dist-info}/METADATA +107 -6
- {abstractcore-2.5.2.dist-info → abstractcore-2.5.3.dist-info}/RECORD +50 -33
- {abstractcore-2.5.2.dist-info → abstractcore-2.5.3.dist-info}/WHEEL +0 -0
- {abstractcore-2.5.2.dist-info → abstractcore-2.5.3.dist-info}/entry_points.txt +0 -0
- {abstractcore-2.5.2.dist-info → abstractcore-2.5.3.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.5.2.dist-info → abstractcore-2.5.3.dist-info}/top_level.txt +0 -0
abstractcore/providers/lmstudio_provider.py:

@@ -202,6 +202,15 @@ class LMStudioProvider(BaseProvider):
             "max_tokens": max_output_tokens,  # LMStudio uses max_tokens for output tokens
             "top_p": kwargs.get("top_p", 0.9),
         }
+
+        # Add additional generation parameters if provided (OpenAI-compatible)
+        if "frequency_penalty" in kwargs:
+            payload["frequency_penalty"] = kwargs["frequency_penalty"]
+        if "presence_penalty" in kwargs:
+            payload["presence_penalty"] = kwargs["presence_penalty"]
+        if "repetition_penalty" in kwargs:
+            # Some models support repetition_penalty directly
+            payload["repetition_penalty"] = kwargs["repetition_penalty"]
 
         # Add seed if provided (LMStudio supports seed via OpenAI-compatible API)
         seed_value = kwargs.get("seed", self.seed)
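The hunk above forwards OpenAI-style sampling penalties from provider kwargs into the LMStudio request payload. A minimal usage sketch, assuming AbstractCore's `create_llm` factory entry point (the model name is illustrative):

```python
# Sketch only: create_llm and the model name are assumptions, not part of this diff.
from abstractcore import create_llm

llm = create_llm("lmstudio", model="qwen2.5-7b-instruct")
response = llm.generate(
    "List three uses of text embeddings.",
    frequency_penalty=0.3,   # forwarded as payload["frequency_penalty"]
    presence_penalty=0.1,    # forwarded as payload["presence_penalty"]
    repetition_penalty=1.1,  # forwarded verbatim; only some models honor it
)
print(response.content)
```

Note that `repetition_penalty` is not part of the OpenAI spec, so it is passed through as-is and may be ignored by backends that do not support it.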
@@ -426,8 +435,21 @@ class LMStudioProvider(BaseProvider):
         return handler
 
     def list_available_models(self, **kwargs) -> List[str]:
-        """
+        """
+        List available models from LMStudio server.
+
+        Args:
+            **kwargs: Optional parameters including:
+                - base_url: LMStudio server URL
+                - input_capabilities: List of ModelInputCapability enums to filter by input capability
+                - output_capabilities: List of ModelOutputCapability enums to filter by output capability
+
+        Returns:
+            List of model names, optionally filtered by capabilities
+        """
         try:
+            from .model_capabilities import filter_models_by_capabilities
+
             # Use provided base_url or fall back to instance base_url
             base_url = kwargs.get('base_url', self.base_url)
 
@@ -435,7 +457,21 @@ class LMStudioProvider(BaseProvider):
             if response.status_code == 200:
                 data = response.json()
                 models = [model["id"] for model in data.get("data", [])]
-
+                models = sorted(models)
+
+                # Apply new capability filtering if provided
+                input_capabilities = kwargs.get('input_capabilities')
+                output_capabilities = kwargs.get('output_capabilities')
+
+                if input_capabilities or output_capabilities:
+                    models = filter_models_by_capabilities(
+                        models,
+                        input_capabilities=input_capabilities,
+                        output_capabilities=output_capabilities
+                    )
+
+
+                return models
             else:
                 self.logger.warning(f"LMStudio API returned status {response.status_code}")
                 return []
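Together, these two hunks give `list_available_models` a documented signature, sorted output, and optional capability filtering. A sketch of the resulting call pattern on an existing provider instance (the port is LMStudio's default and an assumption here):

```python
# Sketch of the filtering path added above; `provider` is an existing
# LMStudioProvider instance and the base_url value is an assumption.
from abstractcore.providers.model_capabilities import (
    ModelInputCapability,
    ModelOutputCapability,
)

vision_models = provider.list_available_models(
    base_url="http://localhost:1234",
    input_capabilities=[ModelInputCapability.IMAGE],
    output_capabilities=[ModelOutputCapability.TEXT],
)
print(vision_models)
```

Because `filter_models_by_capabilities` skips models it cannot resolve, the filtered list can be shorter than the raw `/v1/models` response even when no listed model actually lacks the capability.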
abstractcore/providers/mlx_provider.py:

@@ -494,8 +494,19 @@ class MLXProvider(BaseProvider):
 
     @classmethod
     def list_available_models(cls, **kwargs) -> List[str]:
-        """
+        """
+        List available MLX models from HuggingFace cache.
+
+        Args:
+            **kwargs: Optional parameters including:
+                - input_capabilities: List of ModelInputCapability enums to filter by input capability
+                - output_capabilities: List of ModelOutputCapability enums to filter by output capability
+
+        Returns:
+            List of model names, optionally filtered by capabilities
+        """
         from pathlib import Path
+        from .model_capabilities import filter_models_by_capabilities
 
         try:
             hf_cache = Path.home() / ".cache" / "huggingface" / "hub"
@@ -513,7 +524,21 @@ class MLXProvider(BaseProvider):
                 if "mlx" in model_name.lower():
                     models.append(model_name)
 
-
+            models = sorted(models)
+
+            # Apply new capability filtering if provided
+            input_capabilities = kwargs.get('input_capabilities')
+            output_capabilities = kwargs.get('output_capabilities')
+
+            if input_capabilities or output_capabilities:
+                models = filter_models_by_capabilities(
+                    models,
+                    input_capabilities=input_capabilities,
+                    output_capabilities=output_capabilities
+                )
+
+
+            return models
 
         except Exception:
             return []
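The MLX listing works offline: it walks the local HuggingFace hub cache and keeps names containing `mlx`. The diff shows only the substring check, so the directory-name handling in this standalone sketch is an assumption based on the hub's standard `models--org--repo` cache layout:

```python
# Standalone sketch of the cache scan; the name reconstruction is an
# assumption, not code from this diff.
from pathlib import Path

def list_cached_mlx_models() -> list:
    hf_cache = Path.home() / ".cache" / "huggingface" / "hub"
    models = []
    for entry in hf_cache.glob("models--*"):
        # e.g. "models--mlx-community--Qwen2.5-0.5B-Instruct-4bit"
        #   -> "mlx-community/Qwen2.5-0.5B-Instruct-4bit"
        model_name = entry.name[len("models--"):].replace("--", "/")
        if "mlx" in model_name.lower():
            models.append(model_name)
    return sorted(models)
```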
abstractcore/providers/model_capabilities.py (new file):

@@ -0,0 +1,352 @@
+"""
+Model capability definitions for input and output filtering.
+
+This module provides clear enums for filtering models based on what types of
+input they can process and what types of output they can generate.
+
+Key Concepts:
+- Input Capabilities: What data types can the model accept and analyze?
+- Output Capabilities: What data types can the model generate?
+
+Examples:
+    >>> from abstractcore.providers.model_capabilities import ModelInputCapability, ModelOutputCapability
+    >>> from abstractcore.providers import OllamaProvider
+    >>>
+    >>> # Get models that can analyze images
+    >>> vision_models = OllamaProvider.list_available_models(
+    ...     input_capabilities=[ModelInputCapability.IMAGE]
+    ... )
+    >>>
+    >>> # Get embedding models
+    >>> embedding_models = OllamaProvider.list_available_models(
+    ...     output_capabilities=[ModelOutputCapability.EMBEDDINGS]
+    ... )
+    >>>
+    >>> # Get vision models that generate text (most common case)
+    >>> vision_text_models = OllamaProvider.list_available_models(
+    ...     input_capabilities=[ModelInputCapability.TEXT, ModelInputCapability.IMAGE],
+    ...     output_capabilities=[ModelOutputCapability.TEXT]
+    ... )
+"""
+
+from enum import Enum
+from typing import List, Set, Optional, Dict, Any
+from ..architectures.detection import get_model_capabilities
+
+
+class ModelInputCapability(Enum):
+    """
+    Enumeration of input data types that models can process and analyze.
+
+    These capabilities define what types of input data a model can accept
+    and understand. Most multimodal models support TEXT plus one or more
+    additional input types.
+
+    Values:
+        TEXT: Model can process text input (all models support this)
+        IMAGE: Model can analyze and understand images (vision models)
+        AUDIO: Model can process and analyze audio input
+        VIDEO: Model can analyze video content
+
+    Examples:
+        >>> # Text-only model
+        >>> text_only = [ModelInputCapability.TEXT]
+        >>>
+        >>> # Vision model (supports both text and images)
+        >>> vision_model = [ModelInputCapability.TEXT, ModelInputCapability.IMAGE]
+        >>>
+        >>> # Audio model (supports both text and audio)
+        >>> audio_model = [ModelInputCapability.TEXT, ModelInputCapability.AUDIO]
+    """
+
+    TEXT = "text"
+    """Model can process and understand text input (supported by all models)"""
+
+    IMAGE = "image"
+    """Model can analyze and understand image input (vision models)"""
+
+    AUDIO = "audio"
+    """Model can process and analyze audio input"""
+
+    VIDEO = "video"
+    """Model can analyze and understand video input"""
+
+
+class ModelOutputCapability(Enum):
+    """
+    Enumeration of output data types that models can generate.
+
+    These capabilities define what types of output a model can produce.
+    Currently, AbstractCore supports text generation and embedding generation.
+
+    Values:
+        TEXT: Model generates text responses (most common)
+        EMBEDDINGS: Model generates vector embeddings (embedding models)
+
+    Examples:
+        >>> # Regular chat/completion model
+        >>> text_model = [ModelOutputCapability.TEXT]
+        >>>
+        >>> # Embedding model
+        >>> embedding_model = [ModelOutputCapability.EMBEDDINGS]
+
+    Note:
+        Future versions may include IMAGE, AUDIO, VIDEO for generative models.
+    """
+
+    TEXT = "text"
+    """Model generates text responses (chat, completion, etc.)"""
+
+    EMBEDDINGS = "embeddings"
+    """Model generates vector embeddings for semantic search/similarity"""
+
+
+def get_model_input_capabilities(model_name: str) -> List[ModelInputCapability]:
+    """
+    Determine what input capabilities a model supports.
+
+    Args:
+        model_name: Name of the model to check
+
+    Returns:
+        List of input capabilities the model supports
+
+    Examples:
+        >>> caps = get_model_input_capabilities("gpt-4-vision-preview")
+        >>> print(caps)
+        [<ModelInputCapability.TEXT: 'text'>, <ModelInputCapability.IMAGE: 'image'>]
+
+        >>> caps = get_model_input_capabilities("gpt-4")
+        >>> print(caps)
+        [<ModelInputCapability.TEXT: 'text'>]
+    """
+    try:
+        capabilities = get_model_capabilities(model_name)
+    except Exception:
+        # If we can't get capabilities, assume text-only
+        return [ModelInputCapability.TEXT]
+
+    input_caps = [ModelInputCapability.TEXT]  # All models support text
+
+    if capabilities.get("vision_support", False):
+        input_caps.append(ModelInputCapability.IMAGE)
+
+    if capabilities.get("audio_support", False):
+        input_caps.append(ModelInputCapability.AUDIO)
+
+    if capabilities.get("video_support", False):
+        input_caps.append(ModelInputCapability.VIDEO)
+
+    return input_caps
+
+
+def get_model_output_capabilities(model_name: str) -> List[ModelOutputCapability]:
+    """
+    Determine what output capabilities a model supports.
+
+    Args:
+        model_name: Name of the model to check
+
+    Returns:
+        List of output capabilities the model supports
+
+    Examples:
+        >>> caps = get_model_output_capabilities("gpt-4")
+        >>> print(caps)
+        [<ModelOutputCapability.TEXT: 'text'>]
+
+        >>> caps = get_model_output_capabilities("text-embedding-3-small")
+        >>> print(caps)
+        [<ModelOutputCapability.EMBEDDINGS: 'embeddings'>]
+    """
+    try:
+        capabilities = get_model_capabilities(model_name)
+    except Exception:
+        # If we can't get capabilities, assume text generation
+        return [ModelOutputCapability.TEXT]
+
+    # Check if it's explicitly marked as an embedding model
+    if capabilities.get("model_type") == "embedding":
+        return [ModelOutputCapability.EMBEDDINGS]
+
+    # Check for embedding model name patterns
+    model_lower = model_name.lower()
+    embedding_patterns = [
+        "embedding", "embed", "embeddings",
+        "text-embedding", "sentence-transformer",
+        "all-minilm", "nomic-embed", "granite-embedding",
+        "qwen3-embedding", "embeddinggemma"
+    ]
+
+    if any(pattern in model_lower for pattern in embedding_patterns):
+        return [ModelOutputCapability.EMBEDDINGS]
+
+    # Default to text generation
+    return [ModelOutputCapability.TEXT]
+
+
+def model_matches_input_capabilities(
+    model_name: str,
+    required_capabilities: List[ModelInputCapability]
+) -> bool:
+    """
+    Check if a model supports all required input capabilities.
+
+    Args:
+        model_name: Name of the model to check
+        required_capabilities: List of required input capabilities
+
+    Returns:
+        True if model supports all required capabilities, False otherwise
+
+    Examples:
+        >>> # Check if model supports both text and image input
+        >>> required = [ModelInputCapability.TEXT, ModelInputCapability.IMAGE]
+        >>> model_matches_input_capabilities("gpt-4-vision-preview", required)
+        True
+
+        >>> model_matches_input_capabilities("gpt-4", required)
+        False
+    """
+    if not required_capabilities:
+        return True
+
+    model_caps = get_model_input_capabilities(model_name)
+    model_caps_set = set(model_caps)
+    required_set = set(required_capabilities)
+
+    return required_set.issubset(model_caps_set)
+
+
+def model_matches_output_capabilities(
+    model_name: str,
+    required_capabilities: List[ModelOutputCapability]
+) -> bool:
+    """
+    Check if a model supports all required output capabilities.
+
+    Args:
+        model_name: Name of the model to check
+        required_capabilities: List of required output capabilities
+
+    Returns:
+        True if model supports all required capabilities, False otherwise
+
+    Examples:
+        >>> # Check if model generates text
+        >>> required = [ModelOutputCapability.TEXT]
+        >>> model_matches_output_capabilities("gpt-4", required)
+        True
+
+        >>> # Check if model generates embeddings
+        >>> required = [ModelOutputCapability.EMBEDDINGS]
+        >>> model_matches_output_capabilities("text-embedding-3-small", required)
+        True
+        >>> model_matches_output_capabilities("gpt-4", required)
+        False
+    """
+    if not required_capabilities:
+        return True
+
+    model_caps = get_model_output_capabilities(model_name)
+    model_caps_set = set(model_caps)
+    required_set = set(required_capabilities)
+
+    return required_set.issubset(model_caps_set)
+
+
+def filter_models_by_capabilities(
+    models: List[str],
+    input_capabilities: Optional[List[ModelInputCapability]] = None,
+    output_capabilities: Optional[List[ModelOutputCapability]] = None
+) -> List[str]:
+    """
+    Filter a list of models based on input and output capability requirements.
+
+    Args:
+        models: List of model names to filter
+        input_capabilities: Required input capabilities (None = no filtering)
+        output_capabilities: Required output capabilities (None = no filtering)
+
+    Returns:
+        Filtered list of model names that match all requirements
+
+    Examples:
+        >>> models = ["gpt-4", "gpt-4-vision-preview", "text-embedding-3-small"]
+        >>>
+        >>> # Get vision models
+        >>> vision_models = filter_models_by_capabilities(
+        ...     models,
+        ...     input_capabilities=[ModelInputCapability.IMAGE]
+        ... )
+        >>> print(vision_models)
+        ['gpt-4-vision-preview']
+        >>>
+        >>> # Get embedding models
+        >>> embedding_models = filter_models_by_capabilities(
+        ...     models,
+        ...     output_capabilities=[ModelOutputCapability.EMBEDDINGS]
+        ... )
+        >>> print(embedding_models)
+        ['text-embedding-3-small']
+        >>>
+        >>> # Get text generation models
+        >>> text_models = filter_models_by_capabilities(
+        ...     models,
+        ...     output_capabilities=[ModelOutputCapability.TEXT]
+        ... )
+        >>> print(text_models)
+        ['gpt-4', 'gpt-4-vision-preview']
+    """
+    filtered_models = []
+
+    for model_name in models:
+        try:
+            # Check input capabilities
+            if input_capabilities and not model_matches_input_capabilities(model_name, input_capabilities):
+                continue
+
+            # Check output capabilities
+            if output_capabilities and not model_matches_output_capabilities(model_name, output_capabilities):
+                continue
+
+            filtered_models.append(model_name)
+        except Exception:
+            # If we can't get capabilities, skip this model
+            # (it likely doesn't have an entry in model_capabilities.json)
+            continue
+
+    return filtered_models
+
+
+def get_capability_summary(model_name: str) -> Dict[str, Any]:
+    """
+    Get a comprehensive summary of a model's input and output capabilities.
+
+    Args:
+        model_name: Name of the model to analyze
+
+    Returns:
+        Dictionary containing input and output capabilities
+
+    Examples:
+        >>> summary = get_capability_summary("gpt-4-vision-preview")
+        >>> print(summary)
+        {
+            'model_name': 'gpt-4-vision-preview',
+            'input_capabilities': ['text', 'image'],
+            'output_capabilities': ['text'],
+            'is_multimodal': True,
+            'is_embedding_model': False
+        }
+    """
+    input_caps = get_model_input_capabilities(model_name)
+    output_caps = get_model_output_capabilities(model_name)
+
+    return {
+        'model_name': model_name,
+        'input_capabilities': [cap.value for cap in input_caps],
+        'output_capabilities': [cap.value for cap in output_caps],
+        'is_multimodal': len(input_caps) > 1,
+        'is_embedding_model': ModelOutputCapability.EMBEDDINGS in output_caps
+    }
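The matching rule throughout this new module is subset-based: a model passes only when it supports every requested capability, and `filter_models_by_capabilities` drops models whose capabilities cannot be resolved rather than letting them through. The set semantics can be illustrated without any registry lookup:

```python
# Illustration of the subset rule used by model_matches_input_capabilities;
# the capability set below is a hand-written stand-in for a registry lookup.
from abstractcore.providers.model_capabilities import ModelInputCapability

model_caps = {ModelInputCapability.TEXT, ModelInputCapability.IMAGE}

required = {ModelInputCapability.TEXT, ModelInputCapability.IMAGE}
print(required.issubset(model_caps))  # True: text and image are both supported

required = {ModelInputCapability.TEXT, ModelInputCapability.AUDIO}
print(required.issubset(model_caps))  # False: AUDIO is missing
```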
abstractcore/providers/ollama_provider.py:

@@ -114,6 +114,7 @@ class OllamaProvider(BaseProvider):
                            media: Optional[List['MediaContent']] = None,
                            stream: bool = False,
                            response_model: Optional[Type[BaseModel]] = None,
+                           media_metadata: Optional[List[Dict[str, Any]]] = None,
                            **kwargs) -> Union[GenerateResponse, Iterator[GenerateResponse]]:
         """Internal generation with Ollama"""
 
@@ -224,9 +225,9 @@ class OllamaProvider(BaseProvider):
         if stream:
             return self._stream_generate(endpoint, payload, tools, kwargs.get('tool_call_tags'))
         else:
-            return self._single_generate(endpoint, payload, tools)
+            return self._single_generate(endpoint, payload, tools, media_metadata)
 
-    def _single_generate(self, endpoint: str, payload: Dict[str, Any], tools: Optional[List[Dict[str, Any]]] = None) -> GenerateResponse:
+    def _single_generate(self, endpoint: str, payload: Dict[str, Any], tools: Optional[List[Dict[str, Any]]] = None, media_metadata: Optional[List[Dict[str, Any]]] = None) -> GenerateResponse:
         """Generate single response"""
         try:
             # Track generation time
@@ -262,6 +263,12 @@ class OllamaProvider(BaseProvider):
                 },
                 gen_time=gen_time
             )
+
+            # Attach media metadata if available
+            if media_metadata:
+                if not generate_response.metadata:
+                    generate_response.metadata = {}
+                generate_response.metadata['media_metadata'] = media_metadata
 
             # Execute tools if enabled and tools are present
             if self.execute_tools and tools and self.tool_handler.supports_prompted and content:
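These three hunks thread a new `media_metadata` parameter from `_generate_internal` through `_single_generate` and attach it to the response's `metadata` dict. A sketch of how a caller could read it back; only the `media_metadata` key is established by this diff, so the inner fields shown are assumptions:

```python
# Sketch: reading attached media metadata off a GenerateResponse.
# 'type' and 'filename' are illustrative keys, not guaranteed by this diff.
response = provider.generate("Summarize the attached document.", media=[pdf_item])

for item in (response.metadata or {}).get("media_metadata", []):
    print(item.get("type"), item.get("filename"))
```

Note the streaming branch still calls `_stream_generate` without the new argument, so the metadata is only attached on non-streaming responses.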
@@ -446,8 +453,21 @@ class OllamaProvider(BaseProvider):
             self.client = httpx.Client(timeout=self._timeout)
 
     def list_available_models(self, **kwargs) -> List[str]:
-        """
+        """
+        List available models from Ollama server.
+
+        Args:
+            **kwargs: Optional parameters including:
+                - base_url: Ollama server URL
+                - input_capabilities: List of ModelInputCapability enums to filter by input capability
+                - output_capabilities: List of ModelOutputCapability enums to filter by output capability
+
+        Returns:
+            List of model names, optionally filtered by capabilities
+        """
         try:
+            from .model_capabilities import filter_models_by_capabilities
+
             # Use provided base_url or fall back to instance base_url
             base_url = kwargs.get('base_url', self.base_url)
 
@@ -455,7 +475,21 @@ class OllamaProvider(BaseProvider):
             if response.status_code == 200:
                 data = response.json()
                 models = [model["name"] for model in data.get("models", [])]
-
+                models = sorted(models)
+
+                # Apply new capability filtering if provided
+                input_capabilities = kwargs.get('input_capabilities')
+                output_capabilities = kwargs.get('output_capabilities')
+
+                if input_capabilities or output_capabilities:
+                    models = filter_models_by_capabilities(
+                        models,
+                        input_capabilities=input_capabilities,
+                        output_capabilities=output_capabilities
+                    )
+
+
+                return models
             else:
                 self.logger.warning(f"Ollama API returned status {response.status_code}")
                 return []
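The Ollama changes mirror the LMStudio ones: a documented signature, sorted results, and the same optional capability filters. Narrowing a server's model list to embedding models might look like this (the port is Ollama's default and an assumption here; `ollama_provider` is an existing instance):

```python
# Sketch of the new Ollama filtering path; base_url is an assumption.
from abstractcore.providers.model_capabilities import ModelOutputCapability

embedding_models = ollama_provider.list_available_models(
    base_url="http://localhost:11434",
    output_capabilities=[ModelOutputCapability.EMBEDDINGS],
)
print(embedding_models)  # e.g. ['nomic-embed-text:latest'], depending on installs
```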
abstractcore/providers/openai_provider.py:

@@ -511,9 +511,21 @@ class OpenAIProvider(BaseProvider):
 
     @classmethod
     def list_available_models(cls, **kwargs) -> List[str]:
-        """
+        """
+        List available models from OpenAI API.
+
+        Args:
+            **kwargs: Optional parameters including:
+                - api_key: OpenAI API key
+                - input_capabilities: List of ModelInputCapability enums to filter by input capability
+                - output_capabilities: List of ModelOutputCapability enums to filter by output capability
+
+        Returns:
+            List of model names, optionally filtered by capabilities
+        """
         try:
             import openai
+            from .model_capabilities import filter_models_by_capabilities
 
             # Get API key from kwargs or environment
             api_key = kwargs.get('api_key') or os.getenv("OPENAI_API_KEY")
@@ -542,7 +554,21 @@ class OpenAIProvider(BaseProvider):
                 ]):
                     chat_models.append(model_id)
 
-
+            chat_models = sorted(chat_models, reverse=True)  # Latest models first
+
+            # Apply new capability filtering if provided
+            input_capabilities = kwargs.get('input_capabilities')
+            output_capabilities = kwargs.get('output_capabilities')
+
+            if input_capabilities or output_capabilities:
+                chat_models = filter_models_by_capabilities(
+                    chat_models,
+                    input_capabilities=input_capabilities,
+                    output_capabilities=output_capabilities
+                )
+
+
+            return chat_models
 
         except Exception:
             return []
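On the OpenAI side the same filter runs after the existing chat-model heuristics, and results are now sorted in reverse so newer model names come first. A sketch of the classmethod call (the capability choice is illustrative; the key handling matches the hunk above):

```python
# Sketch of capability-filtered OpenAI listing; supply api_key directly
# or via the OPENAI_API_KEY environment variable, per the hunk above.
from abstractcore.providers import OpenAIProvider
from abstractcore.providers.model_capabilities import ModelInputCapability

vision_chat_models = OpenAIProvider.list_available_models(
    api_key="sk-...",  # placeholder; use a real key or the environment variable
    input_capabilities=[ModelInputCapability.IMAGE],
)
print(vision_chat_models)  # sorted(reverse=True): latest names first
```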