abstractcore-2.5.0-py3-none-any.whl → abstractcore-2.5.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/__init__.py +12 -0
- abstractcore/apps/__main__.py +8 -1
- abstractcore/apps/deepsearch.py +644 -0
- abstractcore/apps/intent.py +614 -0
- abstractcore/architectures/detection.py +250 -4
- abstractcore/assets/architecture_formats.json +14 -1
- abstractcore/assets/model_capabilities.json +583 -44
- abstractcore/compression/__init__.py +29 -0
- abstractcore/compression/analytics.py +420 -0
- abstractcore/compression/cache.py +250 -0
- abstractcore/compression/config.py +279 -0
- abstractcore/compression/exceptions.py +30 -0
- abstractcore/compression/glyph_processor.py +381 -0
- abstractcore/compression/optimizer.py +388 -0
- abstractcore/compression/orchestrator.py +380 -0
- abstractcore/compression/pil_text_renderer.py +818 -0
- abstractcore/compression/quality.py +226 -0
- abstractcore/compression/text_formatter.py +666 -0
- abstractcore/compression/vision_compressor.py +371 -0
- abstractcore/config/main.py +66 -1
- abstractcore/config/manager.py +111 -5
- abstractcore/core/session.py +105 -5
- abstractcore/events/__init__.py +1 -1
- abstractcore/media/auto_handler.py +312 -18
- abstractcore/media/handlers/local_handler.py +14 -2
- abstractcore/media/handlers/openai_handler.py +62 -3
- abstractcore/media/processors/__init__.py +11 -1
- abstractcore/media/processors/direct_pdf_processor.py +210 -0
- abstractcore/media/processors/glyph_pdf_processor.py +227 -0
- abstractcore/media/processors/image_processor.py +7 -1
- abstractcore/media/processors/text_processor.py +18 -3
- abstractcore/media/types.py +164 -7
- abstractcore/processing/__init__.py +5 -1
- abstractcore/processing/basic_deepsearch.py +2173 -0
- abstractcore/processing/basic_intent.py +690 -0
- abstractcore/providers/__init__.py +18 -0
- abstractcore/providers/anthropic_provider.py +29 -2
- abstractcore/providers/base.py +279 -6
- abstractcore/providers/huggingface_provider.py +658 -27
- abstractcore/providers/lmstudio_provider.py +52 -2
- abstractcore/providers/mlx_provider.py +103 -4
- abstractcore/providers/model_capabilities.py +352 -0
- abstractcore/providers/ollama_provider.py +44 -6
- abstractcore/providers/openai_provider.py +29 -2
- abstractcore/providers/registry.py +91 -19
- abstractcore/server/app.py +91 -81
- abstractcore/structured/handler.py +161 -1
- abstractcore/tools/common_tools.py +98 -3
- abstractcore/utils/__init__.py +4 -1
- abstractcore/utils/cli.py +114 -1
- abstractcore/utils/trace_export.py +287 -0
- abstractcore/utils/version.py +1 -1
- abstractcore/utils/vlm_token_calculator.py +655 -0
- {abstractcore-2.5.0.dist-info → abstractcore-2.5.3.dist-info}/METADATA +140 -23
- abstractcore-2.5.3.dist-info/RECORD +107 -0
- {abstractcore-2.5.0.dist-info → abstractcore-2.5.3.dist-info}/entry_points.txt +4 -0
- abstractcore-2.5.0.dist-info/RECORD +0 -86
- {abstractcore-2.5.0.dist-info → abstractcore-2.5.3.dist-info}/WHEEL +0 -0
- {abstractcore-2.5.0.dist-info → abstractcore-2.5.3.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.5.0.dist-info → abstractcore-2.5.3.dist-info}/top_level.txt +0 -0
abstractcore/providers/__init__.py
CHANGED

```diff
@@ -22,6 +22,16 @@ from .registry import (
     get_available_models_for_provider
 )
 
+# Model capability filtering (new system)
+from .model_capabilities import (
+    ModelInputCapability,
+    ModelOutputCapability,
+    get_model_input_capabilities,
+    get_model_output_capabilities,
+    filter_models_by_capabilities,
+    get_capability_summary
+)
+
 __all__ = [
     # Provider classes
     'BaseProvider',
@@ -43,4 +53,12 @@ __all__ = [
    'get_all_providers_status',
    'create_provider',
    'get_available_models_for_provider',
+
+   # Model capability filtering (new system)
+   'ModelInputCapability',
+   'ModelOutputCapability',
+   'get_model_input_capabilities',
+   'get_model_output_capabilities',
+   'filter_models_by_capabilities',
+   'get_capability_summary',
 ]
```
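The re-exports above make capability filtering available directly from `abstractcore.providers`. A minimal sketch of how the helper composes with the enums; the candidate list is illustrative, and actual results depend on the bundled capability data:

```python
# Sketch only: names follow the exports above; the model list is illustrative.
from abstractcore.providers import (
    ModelInputCapability,
    ModelOutputCapability,
    filter_models_by_capabilities,
)

candidates = ["claude-3-5-sonnet-20241022", "claude-3-haiku-20240307"]

# Keep only models whose capability data says they accept images and emit text.
vision_models = filter_models_by_capabilities(
    candidates,
    input_capabilities=[ModelInputCapability.IMAGE],
    output_capabilities=[ModelOutputCapability.TEXT],
)
print(vision_models)
```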
abstractcore/providers/anthropic_provider.py
CHANGED

```diff
@@ -32,6 +32,7 @@ class AnthropicProvider(BaseProvider):
 
     def __init__(self, model: str = "claude-3-haiku-20240307", api_key: Optional[str] = None, **kwargs):
         super().__init__(model, **kwargs)
+        self.provider = "anthropic"
 
         if not ANTHROPIC_AVAILABLE:
             raise ImportError("Anthropic package not installed. Install with: pip install anthropic")
@@ -454,9 +455,21 @@ class AnthropicProvider(BaseProvider):
         # Create new client with updated timeout
         self.client = anthropic.Anthropic(api_key=self.api_key, timeout=self._timeout)
     def list_available_models(self, **kwargs) -> List[str]:
-        """
+        """
+        List available models from Anthropic API.
+
+        Args:
+            **kwargs: Optional parameters including:
+                - api_key: Anthropic API key
+                - input_capabilities: List of ModelInputCapability enums to filter by input capability
+                - output_capabilities: List of ModelOutputCapability enums to filter by output capability
+
+        Returns:
+            List of model names, optionally filtered by capabilities
+        """
         try:
             import httpx
+            from .model_capabilities import filter_models_by_capabilities
 
             # Use provided API key or instance API key
             api_key = kwargs.get('api_key', self.api_key)
@@ -480,7 +493,21 @@ class AnthropicProvider(BaseProvider):
                 data = response.json()
                 models = [model["id"] for model in data.get("data", [])]
                 self.logger.debug(f"Retrieved {len(models)} models from Anthropic API")
-                return models
+                models = sorted(models, reverse=True)  # Latest models first
+
+                # Apply new capability filtering if provided
+                input_capabilities = kwargs.get('input_capabilities')
+                output_capabilities = kwargs.get('output_capabilities')
+
+                if input_capabilities or output_capabilities:
+                    models = filter_models_by_capabilities(
+                        models,
+                        input_capabilities=input_capabilities,
+                        output_capabilities=output_capabilities
+                    )
+
+
+                return models
             else:
                 self.logger.warning(f"Anthropic API returned status {response.status_code}")
                 return []
```
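With filtering wired into `list_available_models`, a call against a live Anthropic account might look like this sketch (the API key is a placeholder):

```python
# Sketch: assumes a valid Anthropic API key and network access.
from abstractcore.providers import ModelInputCapability
from abstractcore.providers.anthropic_provider import AnthropicProvider

provider = AnthropicProvider(api_key="sk-ant-...")  # placeholder key

# All models, newest first thanks to the reverse sort added above.
all_models = provider.list_available_models()

# Only models whose capability data marks them as accepting image input.
vision_models = provider.list_available_models(
    input_capabilities=[ModelInputCapability.IMAGE],
)
```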
abstractcore/providers/base.py
CHANGED

```diff
@@ -3,6 +3,8 @@ Base provider with integrated telemetry, events, and exception handling.
 """
 
 import time
+import uuid
+from collections import deque
 from typing import List, Dict, Any, Optional, Union, Iterator, Type
 from abc import ABC, abstractmethod
 
@@ -38,6 +40,7 @@ class BaseProvider(AbstractCoreInterface, ABC):
 
     def __init__(self, model: str, **kwargs):
         AbstractCoreInterface.__init__(self, model, **kwargs)
+        self.provider = None
 
         # Setup structured logging
         self.logger = get_logger(self.__class__.__name__)
```
```diff
@@ -66,6 +69,13 @@ class BaseProvider(AbstractCoreInterface, ABC):
 
         # Create provider key for circuit breaker tracking
         self.provider_key = f"{self.__class__.__name__}:{self.model}"
+
+        # Setup Glyph compression configuration
+        self.glyph_config = kwargs.get('glyph_config', None)
+
+        # Setup interaction tracing
+        self.enable_tracing = kwargs.get('enable_tracing', False)
+        self._traces = deque(maxlen=kwargs.get('max_traces', 100))  # Ring buffer for memory efficiency
 
         # Provider created successfully - no event emission needed
         # (The simplified event system focuses on generation and tool events only)
```
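The new state is driven by plain constructor kwargs, so opting in is a one-liner. A sketch (values illustrative):

```python
# Sketch: kwargs match those read in __init__ above.
from abstractcore.providers.anthropic_provider import AnthropicProvider

provider = AnthropicProvider(
    api_key="sk-ant-...",  # placeholder
    enable_tracing=True,   # capture a trace for each non-streaming generation
    max_traces=50,         # ring buffer retains only the 50 most recent traces
)
```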
```diff
@@ -172,6 +182,97 @@ class BaseProvider(AbstractCoreInterface, ABC):
         result_info = f" (result length: {len(str(result))})" if result else ""
         self.logger.info(f"Tool call completed: {tool_name}{result_info}")
 
+    def _capture_trace(self, prompt: str, messages: Optional[List[Dict[str, str]]],
+                       system_prompt: Optional[str], tools: Optional[List[Dict[str, Any]]],
+                       response: GenerateResponse, kwargs: Dict[str, Any]) -> str:
+        """
+        Capture interaction trace for observability.
+
+        Args:
+            prompt: Input prompt
+            messages: Conversation history
+            system_prompt: System prompt
+            tools: Available tools
+            response: Generated response
+            kwargs: Additional generation parameters
+
+        Returns:
+            Trace ID (UUID string)
+        """
+        trace_id = str(uuid.uuid4())
+
+        # Extract generation parameters
+        temperature = kwargs.get('temperature', self.temperature)
+        max_tokens = kwargs.get('max_tokens', self.max_tokens)
+        max_output_tokens = kwargs.get('max_output_tokens', self.max_output_tokens)
+        seed = kwargs.get('seed', self.seed)
+        top_p = kwargs.get('top_p', getattr(self, 'top_p', None))
+        top_k = kwargs.get('top_k', getattr(self, 'top_k', None))
+
+        # Build parameters dict
+        parameters = {
+            'temperature': temperature,
+            'max_tokens': max_tokens,
+            'max_output_tokens': max_output_tokens,
+        }
+        if seed is not None:
+            parameters['seed'] = seed
+        if top_p is not None:
+            parameters['top_p'] = top_p
+        if top_k is not None:
+            parameters['top_k'] = top_k
+
+        # Create trace record
+        trace = {
+            'trace_id': trace_id,
+            'timestamp': datetime.now().isoformat(),
+            'provider': self.__class__.__name__,
+            'model': self.model,
+            'system_prompt': system_prompt,
+            'prompt': prompt,
+            'messages': messages,
+            'tools': tools,
+            'parameters': parameters,
+            'response': {
+                'content': response.content,
+                'raw_response': None,  # Omit raw_response to save memory and avoid logging sensitive data
+                'tool_calls': response.tool_calls,
+                'finish_reason': response.finish_reason,
+                'usage': response.usage,
+                'generation_time_ms': response.gen_time,
+            },
+            'metadata': kwargs.get('trace_metadata', {})
+        }
+
+        # Store trace in ring buffer
+        self._traces.append(trace)
+
+        return trace_id
+
+    def get_traces(self, trace_id: Optional[str] = None, last_n: Optional[int] = None) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
+        """
+        Retrieve interaction traces.
+
+        Args:
+            trace_id: Optional specific trace ID to retrieve
+            last_n: Optional number of most recent traces to retrieve
+
+        Returns:
+            Single trace dict if trace_id provided, list of traces otherwise
+        """
+        if trace_id:
+            # Find specific trace by ID
+            for trace in self._traces:
+                if trace['trace_id'] == trace_id:
+                    return trace
+            return None
+
+        if last_n:
+            # Return last N traces
+            return list(self._traces)[-last_n:] if len(self._traces) >= last_n else list(self._traces)
+
+        # Return all traces
+        return list(self._traces)
 
     def _handle_api_error(self, error: Exception) -> Exception:
         """
```
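The `deque(maxlen=...)` ring buffer is what bounds memory here: once full, each append silently evicts the oldest trace, so `get_traces()` can never return more than `max_traces` entries. A standalone illustration:

```python
from collections import deque

traces = deque(maxlen=3)
for i in range(5):
    traces.append({'trace_id': str(i)})

# The two oldest entries were evicted automatically.
print([t['trace_id'] for t in traces])  # ['2', '3', '4']
```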
```diff
@@ -210,6 +311,7 @@ class BaseProvider(AbstractCoreInterface, ABC):
                  retry_strategy=None,  # Custom retry strategy for structured output
                  tool_call_tags: Optional[str] = None,  # Tool call tag rewriting
                  execute_tools: Optional[bool] = None,  # Tool execution control
+                 glyph_compression: Optional[str] = None,  # Glyph compression preference
                  **kwargs) -> Union[GenerateResponse, Iterator[GenerateResponse], BaseModel]:
         """
         Generate with integrated telemetry and error handling.
@@ -226,6 +328,7 @@ class BaseProvider(AbstractCoreInterface, ABC):
             retry_strategy: Optional retry strategy for structured output validation
             tool_call_tags: Optional tool call tag format for rewriting
             execute_tools: Whether to execute tools automatically (True) or let agent handle execution (False)
+            glyph_compression: Glyph compression preference ("auto", "always", "never")
         """
         # Handle structured output request
         if response_model is not None:
```
```diff
@@ -268,8 +371,17 @@ class BaseProvider(AbstractCoreInterface, ABC):
 
         # Process media content if provided
         processed_media = None
+        media_metadata = None
         if media:
-            processed_media = self._process_media_content(media)
+            compression_pref = glyph_compression or kwargs.get('glyph_compression', 'auto')
+            processed_media = self._process_media_content(media, compression_pref)
+
+            # Extract metadata from processed media for response
+            if processed_media:
+                media_metadata = []
+                for media_content in processed_media:
+                    if hasattr(media_content, 'metadata') and media_content.metadata:
+                        media_metadata.append(media_content.metadata)
 
         # Convert tools to ToolDefinition objects first (outside retry loop)
         converted_tools = None
```
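End to end, this lets a caller steer Glyph compression per request. A sketch, assuming a local PDF path and a vision-capable model:

```python
# Sketch: key and file path are placeholders; generate() parameter per the diff.
from abstractcore.providers.anthropic_provider import AnthropicProvider

provider = AnthropicProvider(api_key="sk-ant-...")
response = provider.generate(
    "Summarize this report.",
    media=["report.pdf"],        # routed through AutoMediaHandler
    glyph_compression="always",  # "auto" (default), "always", or "never"
)
```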
```diff
@@ -326,6 +438,7 @@ class BaseProvider(AbstractCoreInterface, ABC):
                 stream=stream,
                 execute_tools=should_execute_tools,
                 tool_call_tags=tool_call_tags,
+                media_metadata=media_metadata,
                 **kwargs
             )
 
```
```diff
@@ -379,6 +492,26 @@ class BaseProvider(AbstractCoreInterface, ABC):
             # Apply default qwen3 rewriting for non-streaming responses
             response = self._apply_non_streaming_tag_rewriting(response, tool_call_tags)
 
+            # Add visual token calculation if media metadata is available
+            if media_metadata and response:
+                self.logger.debug(f"Enhancing response with visual tokens from {len(media_metadata)} media items")
+                response = self._enhance_response_with_visual_tokens(response, media_metadata)
+
+            # Capture interaction trace if enabled
+            if self.enable_tracing and response:
+                trace_id = self._capture_trace(
+                    prompt=prompt,
+                    messages=messages,
+                    system_prompt=system_prompt,
+                    tools=converted_tools,
+                    response=response,
+                    kwargs=kwargs
+                )
+                # Attach trace_id to response metadata
+                if not response.metadata:
+                    response.metadata = {}
+                response.metadata['trace_id'] = trace_id
+
             self._track_generation(prompt, response, start_time, success=True, stream=False)
             return response
 
```
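Putting the tracing pieces together: with `enable_tracing=True`, each non-streaming response carries a `trace_id` in its metadata that `get_traces()` resolves back to the full record. A sketch:

```python
# Sketch: assumes the provider was built with enable_tracing=True.
from abstractcore.providers.anthropic_provider import AnthropicProvider

provider = AnthropicProvider(api_key="sk-ant-...", enable_tracing=True)
response = provider.generate("Hello!")

trace = provider.get_traces(trace_id=response.metadata['trace_id'])
print(trace['parameters'], trace['response']['finish_reason'])

recent = provider.get_traces(last_n=5)  # up to the 5 most recent traces
```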
```diff
@@ -410,6 +543,7 @@ class BaseProvider(AbstractCoreInterface, ABC):
                            stream: bool = False,
                            response_model: Optional[Type[BaseModel]] = None,
                            execute_tools: Optional[bool] = None,
+                           media_metadata: Optional[List[Dict[str, Any]]] = None,
                            **kwargs) -> Union[GenerateResponse, Iterator[GenerateResponse]]:
         """
         Internal generation method to be implemented by subclasses.
```
```diff
@@ -428,6 +562,102 @@ class BaseProvider(AbstractCoreInterface, ABC):
         """
         raise NotImplementedError("Subclasses must implement _generate_internal")
 
+    def _enhance_response_with_visual_tokens(self, response: GenerateResponse, media_metadata: List[Dict[str, Any]]) -> GenerateResponse:
+        """
+        Enhance the response with visual token calculations for Glyph compression.
+        This method is called automatically by BaseProvider for all providers.
+        """
+        try:
+            # Calculate visual tokens using VLM token calculator
+            provider_name = self.provider or self.__class__.__name__.lower().replace('provider', '')
+            self.logger.debug(f"Calculating visual tokens for provider={provider_name}, model={self.model}")
+
+            visual_tokens = self._calculate_visual_tokens(media_metadata, provider_name, self.model)
+            self.logger.debug(f"Calculated visual tokens: {visual_tokens}")
+
+            if visual_tokens > 0:
+                # Ensure response has metadata
+                if not response.metadata:
+                    response.metadata = {}
+
+                # Add visual token information to metadata
+                response.metadata['visual_tokens'] = visual_tokens
+
+                # Ensure response has usage dict
+                if not response.usage:
+                    response.usage = {}
+
+                # Add visual tokens to usage
+                response.usage['visual_tokens'] = visual_tokens
+
+                # Update total tokens to include visual tokens
+                original_total = response.usage.get('total_tokens', 0)
+                response.usage['total_tokens'] = original_total + visual_tokens
+
+                self.logger.info(f"Enhanced response with {visual_tokens} visual tokens (new total: {response.usage['total_tokens']})")
+            else:
+                self.logger.debug("No visual tokens calculated - skipping enhancement")
+
+        except Exception as e:
+            self.logger.warning(f"Failed to enhance response with visual tokens: {e}")
+
+        return response
+
+    def _calculate_visual_tokens(self, media_metadata: List[Dict[str, Any]], provider: str, model: str) -> int:
+        """Calculate visual tokens from media metadata using VLM token calculator."""
+        try:
+            from ..utils.vlm_token_calculator import VLMTokenCalculator
+            from pathlib import Path
+
+            calculator = VLMTokenCalculator()
+            total_visual_tokens = 0
+
+            self.logger.debug(f"Processing {len(media_metadata)} media metadata items")
+
+            for i, metadata in enumerate(media_metadata):
+                self.logger.debug(f"Metadata {i}: processing_method={metadata.get('processing_method')}")
+
+                # Check if this is Glyph compression
+                if metadata.get('processing_method') == 'direct_pdf_conversion':
+                    glyph_cache_dir = metadata.get('glyph_cache_dir')
+                    total_images = metadata.get('total_images', 0)
+
+                    self.logger.debug(f"Glyph metadata found: cache_dir={glyph_cache_dir}, total_images={total_images}")
+
+                    if glyph_cache_dir and Path(glyph_cache_dir).exists():
+                        # Get actual image paths
+                        cache_dir = Path(glyph_cache_dir)
+                        image_paths = list(cache_dir.glob("image_*.png"))
+
+                        self.logger.debug(f"Found {len(image_paths)} images in cache directory")
+
+                        if image_paths:
+                            # Calculate tokens for all images
+                            token_analysis = calculator.calculate_tokens_for_images(
+                                image_paths=image_paths,
+                                provider=provider,
+                                model=model
+                            )
+                            total_visual_tokens += token_analysis['total_tokens']
+
+                            self.logger.debug(f"Calculated {token_analysis['total_tokens']} visual tokens for {len(image_paths)} Glyph images")
+                        else:
+                            # Fallback: estimate based on total_images
+                            base_tokens = calculator.PROVIDER_CONFIGS.get(provider, {}).get('base_tokens', 512)
+                            estimated_tokens = total_images * base_tokens
+                            total_visual_tokens += estimated_tokens
+
+                            self.logger.debug(f"Estimated {estimated_tokens} visual tokens for {total_images} Glyph images (fallback)")
+                    else:
+                        self.logger.debug(f"Cache directory not found or doesn't exist: {glyph_cache_dir}")
+
+            self.logger.debug(f"Total visual tokens calculated: {total_visual_tokens}")
+            return total_visual_tokens
+
+        except Exception as e:
+            self.logger.warning(f"Failed to calculate visual tokens: {e}")
+            return 0
+
     def _initialize_token_limits(self):
         """Initialize default token limits based on model capabilities"""
         # Set default max_tokens if not provided
```
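From the caller's side the enhancement surfaces as extra usage fields. A sketch of inspecting them after a Glyph-compressed request; the key names follow the code above, while actual values depend on the provider's image-token accounting:

```python
# Sketch: provider setup as in the earlier examples; paths are placeholders.
from abstractcore.providers.anthropic_provider import AnthropicProvider

provider = AnthropicProvider(api_key="sk-ant-...")
response = provider.generate(
    "Summarize this document.",
    media=["paper.pdf"],
    glyph_compression="always",
)

usage = response.usage or {}
print(usage.get('visual_tokens'))  # tokens attributed to rendered page images
print(usage.get('total_tokens'))   # original total plus visual_tokens
```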
```diff
@@ -804,12 +1034,14 @@ class BaseProvider(AbstractCoreInterface, ABC):
         """Rough estimation of token count for given text"""
         return super().estimate_tokens(text)
 
-    def _process_media_content(self, media: List[Union[str, Dict[str, Any], 'MediaContent']]) -> List['MediaContent']:
+    def _process_media_content(self, media: List[Union[str, Dict[str, Any], 'MediaContent']],
+                               glyph_compression: str = "auto") -> List['MediaContent']:
         """
         Process media content from various input formats into standardized MediaContent objects.
 
         Args:
             media: List of media inputs (file paths, MediaContent objects, or dicts)
+            glyph_compression: Glyph compression preference (auto, always, never)
 
         Returns:
             List of processed MediaContent objects
```
|
|
|
837
1069
|
try:
|
|
838
1070
|
if isinstance(media_item, str):
|
|
839
1071
|
# File path - process with auto media handler
|
|
840
|
-
handler = AutoMediaHandler(
|
|
841
|
-
|
|
1072
|
+
handler = AutoMediaHandler(
|
|
1073
|
+
enable_glyph_compression=True,
|
|
1074
|
+
glyph_config=getattr(self, 'glyph_config', None)
|
|
1075
|
+
)
|
|
1076
|
+
result = handler.process_file(
|
|
1077
|
+
media_item,
|
|
1078
|
+
provider=self.provider,
|
|
1079
|
+
model=self.model,
|
|
1080
|
+
glyph_compression=glyph_compression
|
|
1081
|
+
)
|
|
842
1082
|
if result.success:
|
|
843
1083
|
processed_media.append(result.media_content)
|
|
844
1084
|
else:
|
|
```diff
@@ -880,14 +1120,47 @@ class BaseProvider(AbstractCoreInterface, ABC):
         The server will use this method to aggregate models across all providers.
 
         Args:
-            **kwargs: Provider-specific parameters
+            **kwargs: Provider-specific parameters including:
+                - api_key: API key for authentication (if required)
+                - base_url: Base URL for API endpoint (if applicable)
+                - input_capabilities: Optional list of ModelInputCapability enums to filter by input capability
+                  (e.g., [ModelInputCapability.IMAGE] for vision models)
+                - output_capabilities: Optional list of ModelOutputCapability enums to filter by output capability
+                  (e.g., [ModelOutputCapability.EMBEDDINGS] for embedding models)
 
         Returns:
-            List of model names available for this provider
+            List of model names available for this provider, optionally filtered by capabilities
+
+        Examples:
+            >>> from abstractcore.providers import OpenAIProvider
+            >>> from abstractcore.providers.model_capabilities import ModelInputCapability, ModelOutputCapability
+            >>>
+            >>> # Get all models
+            >>> all_models = OpenAIProvider.list_available_models(api_key="...")
+            >>>
+            >>> # Get models that can analyze images
+            >>> vision_models = OpenAIProvider.list_available_models(
+            ...     api_key="...",
+            ...     input_capabilities=[ModelInputCapability.IMAGE]
+            ... )
+            >>>
+            >>> # Get embedding models
+            >>> embedding_models = OpenAIProvider.list_available_models(
+            ...     api_key="...",
+            ...     output_capabilities=[ModelOutputCapability.EMBEDDINGS]
+            ... )
+            >>>
+            >>> # Get vision models that generate text (most common case)
+            >>> vision_text_models = OpenAIProvider.list_available_models(
+            ...     api_key="...",
+            ...     input_capabilities=[ModelInputCapability.TEXT, ModelInputCapability.IMAGE],
+            ...     output_capabilities=[ModelOutputCapability.TEXT]
+            ... )
 
         Note:
             This is an abstract method that MUST be implemented by all provider subclasses.
             Each provider should implement its own discovery logic (API calls, local scanning, etc.).
+            Providers should apply the capability filters if provided in kwargs.
         """
         pass
```