abstractcore 2.4.7.tar.gz → 2.4.9.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {abstractcore-2.4.7 → abstractcore-2.4.9}/PKG-INFO +2 -2
- {abstractcore-2.4.7 → abstractcore-2.4.9}/README.md +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/apps/app_config_utils.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/cli/main.py +13 -5
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/factory.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/vision_fallback.py +16 -2
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/__init__.py +0 -2
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/registry.py +1 -17
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/server/app.py +0 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/version.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore.egg-info/PKG-INFO +2 -2
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore.egg-info/SOURCES.txt +0 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_basic_session.py +9 -6
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_comprehensive_events.py +10 -4
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_core_components.py +13 -7
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_integration.py +4 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_llm_integration.py +13 -4
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_real.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_enhanced_prompt.py +3 -3
- abstractcore-2.4.9/tests/test_factory.py +54 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_provider_connectivity.py +8 -5
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_provider_simple_generation.py +6 -3
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_provider_streaming.py +18 -6
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_providers.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_real_models_comprehensive.py +2 -2
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_retry_strategy.py +13 -13
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_seed_determinism.py +6 -6
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_seed_temperature_basic.py +46 -18
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_stream_tool_calling.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_structured_integration.py +5 -5
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_structured_output.py +10 -10
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_tool_calling.py +1 -1
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_user_scenario_validation.py +6 -6
- abstractcore-2.4.7/abstractcore/providers/mock_provider.py +0 -167
- abstractcore-2.4.7/tests/test_factory.py +0 -35
- {abstractcore-2.4.7 → abstractcore-2.4.9}/LICENSE +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/apps/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/apps/__main__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/apps/extractor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/apps/judge.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/apps/summarizer.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/architectures/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/architectures/detection.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/architectures/enums.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/assets/architecture_formats.json +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/assets/model_capabilities.json +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/assets/session_schema.json +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/cli/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/cli/vision_config.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/enums.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/interface.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/retry.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/session.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/core/types.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/embeddings/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/embeddings/manager.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/embeddings/models.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/events/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/exceptions/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/auto_handler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/base.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/capabilities.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/handlers/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/handlers/anthropic_handler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/handlers/local_handler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/handlers/openai_handler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/processors/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/processors/image_processor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/processors/office_processor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/processors/pdf_processor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/processors/text_processor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/types.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/utils/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/media/utils/image_scaler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/processing/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/processing/basic_extractor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/processing/basic_judge.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/processing/basic_summarizer.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/anthropic_provider.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/base.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/huggingface_provider.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/lmstudio_provider.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/mlx_provider.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/ollama_provider.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/openai_provider.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/providers/streaming.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/server/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/structured/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/structured/handler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/structured/retry.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/common_tools.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/core.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/handler.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/parser.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/registry.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/syntax_rewriter.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/tools/tag_rewriter.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/__init__.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/cli.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/message_preprocessor.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/self_fixes.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/structured_logging.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore/utils/token_utils.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore.egg-info/dependency_links.txt +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore.egg-info/entry_points.txt +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore.egg-info/requires.txt +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/abstractcore.egg-info/top_level.txt +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/pyproject.toml +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/setup.cfg +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_agentic_cli_compatibility.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_all_specified_providers.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_basic_summarizer.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_cli_media.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_complete_integration.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_consistency.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_critical_streaming_tool_fix.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_debug_server.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_direct_vs_server.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_matrix_operations.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_no_mock.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_semantic_validation.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_embeddings_simple.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_environment_variable_tool_call_tags.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_final_accuracy.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_final_comprehensive.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_final_graceful_errors.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_fixed_media.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_fixed_prompt.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_graceful_fallback.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_import_debug.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_integrated_functionality.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_lmstudio_context.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_media_import.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_media_server.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_ollama_tool_role_fix.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_openai_conversion_manual.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_openai_format_bug.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_openai_format_conversion.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_openai_media_integration.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_progressive_complexity.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_provider_basic_session.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_provider_token_translation.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_provider_tool_detection.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_providers_comprehensive.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_providers_simple.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_retry_observability.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_sensory_prompting.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_server_debug.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_server_embeddings_real.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_server_integration.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_streaming_enhancements.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_streaming_tag_rewriting.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_syntax_rewriter.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_text_only_model_experience.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_tool_execution_separation.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_unified_streaming.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_unload_memory.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_vision_accuracy.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_vision_comprehensive.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_vision_fallback_improvement.py +0 -0
- {abstractcore-2.4.7 → abstractcore-2.4.9}/tests/test_wrong_model_fallback.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.4.7
+Version: 2.4.9
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -195,7 +195,7 @@ print(f"Summary: {response.get_summary()}")  # "Model: gpt-4o-mini | Toke

 **Token Count Sources:**
 - **Provider APIs**: OpenAI, Anthropic, LMStudio (native API token counts)
-- **AbstractCore Calculation**: MLX, HuggingFace
+- **AbstractCore Calculation**: MLX, HuggingFace (using `token_utils.py`)
 - **Mixed Sources**: Ollama (combination of provider and calculated tokens)

 **Backward Compatibility**: Legacy `prompt_tokens` and `completion_tokens` keys remain available in `response.usage` dictionary.
@@ -95,7 +95,7 @@ print(f"Summary: {response.get_summary()}")  # "Model: gpt-4o-mini | Toke

 **Token Count Sources:**
 - **Provider APIs**: OpenAI, Anthropic, LMStudio (native API token counts)
-- **AbstractCore Calculation**: MLX, HuggingFace
+- **AbstractCore Calculation**: MLX, HuggingFace (using `token_utils.py`)
 - **Mixed Sources**: Ollama (combination of provider and calculated tokens)

 **Backward Compatibility**: Legacy `prompt_tokens` and `completion_tokens` keys remain available in `response.usage` dictionary.
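A minimal, illustrative sketch of reading these token counts, based only on the README lines above (`response.usage` exposes the legacy `prompt_tokens`/`completion_tokens` keys and `response.get_summary()` returns a one-line summary); the provider and model names are placeholders:

```python
from abstractcore import create_llm

llm = create_llm("openai", model="gpt-4o-mini")  # placeholder provider/model
response = llm.generate("Hello")

usage = response.usage  # dictionary, per the backward-compatibility note above
print(usage.get("prompt_tokens"), usage.get("completion_tokens"))
print(response.get_summary())  # e.g. "Model: gpt-4o-mini | Toke..."
```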
@@ -8,7 +8,7 @@ def get_app_defaults(app_name: str) -> tuple[str, str]:
         from ..config import get_config_manager
         config_manager = get_config_manager()
         return config_manager.get_app_default(app_name)
-    except Exception:
+    except (ImportError, Exception):
         # Fallback to hardcoded defaults if config unavailable
         hardcoded_defaults = {
             'summarizer': ('huggingface', 'unsloth/Qwen3-4B-Instruct-2507-GGUF'),
@@ -38,7 +38,13 @@ from typing import List, Optional
 # Add parent directory to path for imports
 sys.path.insert(0, str(Path(__file__).parent.parent.parent))

-
+# Import config manager with fallback
+try:
+    from abstractcore.config import get_config_manager
+    CONFIG_AVAILABLE = True
+except ImportError:
+    CONFIG_AVAILABLE = False
+    get_config_manager = None

 def download_vision_model(model_name: str = "blip-base-caption") -> bool:
     """Download a vision model for local use."""
@@ -144,10 +150,12 @@ def download_vision_model(model_name: str = "blip-base-caption") -> bool:
     print(f"📁 Model saved to: {models_dir}")

     # Configure AbstractCore to use this model
-
-
-
-
+    if CONFIG_AVAILABLE:
+        config_manager = get_config_manager()
+        # Use the proper HuggingFace model identifier
+        config_manager.set_vision_provider("huggingface", hf_id)
+    else:
+        print("⚠️ Config system not available - manual configuration required")

     print(f"✅ Configured AbstractCore to use HuggingFace model: {hf_id}")
     print(f"🎯 Vision fallback is now enabled!")
@@ -12,7 +12,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
     Create an LLM provider instance with unified token parameter support.

     Args:
-        provider: Provider name (openai, anthropic, ollama, huggingface, mlx, lmstudio
+        provider: Provider name (openai, anthropic, ollama, huggingface, mlx, lmstudio)
        model: Model name (optional, will use provider default)
        **kwargs: Additional configuration including token parameters

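For reference, a minimal usage sketch of the `create_llm` signature documented in the docstring above (the model name is a placeholder; omitting `model` falls back to the provider default):

```python
from abstractcore import create_llm

# provider: one of openai, anthropic, ollama, huggingface, mlx, lmstudio
llm = create_llm("openai", model="gpt-4o")  # model is optional
response = llm.generate("Hello")
print(response.content)
```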
@@ -31,14 +31,28 @@ class VisionFallbackHandler:
     def __init__(self, config_manager=None):
         """Initialize with configuration manager."""
         if config_manager is None:
-
-
+            try:
+                from abstractcore.config import get_config_manager
+                self.config_manager = get_config_manager()
+            except ImportError:
+                # Config module not available - use fallback behavior
+                logger.warning("Config module not available, vision fallback disabled")
+                self.config_manager = None
         else:
             self.config_manager = config_manager

     @property
     def vision_config(self):
         """Get vision configuration from unified config system."""
+        if self.config_manager is None:
+            # Return a minimal config object when config system is not available
+            class FallbackVisionConfig:
+                strategy = "disabled"
+                caption_provider = None
+                caption_model = None
+                fallback_chain = []
+                local_models_path = None
+            return FallbackVisionConfig()
         return self.config_manager.config.vision

     def create_description(self, image_path: str, user_prompt: str = None) -> str:
@@ -7,7 +7,6 @@ from .ollama_provider import OllamaProvider
 from .lmstudio_provider import LMStudioProvider
 from .huggingface_provider import HuggingFaceProvider
 from .mlx_provider import MLXProvider
-from .mock_provider import MockProvider

 # Provider registry for centralized provider discovery and management
 from .registry import (
@@ -32,7 +31,6 @@ __all__ = [
     'LMStudioProvider',
     'HuggingFaceProvider',
     'MLXProvider',
-    'MockProvider',

     # Provider registry
     'ProviderRegistry',
@@ -136,19 +136,6 @@ class ProviderRegistry:
             import_path="..providers.huggingface_provider"
         ))

-        # Mock Provider
-        self.register_provider(ProviderInfo(
-            name="mock",
-            display_name="Mock",
-            provider_class=None,
-            description="Testing provider for development and unit tests",
-            default_model="mock-model",
-            supported_features=["chat", "completion", "embeddings", "prompted_tools", "streaming", "testing"],
-            authentication_required=False,
-            local_provider=True,
-            installation_extras=None,
-            import_path="..providers.mock_provider"
-        ))

     def register_provider(self, provider_info: ProviderInfo):
         """Register a provider in the registry."""
@@ -182,10 +169,7 @@ class ProviderRegistry:
     def _load_provider_class(self, provider_info: ProviderInfo):
         """Dynamically load a provider class."""
         try:
-            if provider_info.name == "mock":
-                from ..providers.mock_provider import MockProvider
-                return MockProvider
-            elif provider_info.name == "openai":
+            if provider_info.name == "openai":
                 from ..providers.openai_provider import OpenAIProvider
                 return OpenAIProvider
             elif provider_info.name == "anthropic":
@@ -1101,7 +1101,6 @@ async def list_providers():
     - **LMStudio**: Local model development and testing platform
     - **MLX**: Apple Silicon optimized local inference
     - **HuggingFace**: Access to HuggingFace models (transformers and embeddings)
-    - **Mock**: Testing provider for development

     **Use Cases:**
     - Discover available providers before making requests
@@ -11,4 +11,4 @@ including when the package is installed from PyPI where pyproject.toml is not av

 # Package version - update this when releasing new versions
 # This must be manually synchronized with the version in pyproject.toml
-__version__ = "2.4.7"
+__version__ = "2.4.9"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.4.7
+Version: 2.4.9
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -195,7 +195,7 @@ print(f"Summary: {response.get_summary()}")  # "Model: gpt-4o-mini | Toke

 **Token Count Sources:**
 - **Provider APIs**: OpenAI, Anthropic, LMStudio (native API token counts)
-- **AbstractCore Calculation**: MLX, HuggingFace
+- **AbstractCore Calculation**: MLX, HuggingFace (using `token_utils.py`)
 - **Mixed Sources**: Ollama (combination of provider and calculated tokens)

 **Backward Compatibility**: Legacy `prompt_tokens` and `completion_tokens` keys remain available in `response.usage` dictionary.
@@ -62,7 +62,6 @@ abstractcore/providers/base.py
 abstractcore/providers/huggingface_provider.py
 abstractcore/providers/lmstudio_provider.py
 abstractcore/providers/mlx_provider.py
-abstractcore/providers/mock_provider.py
 abstractcore/providers/ollama_provider.py
 abstractcore/providers/openai_provider.py
 abstractcore/providers/registry.py
@@ -6,7 +6,7 @@ import pytest
 from datetime import datetime
 from abstractcore.core.session import BasicSession
 from abstractcore.core.types import Message
-from abstractcore.providers.mock_provider import MockProvider
+from abstractcore.providers.openai_provider import OpenAIProvider


 class TestBasicSession:
@@ -65,14 +65,17 @@ class TestBasicSession:
         session.clear_history(keep_system=False)
         assert len(session.messages) == 0

-    def
-        """Test generation with
-
-
+    def test_generation_with_openai_provider(self):
+        """Test generation with OpenAI provider"""
+        try:
+            provider = OpenAIProvider()
+            session = BasicSession(provider=provider)
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         response = session.generate("Hello")
         assert response.content is not None
-        assert
+        assert len(response.content) > 0
         assert len(session.messages) == 2  # user + assistant

     def test_persistence(self, tmp_path):
|
|
|
212
212
|
class TestProviderEvents:
|
|
213
213
|
"""Test events emitted by providers"""
|
|
214
214
|
|
|
215
|
-
def
|
|
216
|
-
"""Test that
|
|
215
|
+
def test_openai_provider_events(self, event_capture):
|
|
216
|
+
"""Test that OpenAI provider emits expected events"""
|
|
217
217
|
# Register event listener for generation events only (PROVIDER_CREATED removed)
|
|
218
218
|
on_global(EventType.GENERATION_STARTED, event_capture.capture_event)
|
|
219
219
|
on_global(EventType.GENERATION_COMPLETED, event_capture.capture_event)
|
|
220
220
|
|
|
221
221
|
# Create provider and generate response
|
|
222
|
-
|
|
222
|
+
try:
|
|
223
|
+
llm = create_llm("openai", model="gpt-4o")
|
|
224
|
+
except ImportError:
|
|
225
|
+
pytest.skip("OpenAI provider not available")
|
|
223
226
|
response = llm.generate("Test prompt")
|
|
224
227
|
|
|
225
228
|
# Verify generation events were emitted
|
|
@@ -302,7 +305,10 @@ class TestStructuredOutputEvents:
         on_global(EventType.VALIDATION_FAILED, event_capture.capture_event)

         # Create provider and generate structured response
-
+        try:
+            llm = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         # Test with invalid JSON to trigger validation failure
         with patch.object(llm, '_generate_internal') as mock_generate:
@@ -137,10 +137,13 @@ class TestBasicSession:
 class TestProviderFactory:
     """Test provider factory"""

-    def
-        """Test creating
-
-
+    def test_create_openai_provider(self):
+        """Test creating OpenAI provider"""
+        try:
+            provider = create_llm("openai")
+            assert provider is not None
+        except ImportError:
+            pytest.skip("OpenAI provider not available")
         assert isinstance(provider, AbstractCoreInterface)

     def test_create_ollama_provider(self):
@@ -303,9 +306,12 @@ class TestProviderIntegration:
         assert response.content is not None

     def test_session_with_provider(self):
-        """Test session with
-
-
+        """Test session with OpenAI provider"""
+        try:
+            provider = create_llm("openai")
+            session = BasicSession(provider=provider, system_prompt="Test system")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         response = session.generate("Hello")
         assert response is not None
@@ -204,7 +204,10 @@ class TestLLMEmbeddingIntegration:
         mock_llm = mock_create_llm.return_value
         mock_llm.generate.return_value = "Mock response"

-
+        try:
+            llm = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         # Use both independently
         embedding = embedder.embed("Test text")
@@ -43,8 +43,11 @@ class TestEmbeddingsLLMIntegration:
         assert len(embedding) == 384
         assert all(isinstance(x, (int, float)) for x in embedding)

-        # Create LLM provider (
-
+        # Create LLM provider (OpenAI if available)
+        try:
+            llm = create_llm("openai")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         # Verify both work independently
         similarity = embedder.compute_similarity("test 1", "test 2")
@@ -73,7 +76,10 @@ class TestEmbeddingsLLMIntegration:
             model="sentence-transformers/all-MiniLM-L6-v2",
             cache_dir=self.cache_dir
         )
-
+        try:
+            llm = create_llm("openai")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         # Real knowledge base
         knowledge_base = [
@@ -221,7 +227,10 @@ Based on the provided context, please answer the question:"""
             model="sentence-transformers/all-MiniLM-L6-v2",
             cache_dir=self.cache_dir
         )
-
+        try:
+            llm = create_llm("openai")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")
         session = BasicSession(llm)

         # Test session with embedding-enhanced prompts
@@ -273,7 +273,7 @@ class TestLLMIntegrationReal:
         # Test that LLM creation doesn't interfere with embeddings
         try:
             # This might fail if no LLM providers are configured, which is fine
-            llm = create_llm("
+            llm = create_llm("openai")  # Use OpenAI provider if available
             print("✅ LLM and embeddings can coexist")
         except Exception:
             print("✅ LLM creation failed as expected (no real providers configured)")
@@ -20,7 +20,7 @@ def test_enhanced_prompt_structure():
     # Create a test LocalMediaHandler for text-only model
     handler = LocalMediaHandler("ollama", {"vision_support": False})

-    # Create
+    # Create test MediaContent
     media_content = MediaContent(
         content="test-image-content",
         media_type=MediaType.IMAGE,
@@ -29,13 +29,13 @@ def test_enhanced_prompt_structure():
         metadata={"file_name": "arc_de_triomphe.jpg"}
     )

-    # Set a
+    # Set a test file path
     media_content.file_path = "/tmp/test_image.jpg"

     print("1. Testing prompt construction...")
     print("   User question: 'What is in this image?'")

-    # This would normally call the vision fallback, but we'll
+    # This would normally call the vision fallback, but we'll simulate the result
     # to show the prompt structure without actually processing an image

     print("\n2. Expected prompt structure sent to text-only model:")
@@ -0,0 +1,54 @@
+"""
+Test the factory function.
+"""
+
+import pytest
+from abstractcore import create_llm
+from abstractcore.providers.openai_provider import OpenAIProvider
+from abstractcore.providers.anthropic_provider import AnthropicProvider
+
+
+class TestFactory:
+    """Test create_llm factory function"""
+
+    def test_create_openai_provider(self):
+        """Test creating OpenAI provider"""
+        try:
+            llm = create_llm("openai")
+            assert isinstance(llm, OpenAIProvider)
+            assert llm.model == "gpt-5-nano-2025-08-07"  # Default model
+        except ImportError:
+            pytest.skip("OpenAI dependencies not available")
+
+    def test_create_anthropic_provider(self):
+        """Test creating Anthropic provider"""
+        try:
+            llm = create_llm("anthropic")
+            assert isinstance(llm, AnthropicProvider)
+            assert llm.model == "claude-3-5-haiku-latest"  # Default model
+        except ImportError:
+            pytest.skip("Anthropic dependencies not available")
+
+    def test_create_provider_with_custom_model(self):
+        """Test creating provider with custom model"""
+        try:
+            llm = create_llm("openai", model="gpt-4o")
+            assert llm.model == "gpt-4o"
+        except ImportError:
+            pytest.skip("OpenAI dependencies not available")
+
+    def test_unknown_provider_raises_error(self):
+        """Test unknown provider raises error"""
+        with pytest.raises(ValueError, match="Unknown provider"):
+            create_llm("nonexistent")
+
+    def test_provider_case_insensitive(self):
+        """Test provider names are case insensitive"""
+        try:
+            llm1 = create_llm("OPENAI")
+            llm2 = create_llm("OpenAI")
+            llm3 = create_llm("openai")
+
+            assert all(isinstance(llm, OpenAIProvider) for llm in [llm1, llm2, llm3])
+        except ImportError:
+            pytest.skip("OpenAI dependencies not available")
@@ -88,11 +88,14 @@ class TestProviderConnectivity:
             else:
                 raise

-    def
-        """Test
-
-
-
+    def test_openai_connectivity(self):
+        """Test OpenAI provider can be created."""
+        try:
+            provider = create_llm("openai", model="gpt-4o")
+            assert provider is not None
+        except ImportError:
+            pytest.skip("OpenAI provider not available")
+        assert provider.model == "gpt-4o"


 if __name__ == "__main__":
@@ -148,9 +148,12 @@ class TestProviderSimpleGeneration:
             else:
                 raise

-    def
-        """Test
-
+    def test_openai_simple_generation(self):
+        """Test OpenAI provider simple generation."""
+        try:
+            provider = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         response = provider.generate("Who are you? Answer in one sentence.")

@@ -197,9 +197,12 @@ class TestProviderStreaming:
             else:
                 raise

-    def
-        """Test
-
+    def test_openai_streaming_basic(self):
+        """Test OpenAI provider streaming functionality."""
+        try:
+            provider = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         # Test streaming
         stream = provider.generate(
@@ -304,7 +307,10 @@ class TestProviderStreaming:

     def test_streaming_chunk_structure(self):
         """Test that streaming chunks have proper structure."""
-
+        try:
+            provider = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         stream = provider.generate("Test prompt", stream=True)

@@ -321,7 +327,10 @@ class TestProviderStreaming:

     def test_streaming_interruption(self):
         """Test that streaming can be interrupted gracefully."""
-
+        try:
+            provider = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         stream = provider.generate("Long response test", stream=True)

@@ -337,7 +346,10 @@ class TestProviderStreaming:

     def test_streaming_empty_response(self):
         """Test streaming behavior with empty or minimal responses."""
-
+        try:
+            provider = create_llm("openai", model="gpt-4o")
+        except ImportError:
+            pytest.skip("OpenAI provider not available")

         stream = provider.generate("", stream=True)  # Empty prompt

@@ -289,8 +289,8 @@ class TestRealLLMIntegration:
             cache_dir=self.cache_dir
         )

-        # Use
-        llm = create_llm("
+        # Use OpenAI LLM for testing structure
+        llm = create_llm("openai")

         # Real knowledge base
         knowledge_base = [