abstractcore-2.4.7-py3-none-any.whl → abstractcore-2.4.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/core/factory.py +1 -1
- abstractcore/providers/__init__.py +0 -2
- abstractcore/providers/registry.py +1 -17
- abstractcore/server/app.py +0 -1
- abstractcore/utils/version.py +1 -1
- {abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/METADATA +2 -2
- {abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/RECORD +11 -12
- abstractcore/providers/mock_provider.py +0 -167
- {abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/WHEEL +0 -0
- {abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/entry_points.txt +0 -0
- {abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/top_level.txt +0 -0
abstractcore/core/factory.py
CHANGED
@@ -12,7 +12,7 @@ def create_llm(provider: str, model: Optional[str] = None, **kwargs) -> Abstract
     Create an LLM provider instance with unified token parameter support.

     Args:
-        provider: Provider name (openai, anthropic, ollama, huggingface, mlx, lmstudio
+        provider: Provider name (openai, anthropic, ollama, huggingface, mlx, lmstudio)
         model: Model name (optional, will use provider default)
         **kwargs: Additional configuration including token parameters

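For orientation, this is the public factory entry point the docstring fix touches. A minimal usage sketch; the model name, the `max_tokens` kwarg, and the response attribute are illustrative assumptions, not taken from this diff:

```python
from abstractcore.core.factory import create_llm

# Provider names follow the docstring above; model falls back to the
# provider default when omitted. max_tokens is an assumed example of the
# "unified token parameter support" the docstring mentions.
llm = create_llm("ollama", model="llama3", max_tokens=512)
response = llm.generate("Summarize unified token parameters in one sentence.")
print(response.content)  # GenerateResponse.content, per the provider code later in this diff
```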
abstractcore/providers/__init__.py
CHANGED
@@ -7,7 +7,6 @@ from .ollama_provider import OllamaProvider
 from .lmstudio_provider import LMStudioProvider
 from .huggingface_provider import HuggingFaceProvider
 from .mlx_provider import MLXProvider
-from .mock_provider import MockProvider

 # Provider registry for centralized provider discovery and management
 from .registry import (
@@ -32,7 +31,6 @@ __all__ = [
     'LMStudioProvider',
     'HuggingFaceProvider',
     'MLXProvider',
-    'MockProvider',

     # Provider registry
     'ProviderRegistry',
abstractcore/providers/registry.py
CHANGED
@@ -136,19 +136,6 @@ class ProviderRegistry:
             import_path="..providers.huggingface_provider"
         ))

-        # Mock Provider
-        self.register_provider(ProviderInfo(
-            name="mock",
-            display_name="Mock",
-            provider_class=None,
-            description="Testing provider for development and unit tests",
-            default_model="mock-model",
-            supported_features=["chat", "completion", "embeddings", "prompted_tools", "streaming", "testing"],
-            authentication_required=False,
-            local_provider=True,
-            installation_extras=None,
-            import_path="..providers.mock_provider"
-        ))

     def register_provider(self, provider_info: ProviderInfo):
         """Register a provider in the registry."""
@@ -182,10 +169,7 @@ class ProviderRegistry:
     def _load_provider_class(self, provider_info: ProviderInfo):
         """Dynamically load a provider class."""
         try:
-            if provider_info.name == "mock":
-                from ..providers.mock_provider import MockProvider
-                return MockProvider
-            elif provider_info.name == "openai":
+            if provider_info.name == "openai":
                 from ..providers.openai_provider import OpenAIProvider
                 return OpenAIProvider
             elif provider_info.name == "anthropic":
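The deleted block doubles as documentation of the `ProviderInfo` fields the registry accepts. A hedged sketch of registering a third-party provider the same way; the provider name, model, and module path are hypothetical, and whether the library exposes a shared registry instance is not shown in this diff:

```python
from abstractcore.providers.registry import ProviderInfo, ProviderRegistry

registry = ProviderRegistry()  # assumption: library code may expose a singleton instead

# Field-for-field mirror of the removed "mock" registration above.
registry.register_provider(ProviderInfo(
    name="acme",
    display_name="Acme",
    provider_class=None,  # built-in entries leave this None and resolve lazily
    description="Hypothetical third-party provider",
    default_model="acme-small",
    supported_features=["chat", "completion", "streaming"],
    authentication_required=True,
    local_provider=False,
    installation_extras=None,
    import_path="..providers.acme_provider",  # hypothetical module
))
```

Note that `_load_provider_class` above resolves built-in names through hardcoded per-name imports, so a real external provider would more likely pass its class via `provider_class` directly instead of relying on `import_path`.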
abstractcore/server/app.py
CHANGED
@@ -1101,7 +1101,6 @@ async def list_providers():
     - **LMStudio**: Local model development and testing platform
     - **MLX**: Apple Silicon optimized local inference
     - **HuggingFace**: Access to HuggingFace models (transformers and embeddings)
-    - **Mock**: Testing provider for development

     **Use Cases:**
     - Discover available providers before making requests
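The docstring edit sits on the async server handler behind provider discovery. A hedged client sketch; the host, port, route, and response shape are all assumptions, since only the handler name `list_providers` appears in this diff:

```python
import json
import urllib.request

# Assumed local server address and route for the list_providers handler.
with urllib.request.urlopen("http://localhost:8000/providers") as resp:
    payload = json.load(resp)

# As of 2.4.8 the "mock" provider should no longer appear in the listing.
print(payload)
```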
abstractcore/utils/version.py
CHANGED
@@ -11,4 +11,4 @@ including when the package is installed from PyPI where pyproject.toml is not available

 # Package version - update this when releasing new versions
 # This must be manually synchronized with the version in pyproject.toml
-__version__ = "2.4.7"
+__version__ = "2.4.8"
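Since the comment says the string must be kept in sync with pyproject.toml by hand, a small check can guard releases. A sketch, assuming a source checkout, a standard PEP 621 `[project]` table, and Python 3.11+ for stdlib `tomllib`:

```python
import tomllib

from abstractcore.utils.version import __version__

# Compare the hand-maintained string against pyproject.toml (source checkouts
# only; the installed wheel does not ship pyproject.toml, as the module
# docstring notes).
with open("pyproject.toml", "rb") as f:
    pyproject_version = tomllib.load(f)["project"]["version"]

assert __version__ == pyproject_version, (
    f"version.py has {__version__!r}, pyproject.toml has {pyproject_version!r}"
)
```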
{abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: abstractcore
-Version: 2.4.7
+Version: 2.4.8
 Summary: Unified interface to all LLM providers with essential infrastructure for tool calling, streaming, and model management
 Author-email: Laurent-Philippe Albou <contact@abstractcore.ai>
 Maintainer-email: Laurent-Philippe Albou <contact@abstractcore.ai>
@@ -195,7 +195,7 @@ print(f"Summary: {response.get_summary()}")  # "Model: gpt-4o-mini | Toke

 **Token Count Sources:**
 - **Provider APIs**: OpenAI, Anthropic, LMStudio (native API token counts)
-- **AbstractCore Calculation**: MLX, HuggingFace
+- **AbstractCore Calculation**: MLX, HuggingFace (using `token_utils.py`)
 - **Mixed Sources**: Ollama (combination of provider and calculated tokens)

 **Backward Compatibility**: Legacy `prompt_tokens` and `completion_tokens` keys remain available in `response.usage` dictionary.
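The token-source list pairs with the usage dictionary documented in this METADATA excerpt. A sketch of reading it; the key names follow this diff's excerpts, while the provider/model choice and the `create_llm`/`generate` call pattern are illustrative:

```python
from abstractcore.core.factory import create_llm

llm = create_llm("openai", model="gpt-4o-mini")
response = llm.generate("Say hello.")

usage = response.usage
print(usage["input_tokens"], usage["output_tokens"], usage["total_tokens"])

# Legacy aliases the METADATA says remain available:
assert usage["prompt_tokens"] == usage["input_tokens"]
assert usage["completion_tokens"] == usage["output_tokens"]

print(response.get_summary())  # e.g. "Model: gpt-4o-mini | Toke…"
```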
{abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/RECORD
CHANGED
@@ -16,7 +16,7 @@ abstractcore/cli/main.py,sha256=QD38nnfrInavO452WbkXCI37SVsdIu9VhvjEOojXBGY,3183
 abstractcore/cli/vision_config.py,sha256=jJzO4zBexh8SqSKp6YKOXdMDSv4AL4Ztl5Xi-5c4KyY,17869
 abstractcore/core/__init__.py,sha256=2h-86U4QkCQ4gzZ4iRusSTMlkODiUS6tKjZHiEXz6rM,684
 abstractcore/core/enums.py,sha256=BhkVnHC-X1_377JDmqd-2mnem9GdBLqixWlYzlP_FJU,695
-abstractcore/core/factory.py,sha256=
+abstractcore/core/factory.py,sha256=ec7WGW2JKK-dhDplziTAeRkebEUFymtEEZ_bS5qkpqY,2798
 abstractcore/core/interface.py,sha256=-VAY0nlsTnWN_WghiuMC7iE7xUdZfYOg6KlgrAPi14Y,14086
 abstractcore/core/retry.py,sha256=wNlUAxfmvdO_uVWb4iqkhTqd7O1oRwXxqvVQaLXQOw0,14538
 abstractcore/core/session.py,sha256=fdqhnufCWrV022RjQ-Xfb1KFv_s9-GzetSSR-QuXv-Q,36452
@@ -47,19 +47,18 @@ abstractcore/processing/__init__.py,sha256=t6hiakQjcZROT4pw9ZFt2q6fF3vf5VpdMKG2E
 abstractcore/processing/basic_extractor.py,sha256=3x-3BdIHgLvqLnLF6K1-P4qVaLIpAnNIIutaJi7lDQM,49832
 abstractcore/processing/basic_judge.py,sha256=tKWJrg_tY4vCHzWgXxz0ZjgLXBYYfpMcpG7vl03hJcM,32218
 abstractcore/processing/basic_summarizer.py,sha256=XHNxMQ_8aLStTeUo6_2JaThlct12Htpz7ORmm0iuJsg,25495
-abstractcore/providers/__init__.py,sha256=
+abstractcore/providers/__init__.py,sha256=n-2RMNm3QpKxHw9EOjv8icRMRnfJp5Xg0uSVzHCW3BI,1222
 abstractcore/providers/anthropic_provider.py,sha256=R87Z_DNNdeA4LMSxx84pqo8saKFz38dHCJMBpc-rL70,21552
 abstractcore/providers/base.py,sha256=YfrqM3c7wLT19vspL7goUO6Bv-z1691ZkCM2wxvQX4s,51501
 abstractcore/providers/huggingface_provider.py,sha256=pgpeSwpwyNB_5GDyLEz2OSTu9me-GAJzQ116dGtpCvQ,49012
 abstractcore/providers/lmstudio_provider.py,sha256=odT6luVR3POVcq2ZqkINLyLoCAu_YGpLLj3fEddNliU,21021
 abstractcore/providers/mlx_provider.py,sha256=sDgxf_kVJJwxprQWVef9w2CLOu2dLES8D0Vf5tY6PzE,18463
-abstractcore/providers/mock_provider.py,sha256=x-frlBLxlqx6jlMoPnZN4aNv1pHacRYW_jlp0peI9FA,6168
 abstractcore/providers/ollama_provider.py,sha256=1bE80NCj_TQADxRCiu9luyLuI_gZe2EO5pCKoC4VhQM,21740
 abstractcore/providers/openai_provider.py,sha256=1s7JJalyIBOvLB7UAUwXbTc2aYrYSWg7hJjKGnCX1qU,23313
-abstractcore/providers/registry.py,sha256=
+abstractcore/providers/registry.py,sha256=fKFrN6Z3o5Gi0dfwvXDPtrrJXDjx9oPSfjWjZf-NJBc,15883
 abstractcore/providers/streaming.py,sha256=VnffBV_CU9SAKzghL154OoFyEdDsiLwUNXPahyU41Bw,31342
 abstractcore/server/__init__.py,sha256=1DSAz_YhQtnKv7sNi5TMQV8GFujctDOabgvAdilQE0o,249
-abstractcore/server/app.py,sha256=
+abstractcore/server/app.py,sha256=7pG5ZkZqYNnyby4jyvp3_NKl5nNDmZpOhv_-F8Jruy4,96580
 abstractcore/structured/__init__.py,sha256=VXRQHGcm-iaYnLOBPin2kyhvhhQA0kaGt_pcNDGsE_8,339
 abstractcore/structured/handler.py,sha256=Vb15smiR81JGDXX2RLkY2Exuj67J7a6C-xwVrZoXp0I,17134
 abstractcore/structured/retry.py,sha256=BN_PvrWybyU1clMy2cult1-TVxFSMaVqiCPmmXvA5aI,3805
@@ -77,10 +76,10 @@ abstractcore/utils/message_preprocessor.py,sha256=GdHkm6tmrgjm3PwHRSCjIsq1XLkbhy
 abstractcore/utils/self_fixes.py,sha256=QEDwNTW80iQM4ftfEY3Ghz69F018oKwLM9yeRCYZOvw,5886
 abstractcore/utils/structured_logging.py,sha256=Vm-HviSa42G9DJCWmaEv4a0QG3NMsADD3ictLOs4En0,19952
 abstractcore/utils/token_utils.py,sha256=eLwFmJ68p9WMFD_MHLMmeJRW6Oqx_4hKELB8FNQ2Mnk,21097
-abstractcore/utils/version.py,sha256=
-abstractcore-2.4.
-abstractcore-2.4.
-abstractcore-2.4.
-abstractcore-2.4.
-abstractcore-2.4.
-abstractcore-2.4.
+abstractcore/utils/version.py,sha256=4vhW55aGVOlb2i9Zm_ZxoGAPd5qtSm6ZN1WmH15tBYs,605
+abstractcore-2.4.8.dist-info/licenses/LICENSE,sha256=PI2v_4HMvd6050uDD_4AY_8PzBnu2asa3RKbdDjowTA,1078
+abstractcore-2.4.8.dist-info/METADATA,sha256=UfLD0Zi-Mco6B9NkoWuypgHZuUPJeGYyJVJjBBPlpsA,31907
+abstractcore-2.4.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+abstractcore-2.4.8.dist-info/entry_points.txt,sha256=UdVmchBC_Lt3H4Vlkt5js-QDAkVlBbkCu1yCsswk-KE,454
+abstractcore-2.4.8.dist-info/top_level.txt,sha256=DiNHBI35SIawW3N9Z-z0y6cQYNbXd32pvBkW0RLfScs,13
+abstractcore-2.4.8.dist-info/RECORD,,
abstractcore/providers/mock_provider.py
DELETED
@@ -1,167 +0,0 @@
-"""
-Mock provider for testing purposes.
-"""
-
-from typing import List, Dict, Any, Optional, Union, Iterator, Type
-
-try:
-    from pydantic import BaseModel
-    PYDANTIC_AVAILABLE = True
-except ImportError:
-    PYDANTIC_AVAILABLE = False
-    BaseModel = None
-
-from .base import BaseProvider
-from ..core.types import GenerateResponse
-
-
-class MockProvider(BaseProvider):
-    """Simple mock provider for testing core functionality."""
-
-    def __init__(self, model: str = "mock-model", **kwargs):
-        super().__init__(model, **kwargs)
-
-        # Handle timeout parameter for mock provider
-        self._handle_timeout_parameter(kwargs)
-
-        # Mock provider uses prompted strategy for structured output
-        self.model_capabilities = {"structured_output": "prompted"}
-
-    def generate(self, *args, **kwargs):
-        """Public generate method that includes telemetry"""
-        return self.generate_with_telemetry(*args, **kwargs)
-
-    def _generate_internal(self,
-                           prompt: str,
-                           messages: Optional[List[Dict[str, str]]] = None,
-                           system_prompt: Optional[str] = None,
-                           tools: Optional[List[Dict[str, Any]]] = None,
-                           stream: bool = False,
-                           response_model: Optional[Type[BaseModel]] = None,
-                           **kwargs) -> Union[GenerateResponse, Iterator[GenerateResponse]]:
-        """Mock generation implementation"""
-
-        if stream:
-            return self._stream_response(prompt)
-        else:
-            return self._single_response(prompt, response_model)
-
-    def _single_response(self, prompt: str, response_model: Optional[Type[BaseModel]] = None) -> GenerateResponse:
-        """Generate single mock response"""
-        import time
-
-        # Simulate generation time (10-100ms for mock)
-        start_time = time.time()
-        time.sleep(0.01 + (len(prompt) % 10) * 0.01)  # 10-100ms based on prompt length
-        gen_time = round((time.time() - start_time) * 1000, 1)
-
-        if response_model and PYDANTIC_AVAILABLE:
-            # Generate valid JSON for structured output
-            content = self._generate_mock_json(response_model)
-        else:
-            content = f"Mock response to: {prompt}"
-
-        return GenerateResponse(
-            content=content,
-            model=self.model,
-            finish_reason="stop",
-            usage=self._calculate_mock_usage(prompt, content),
-            gen_time=gen_time
-        )
-
-    def _calculate_mock_usage(self, prompt: str, response: str) -> Dict[str, int]:
-        """Calculate mock token usage using centralized token utilities."""
-        from ..utils.token_utils import TokenUtils
-
-        input_tokens = TokenUtils.estimate_tokens(prompt, self.model)
-        output_tokens = TokenUtils.estimate_tokens(response, self.model)
-        total_tokens = input_tokens + output_tokens
-
-        return {
-            "input_tokens": input_tokens,
-            "output_tokens": output_tokens,
-            "total_tokens": total_tokens,
-            # Keep legacy keys for backward compatibility
-            "prompt_tokens": input_tokens,
-            "completion_tokens": output_tokens
-        }
-
-    def _stream_response(self, prompt: str) -> Iterator[GenerateResponse]:
-        """Generate streaming mock responses"""
-        words = f"Mock response to: {prompt}".split()
-        for i, word in enumerate(words):
-            yield GenerateResponse(
-                content=word + (" " if i < len(words) - 1 else ""),
-                model=self.model,
-                finish_reason="stop" if i == len(words) - 1 else None
-            )
-
-    def _generate_mock_json(self, model_class: Type[BaseModel]) -> str:
-        """Generate valid JSON for Pydantic model"""
-        import json
-
-        # Create mock data based on field types
-        mock_data = {}
-        for field_name, field_info in model_class.model_fields.items():
-            field_type = field_info.annotation
-
-            # Handle basic types
-            if field_type == str:
-                mock_data[field_name] = f"mock_{field_name}"
-            elif field_type == int:
-                mock_data[field_name] = 42
-            elif field_type == float:
-                mock_data[field_name] = 3.14
-            elif field_type == bool:
-                mock_data[field_name] = True
-            else:
-                # For complex types, provide reasonable defaults
-                mock_data[field_name] = f"mock_{field_name}"
-
-        return json.dumps(mock_data)
-
-    def _handle_timeout_parameter(self, kwargs: Dict[str, Any]) -> None:
-        """
-        Handle timeout parameter for Mock provider.
-
-        Mock provider simulates responses instantly, so timeout parameters
-        don't apply. If a non-None timeout is provided, it's accepted but
-        has no effect on mock generation.
-
-        Args:
-            kwargs: Initialization kwargs that may contain timeout
-        """
-        timeout_value = kwargs.get('timeout')
-        if timeout_value is not None:
-            # For mock provider, we accept timeout but it has no effect
-            # No warning needed since this is for testing
-            self._timeout = timeout_value
-        else:
-            # Keep None value
-            self._timeout = None
-
-    def _update_http_client_timeout(self) -> None:
-        """
-        Mock provider doesn't use HTTP clients.
-        Timeout changes have no effect on mock responses.
-        """
-        # No-op for mock provider - no HTTP clients used
-        pass
-
-    def get_capabilities(self) -> List[str]:
-        """Get mock capabilities"""
-        return ["tools", "streaming", "vision"]
-
-    def validate_config(self) -> bool:
-        """Validate mock provider configuration"""
-        return True
-
-    def list_available_models(self, **kwargs) -> List[str]:
-        """List available mock models for testing."""
-        return [
-            "mock-model",
-            "mock-chat-model",
-            "mock-streaming-model",
-            "mock-structured-model",
-            "mock-vision-model"
-        ]
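Because this file is gone from the wheel, test suites that did `from abstractcore.providers import MockProvider` will break on upgrade. A minimal local stand-in, sketched against the same `BaseProvider`/`GenerateResponse` surface the deleted file used; whether `BaseProvider` declares further abstract methods is not visible in this diff:

```python
from typing import Iterator, List, Union

from abstractcore.core.types import GenerateResponse
from abstractcore.providers.base import BaseProvider


class LocalFakeProvider(BaseProvider):
    """Test-only stand-in for the removed MockProvider."""

    def __init__(self, model: str = "fake-model", **kwargs):
        super().__init__(model, **kwargs)

    def generate(self, *args, **kwargs):
        # Same pattern as the deleted file: route through telemetry.
        return self.generate_with_telemetry(*args, **kwargs)

    def _generate_internal(self, prompt: str, stream: bool = False,
                           **kwargs) -> Union[GenerateResponse, Iterator[GenerateResponse]]:
        # Non-streaming only; extend as needed for streaming tests.
        content = f"Fake response to: {prompt}"
        return GenerateResponse(content=content, model=self.model, finish_reason="stop")

    def validate_config(self) -> bool:
        return True

    def list_available_models(self, **kwargs) -> List[str]:
        return ["fake-model"]
```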
{abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/WHEEL
File without changes
{abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/entry_points.txt
File without changes
{abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/licenses/LICENSE
File without changes
{abstractcore-2.4.7.dist-info → abstractcore-2.4.8.dist-info}/top_level.txt
File without changes