abstractcore-2.4.5-py3-none-any.whl → abstractcore-2.4.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractcore/__init__.py +5 -1
- abstractcore/assets/session_schema.json +1 -1
- abstractcore/core/interface.py +7 -0
- abstractcore/core/session.py +28 -3
- abstractcore/core/types.py +25 -1
- abstractcore/providers/anthropic_provider.py +20 -2
- abstractcore/providers/base.py +24 -0
- abstractcore/providers/huggingface_provider.py +44 -18
- abstractcore/providers/lmstudio_provider.py +17 -4
- abstractcore/providers/mlx_provider.py +36 -14
- abstractcore/providers/mock_provider.py +17 -7
- abstractcore/providers/ollama_provider.py +16 -4
- abstractcore/providers/openai_provider.py +18 -5
- abstractcore/tools/common_tools.py +651 -1
- abstractcore/utils/version.py +1 -1
- {abstractcore-2.4.5.dist-info → abstractcore-2.4.7.dist-info}/METADATA +108 -12
- {abstractcore-2.4.5.dist-info → abstractcore-2.4.7.dist-info}/RECORD +21 -21
- {abstractcore-2.4.5.dist-info → abstractcore-2.4.7.dist-info}/WHEEL +0 -0
- {abstractcore-2.4.5.dist-info → abstractcore-2.4.7.dist-info}/entry_points.txt +0 -0
- {abstractcore-2.4.5.dist-info → abstractcore-2.4.7.dist-info}/licenses/LICENSE +0 -0
- {abstractcore-2.4.5.dist-info → abstractcore-2.4.7.dist-info}/top_level.txt +0 -0
abstractcore/providers/openai_provider.py (+18 −5):

```diff
@@ -50,8 +50,7 @@ class OpenAIProvider(BaseProvider):
         # Preflight check: validate model exists
         self._validate_model_exists()

-        # Store configuration
-        self.temperature = kwargs.get("temperature", 0.7)
+        # Store provider-specific configuration
         self.top_p = kwargs.get("top_p", 1.0)
         self.frequency_penalty = kwargs.get("frequency_penalty", 0.0)
         self.presence_penalty = kwargs.get("presence_penalty", 0.0)
```
```diff
@@ -125,6 +124,11 @@ class OpenAIProvider(BaseProvider):
         call_params["top_p"] = kwargs.get("top_p", self.top_p)
         call_params["frequency_penalty"] = kwargs.get("frequency_penalty", self.frequency_penalty)
         call_params["presence_penalty"] = kwargs.get("presence_penalty", self.presence_penalty)
+
+        # Add seed if provided (OpenAI supports seed for deterministic outputs)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            call_params["seed"] = seed_value

         # Handle different token parameter names for different model families
         if self._uses_max_completion_tokens():
```
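The seed handling resolves a per-call `seed` ahead of the instance default and only adds the key to `call_params` when one is set. A minimal usage sketch, assuming abstractcore's `create_llm` factory and `generate()` method (those names are assumed public API, not shown in this diff):

```python
# Hypothetical usage sketch -- `create_llm` and `generate()` are assumed;
# only the precedence rule kwargs.get("seed", self.seed) is confirmed
# by the diff above.
from abstractcore import create_llm

llm = create_llm("openai", model="gpt-4o-mini", seed=42)  # instance default

# A per-call seed overrides the instance default; with a fixed seed the
# OpenAI API attempts best-effort deterministic sampling.
first = llm.generate("Name three primary colors.", seed=7)
second = llm.generate("Name three primary colors.", seed=7)
# The two completions should usually match; determinism is best-effort.
```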
```diff
@@ -165,8 +169,14 @@ class OpenAIProvider(BaseProvider):
         if stream:
             return self._stream_response(call_params, tools)
         else:
+            # Track generation time
+            start_time = time.time()
             response = self.client.chat.completions.create(**call_params)
+            gen_time = round((time.time() - start_time) * 1000, 1)
+
             formatted = self._format_response(response)
+            # Add generation time to response
+            formatted.gen_time = gen_time

             # Handle tool execution for OpenAI native responses
             if tools and formatted.has_tool_calls():
```
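The generation-time tracking wraps only the blocking, non-streaming call (the streaming branch returns before any tokens arrive, so no `gen_time` is attached there) and reports wall-clock milliseconds rounded to one decimal. The same arithmetic as a standalone, hypothetical helper:

```python
import time
from typing import Any, Callable

def timed_call(fn: Callable[..., Any], *args: Any, **kwargs: Any) -> tuple[Any, float]:
    """Run fn and return (result, latency in ms rounded to one decimal),
    mirroring the gen_time computation in the diff. This helper is an
    illustration, not part of the package."""
    start_time = time.time()
    result = fn(*args, **kwargs)
    gen_time = round((time.time() - start_time) * 1000, 1)
    return result, gen_time

result, gen_time = timed_call(sum, range(1_000_000))
print(result, gen_time)  # e.g. 499999500000 12.3
```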
```diff
@@ -212,13 +222,16 @@ class OpenAIProvider(BaseProvider):
                     "arguments": tc.function.arguments
                 })

-            # Build usage dict with
+            # Build usage dict with consistent terminology
             usage = None
             if hasattr(response, 'usage'):
                 usage = {
+                    "input_tokens": response.usage.prompt_tokens,
+                    "output_tokens": response.usage.completion_tokens,
+                    "total_tokens": response.usage.total_tokens,
+                    # Keep legacy keys for backward compatibility
                     "prompt_tokens": response.usage.prompt_tokens,
-                    "completion_tokens": response.usage.completion_tokens,
-                    "total_tokens": response.usage.total_tokens
+                    "completion_tokens": response.usage.completion_tokens
                 }

             # Add detailed token breakdown for reasoning models
```
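With both canonical and legacy keys present, downstream code can prefer `input_tokens`/`output_tokens` and fall back to the OpenAI-style names when reading usage from older responses. A small hypothetical helper:

```python
def token_counts(usage: dict) -> tuple[int, int]:
    """Prefer the new canonical keys; fall back to the legacy
    prompt/completion names kept for backward compatibility.
    Illustration only -- not a function from the package."""
    input_tokens = usage.get("input_tokens", usage.get("prompt_tokens", 0))
    output_tokens = usage.get("output_tokens", usage.get("completion_tokens", 0))
    return input_tokens, output_tokens

usage = {"input_tokens": 12, "output_tokens": 34, "total_tokens": 46,
         "prompt_tokens": 12, "completion_tokens": 34}
assert token_counts(usage) == (12, 34)
```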
|