abstractcore 2.4.5-py3-none-any.whl → 2.4.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -50,8 +50,7 @@ class OpenAIProvider(BaseProvider):
         # Preflight check: validate model exists
         self._validate_model_exists()
 
-        # Store configuration (remove duplicate max_tokens)
-        self.temperature = kwargs.get("temperature", 0.7)
+        # Store provider-specific configuration
         self.top_p = kwargs.get("top_p", 1.0)
         self.frequency_penalty = kwargs.get("frequency_penalty", 0.0)
         self.presence_penalty = kwargs.get("presence_penalty", 0.0)
@@ -125,6 +124,11 @@ class OpenAIProvider(BaseProvider):
         call_params["top_p"] = kwargs.get("top_p", self.top_p)
         call_params["frequency_penalty"] = kwargs.get("frequency_penalty", self.frequency_penalty)
         call_params["presence_penalty"] = kwargs.get("presence_penalty", self.presence_penalty)
+
+        # Add seed if provided (OpenAI supports seed for deterministic outputs)
+        seed_value = kwargs.get("seed", self.seed)
+        if seed_value is not None:
+            call_params["seed"] = seed_value
 
         # Handle different token parameter names for different model families
         if self._uses_max_completion_tokens():
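
This hunk threads an optional seed through to OpenAI's Chat Completions API, which supports best-effort deterministic sampling. The pattern is: a per-call kwarg overrides the stored default (self.seed, presumably set in __init__ outside this hunk), and the key is omitted entirely when neither is set. A minimal standalone sketch of that merge-then-omit logic (the defaults dict and function name are illustrative, not from the package):

    # Per-call kwargs win over stored defaults; None means "don't send the key".
    def build_call_params(defaults, **kwargs):
        call_params = {}
        seed_value = kwargs.get("seed", defaults.get("seed"))
        if seed_value is not None:
            call_params["seed"] = seed_value
        return call_params

    print(build_call_params({"seed": None}))           # {}
    print(build_call_params({"seed": None}, seed=42))  # {'seed': 42}
    print(build_call_params({"seed": 7}))              # {'seed': 7}

Omitting the key, rather than sending seed=None, keeps the request payload identical to pre-2.4.7 behavior when no seed is configured.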
@@ -165,8 +169,14 @@ class OpenAIProvider(BaseProvider):
         if stream:
             return self._stream_response(call_params, tools)
         else:
+            # Track generation time
+            start_time = time.time()
             response = self.client.chat.completions.create(**call_params)
+            gen_time = round((time.time() - start_time) * 1000, 1)
+
             formatted = self._format_response(response)
+            # Add generation time to response
+            formatted.gen_time = gen_time
 
             # Handle tool execution for OpenAI native responses
             if tools and formatted.has_tool_calls():
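
The added timing is plain wall-clock measurement around the blocking API call, converted to milliseconds and rounded to one decimal place (this assumes time is imported at module level, which the hunk doesn't show). A self-contained sketch of the same pattern, with an arbitrary function standing in for client.chat.completions.create:

    import time

    def timed_call(fn, *args, **kwargs):
        # Wall-clock duration in milliseconds, rounded to one decimal place,
        # mirroring the gen_time computation in the hunk above.
        start_time = time.time()
        result = fn(*args, **kwargs)
        gen_time = round((time.time() - start_time) * 1000, 1)
        return result, gen_time

    result, gen_time = timed_call(sum, range(1_000_000))
    print(f"sum took {gen_time} ms")

For pure duration measurement time.perf_counter() is the more precise monotonic clock, but time.time(), as used in the diff, is adequate at millisecond granularity for network-bound calls.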
@@ -212,13 +222,16 @@ class OpenAIProvider(BaseProvider):
                     "arguments": tc.function.arguments
                 })
 
-        # Build usage dict with detailed breakdown
+        # Build usage dict with consistent terminology
         usage = None
         if hasattr(response, 'usage'):
             usage = {
+                "input_tokens": response.usage.prompt_tokens,
+                "output_tokens": response.usage.completion_tokens,
+                "total_tokens": response.usage.total_tokens,
+                # Keep legacy keys for backward compatibility
                 "prompt_tokens": response.usage.prompt_tokens,
-                "completion_tokens": response.usage.completion_tokens,
-                "total_tokens": response.usage.total_tokens
+                "completion_tokens": response.usage.completion_tokens
             }
 
         # Add detailed token breakdown for reasoning models
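
With both naming schemes present, consumers can prefer the new input_tokens/output_tokens keys and fall back to the legacy prompt_tokens/completion_tokens keys. A reader-side sketch (the dict shape follows the hunk; the helper itself is illustrative, not part of abstractcore):

    def read_usage(usage):
        # Prefer the new key names; fall back to the legacy keys that
        # the provider keeps for backward compatibility.
        if not usage:
            return 0, 0, 0
        inp = usage.get("input_tokens", usage.get("prompt_tokens", 0))
        out = usage.get("output_tokens", usage.get("completion_tokens", 0))
        total = usage.get("total_tokens", inp + out)
        return inp, out, total

    # Works with old-style payloads...
    print(read_usage({"prompt_tokens": 12, "completion_tokens": 30, "total_tokens": 42}))
    # ...and unchanged with the dual-key payloads emitted by 2.4.7.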