pydantic-ai-slim 0.2.4__py3-none-any.whl → 0.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -244,7 +244,7 @@ class AnthropicModel(Model):
         except APIStatusError as e:
             if (status_code := e.status_code) >= 400:
                 raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
-            raise
+            raise  # pragma: lax no cover
 
     def _process_response(self, response: AnthropicMessage) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
@@ -262,13 +262,13 @@ class AnthropicModel(Model):
                     )
                 )
 
-        return ModelResponse(items, usage=_map_usage(response), model_name=response.model)
+        return ModelResponse(items, usage=_map_usage(response), model_name=response.model, vendor_id=response.id)
 
     async def _process_streamed_response(self, response: AsyncStream[RawMessageStreamEvent]) -> StreamedResponse:
         peekable_response = _utils.PeekableAsyncStream(response)
         first_chunk = await peekable_response.peek()
         if isinstance(first_chunk, _utils.Unset):
-            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')
+            raise UnexpectedModelBehavior('Streamed response ended without content or tool calls')  # pragma: no cover
 
         # Since Anthropic doesn't provide a timestamp in the message, we'll use the current time
         timestamp = datetime.now(tz=timezone.utc)
@@ -305,9 +305,10 @@ class AnthropicModel(Model):
                             is_error=False,
                         )
                         user_content_params.append(tool_result_block_param)
-                    elif isinstance(request_part, RetryPromptPart):
+                    elif isinstance(request_part, RetryPromptPart):  # pragma: no branch
                         if request_part.tool_name is None:
-                            retry_param = TextBlockParam(type='text', text=request_part.model_response())
+                            text = request_part.model_response()  # pragma: no cover
+                            retry_param = TextBlockParam(type='text', text=text)  # pragma: no cover
                         else:
                             retry_param = ToolResultBlockParam(
                                 tool_use_id=_guard_tool_call_id(t=request_part),
@@ -380,7 +381,7 @@ class AnthropicModel(Model):
                     else:  # pragma: no cover
                         raise RuntimeError(f'Unsupported media type: {item.media_type}')
                 else:
-                    raise RuntimeError(f'Unsupported content type: {type(item)}')
+                    raise RuntimeError(f'Unsupported content type: {type(item)}')  # pragma: no cover
 
     @staticmethod
     def _map_tool_definition(f: ToolDefinition) -> ToolParam:
@@ -447,21 +448,25 @@ class AnthropicStreamedResponse(StreamedResponse):
             if isinstance(event, RawContentBlockStartEvent):
                 current_block = event.content_block
                 if isinstance(current_block, TextBlock) and current_block.text:
-                    yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=current_block.text)
-                elif isinstance(current_block, ToolUseBlock):
+                    yield self._parts_manager.handle_text_delta(  # pragma: lax no cover
+                        vendor_part_id='content', content=current_block.text
+                    )
+                elif isinstance(current_block, ToolUseBlock):  # pragma: no branch
                     maybe_event = self._parts_manager.handle_tool_call_delta(
                         vendor_part_id=current_block.id,
                         tool_name=current_block.name,
                         args=cast(dict[str, Any], current_block.input),
                         tool_call_id=current_block.id,
                     )
-                    if maybe_event is not None:
+                    if maybe_event is not None:  # pragma: no branch
                         yield maybe_event
 
             elif isinstance(event, RawContentBlockDeltaEvent):
                 if isinstance(event.delta, TextDelta):
-                    yield self._parts_manager.handle_text_delta(vendor_part_id='content', content=event.delta.text)
-                elif (
+                    yield self._parts_manager.handle_text_delta(  # pragma: no cover
+                        vendor_part_id='content', content=event.delta.text
+                    )
+                elif (  # pragma: no branch
                     current_block and event.delta.type == 'input_json_delta' and isinstance(current_block, ToolUseBlock)
                 ):
                     # Try to parse the JSON immediately, otherwise cache the value for later. This handles
@@ -480,7 +485,7 @@ class AnthropicStreamedResponse(StreamedResponse):
                         args=parsed_args,
                         tool_call_id=current_block.id,
                     )
-                    if maybe_event is not None:
+                    if maybe_event is not None:  # pragma: no branch
                         yield maybe_event
 
             elif isinstance(event, (RawContentBlockStopEvent, RawMessageStopEvent)):
@@ -252,7 +252,7 @@ class BedrockConverseModel(Model):
 
     async def _process_response(self, response: ConverseResponseTypeDef) -> ModelResponse:
         items: list[ModelResponsePart] = []
-        if message := response['output'].get('message'):
+        if message := response['output'].get('message'):  # pragma: no branch
             for item in message['content']:
                 if text := item.get('text'):
                     items.append(TextPart(content=text))
@@ -271,7 +271,8 @@ class BedrockConverseModel(Model):
             response_tokens=response['usage']['outputTokens'],
             total_tokens=response['usage']['totalTokens'],
         )
-        return ModelResponse(items, usage=u, model_name=self.model_name)
+        vendor_id = response.get('ResponseMetadata', {}).get('RequestId', None)
+        return ModelResponse(items, usage=u, model_name=self.model_name, vendor_id=vendor_id)
 
     @overload
     async def _messages_create(
@@ -305,7 +306,7 @@ class BedrockConverseModel(Model):
         if not tools or not support_tools_choice:
             tool_choice: ToolChoiceTypeDef = {}
         elif not model_request_parameters.allow_text_output:
-            tool_choice = {'any': {}}
+            tool_choice = {'any': {}}  # pragma: no cover
         else:
             tool_choice = {'auto': {}}
 
@@ -492,7 +493,7 @@ class BedrockConverseModel(Model):
                         data = response.content
                         content.append({'document': {'name': name, 'format': item.format, 'source': {'bytes': data}}})
 
-                    elif item.kind == 'video-url':
+                    elif item.kind == 'video-url':  # pragma: no branch
                         format = item.media_type.split('/')[1]
                         assert format in ('mkv', 'mov', 'mp4', 'webm', 'flv', 'mpeg', 'mpg', 'wmv', 'three_gp'), (
                             f'Unsupported video format: {format}'
@@ -535,13 +536,13 @@ class BedrockStreamedResponse(StreamedResponse):
             if 'messageStop' in chunk:
                 continue
             if 'metadata' in chunk:
-                if 'usage' in chunk['metadata']:
+                if 'usage' in chunk['metadata']:  # pragma: no branch
                     self._usage += self._map_usage(chunk['metadata'])
                 continue
             if 'contentBlockStart' in chunk:
                 index = chunk['contentBlockStart']['contentBlockIndex']
                 start = chunk['contentBlockStart']['start']
-                if 'toolUse' in start:
+                if 'toolUse' in start:  # pragma: no branch
                     tool_use_start = start['toolUse']
                     tool_id = tool_use_start['toolUseId']
                     tool_name = tool_use_start['name']
@@ -552,7 +553,7 @@ class BedrockStreamedResponse(StreamedResponse):
                         tool_call_id=tool_id,
                     )
                     if maybe_event:
-                        yield maybe_event
+                        yield maybe_event  # pragma: no cover
             if 'contentBlockDelta' in chunk:
                 index = chunk['contentBlockDelta']['contentBlockIndex']
                 delta = chunk['contentBlockDelta']['delta']
@@ -566,7 +567,7 @@ class BedrockStreamedResponse(StreamedResponse):
                         args=tool_use.get('input'),
                         tool_call_id=tool_id,
                     )
-                    if maybe_event:
+                    if maybe_event:  # pragma: no branch
                         yield maybe_event
 
     @property
@@ -602,4 +603,4 @@ class _AsyncIteratorWrapper(Generic[T]):
             if type(e.__cause__) is StopIteration:
                 raise StopAsyncIteration
             else:
-                raise e
+                raise e  # pragma: lax no cover
@@ -174,7 +174,7 @@ class CohereModel(Model):
         except ApiError as e:
             if (status_code := e.status_code) and status_code >= 400:
                 raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=e.body) from e
-            raise
+            raise  # pragma: lax no cover
 
     def _process_response(self, response: ChatResponse) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
@@ -185,7 +185,7 @@ class CohereModel(Model):
             choice = response.message.content[0]
             parts.append(TextPart(choice.text))
         for c in response.message.tool_calls or []:
-            if c.function and c.function.name and c.function.arguments:
+            if c.function and c.function.name and c.function.arguments:  # pragma: no branch
                 parts.append(
                     ToolCallPart(
                         tool_name=c.function.name,
@@ -269,7 +269,7 @@ class CohereModel(Model):
                 )
             elif isinstance(part, RetryPromptPart):
                 if part.tool_name is None:
-                    yield UserChatMessageV2(role='user', content=part.model_response())
+                    yield UserChatMessageV2(role='user', content=part.model_response())  # pragma: no cover
                 else:
                     yield ToolChatMessageV2(
                         role='tool',
@@ -287,7 +287,7 @@ def _map_usage(response: ChatResponse) -> usage.Usage:
     else:
         details: dict[str, int] = {}
         if u.billed_units is not None:
-            if u.billed_units.input_tokens:
+            if u.billed_units.input_tokens:  # pragma: no branch
                 details['input_tokens'] = int(u.billed_units.input_tokens)
             if u.billed_units.output_tokens:
                 details['output_tokens'] = int(u.billed_units.output_tokens)
@@ -96,7 +96,7 @@ class FallbackModel(Model):
                     if self._fallback_on(exc):
                         exceptions.append(exc)
                         continue
-                    raise exc
+                    raise exc  # pragma: no cover
 
                 self._set_span_attributes(model)
                 yield response
@@ -109,7 +109,7 @@ class FallbackModel(Model):
             span = get_current_span()
             if span.is_recording():
                 attributes = getattr(span, 'attributes', {})
-                if attributes.get('gen_ai.request.model') == self.model_name:
+                if attributes.get('gen_ai.request.model') == self.model_name:  # pragma: no branch
                     span.set_attributes(InstrumentedModel.model_attributes(model))
 
     @property
@@ -106,7 +106,7 @@ class FunctionModel(Model):
         response = response_
         response.model_name = self._model_name
         # Add usage data if not already present
-        if not response.usage.has_values():
+        if not response.usage.has_values():  # pragma: no branch
             response.usage = _estimate_usage(chain(messages, [response]))
         response.usage.requests = 1
         return response
@@ -137,8 +137,8 @@ class GeminiModel(Model):
 
     @property
     def base_url(self) -> str:
-        assert self._url is not None, 'URL not initialized'
-        return self._url
+        assert self._url is not None, 'URL not initialized'  # pragma: no cover
+        return self._url  # pragma: no cover
 
     async def request(
         self,
@@ -201,7 +201,7 @@ class GeminiModel(Model):
         elif tools:
             return _tool_config([t['name'] for t in tools['function_declarations']])
         else:
-            return _tool_config([])
+            return _tool_config([])  # pragma: no cover
 
     @asynccontextmanager
     async def _make_request(
@@ -257,17 +257,21 @@ class GeminiModel(Model):
                 await r.aread()
                 if status_code >= 400:
                     raise ModelHTTPError(status_code=status_code, model_name=self.model_name, body=r.text)
-                raise UnexpectedModelBehavior(f'Unexpected response from gemini {status_code}', r.text)
+                raise UnexpectedModelBehavior(  # pragma: no cover
+                    f'Unexpected response from gemini {status_code}', r.text
+                )
             yield r
 
     def _process_response(self, response: _GeminiResponse) -> ModelResponse:
         if len(response['candidates']) != 1:
-            raise UnexpectedModelBehavior('Expected exactly one candidate in Gemini response')
+            raise UnexpectedModelBehavior('Expected exactly one candidate in Gemini response')  # pragma: no cover
         if 'content' not in response['candidates'][0]:
             if response['candidates'][0].get('finish_reason') == 'SAFETY':
                 raise UnexpectedModelBehavior('Safety settings triggered', str(response))
             else:
-                raise UnexpectedModelBehavior('Content field missing from Gemini response', str(response))
+                raise UnexpectedModelBehavior(  # pragma: no cover
+                    'Content field missing from Gemini response', str(response)
+                )
         parts = response['candidates'][0]['content']['parts']
         usage = _metadata_as_usage(response)
         usage.requests = 1
@@ -285,7 +289,7 @@ class GeminiModel(Model):
                 _ensure_decodeable(content),
                 experimental_allow_partial='trailing-strings',
             )
-            if responses:
+            if responses:  # pragma: no branch
                 last = responses[-1]
                 if last['candidates'] and last['candidates'][0].get('content', {}).get('parts'):
                     start_response = last
@@ -314,14 +318,14 @@ class GeminiModel(Model):
                         message_parts.append(_response_part_from_response(part.tool_name, part.model_response_object()))
                     elif isinstance(part, RetryPromptPart):
                         if part.tool_name is None:
-                            message_parts.append(_GeminiTextPart(text=part.model_response()))
+                            message_parts.append(_GeminiTextPart(text=part.model_response()))  # pragma: no cover
                         else:
                             response = {'call_error': part.model_response()}
                             message_parts.append(_response_part_from_response(part.tool_name, response))
                     else:
                         assert_never(part)
 
-                if message_parts:
+                if message_parts:  # pragma: no branch
                     contents.append(_GeminiContent(role='user', parts=message_parts))
             elif isinstance(m, ModelResponse):
                 contents.append(_content_model_response(m))
@@ -372,7 +376,7 @@ class ApiKeyAuth:
 
     async def headers(self) -> dict[str, str]:
         # https://cloud.google.com/docs/authentication/api-keys-use#using-with-rest
-        return {'X-Goog-Api-Key': self.api_key}
+        return {'X-Goog-Api-Key': self.api_key}  # pragma: no cover
 
 
 @dataclass
@@ -388,7 +392,7 @@ class GeminiStreamedResponse(StreamedResponse):
         async for gemini_response in self._get_gemini_responses():
             candidate = gemini_response['candidates'][0]
             if 'content' not in candidate:
-                raise UnexpectedModelBehavior('Streamed response has no content field')
+                raise UnexpectedModelBehavior('Streamed response has no content field')  # pragma: no cover
             gemini_part: _GeminiPartUnion
             for gemini_part in candidate['content']['parts']:
                 if 'text' in gemini_part:
@@ -407,10 +411,10 @@ class GeminiStreamedResponse(StreamedResponse):
                         args=gemini_part['function_call']['args'],
                         tool_call_id=None,
                     )
-                    if maybe_event is not None:
+                    if maybe_event is not None:  # pragma: no branch
                         yield maybe_event
                 else:
-                    assert 'function_response' in gemini_part, f'Unexpected part: {gemini_part}'
+                    assert 'function_response' in gemini_part, f'Unexpected part: {gemini_part}'  # pragma: no cover
 
     async def _get_gemini_responses(self) -> AsyncIterator[_GeminiResponse]:
         # This method exists to ensure we only yield completed items, so we don't need to worry about
@@ -438,7 +442,7 @@ class GeminiStreamedResponse(StreamedResponse):
                 yield r
 
         # Now yield the final response, which should be complete
-        if gemini_responses:
+        if gemini_responses:  # pragma: no branch
             r = gemini_responses[-1]
             self._usage += _metadata_as_usage(r)
             yield r
@@ -601,7 +605,7 @@ def _process_response_from_parts(
             items.append(TextPart(content=part['text']))
         elif 'function_call' in part:
             items.append(ToolCallPart(tool_name=part['function_call']['name'], args=part['function_call']['args']))
-        elif 'function_response' in part:
+        elif 'function_response' in part:  # pragma: no cover
             raise UnexpectedModelBehavior(
                 f'Unsupported response from Gemini, expected all parts to be function calls or text, got: {part!r}'
             )
@@ -631,13 +635,13 @@ class _GeminiFunctionResponse(TypedDict):
 
 
 def _part_discriminator(v: Any) -> str:
-    if isinstance(v, dict):
+    if isinstance(v, dict):  # pragma: no branch
         if 'text' in v:
             return 'text'
         elif 'inlineData' in v:
-            return 'inline_data'
+            return 'inline_data'  # pragma: no cover
         elif 'fileData' in v:
-            return 'file_data'
+            return 'file_data'  # pragma: no cover
         elif 'functionCall' in v or 'function_call' in v:
             return 'function_call'
         elif 'functionResponse' in v or 'function_response' in v:
@@ -748,10 +752,10 @@ class _GeminiUsageMetaData(TypedDict, total=False):
 def _metadata_as_usage(response: _GeminiResponse) -> usage.Usage:
     metadata = response.get('usage_metadata')
     if metadata is None:
-        return usage.Usage()
+        return usage.Usage()  # pragma: no cover
     details: dict[str, int] = {}
     if cached_content_token_count := metadata.get('cached_content_token_count'):
-        details['cached_content_token_count'] = cached_content_token_count
+        details['cached_content_token_count'] = cached_content_token_count  # pragma: no cover
     return usage.Usage(
         request_tokens=metadata.get('prompt_token_count', 0),
         response_tokens=metadata.get('candidates_token_count', 0),
@@ -866,10 +870,10 @@ class _GeminiJsonSchema(WalkJsonSchema):
                     unique_items.append(item)
             if len(unique_items) > 1:  # pragma: no cover
                 schema['items'] = {'anyOf': unique_items}
-            elif len(unique_items) == 1:
+            elif len(unique_items) == 1:  # pragma: no branch
                 schema['items'] = unique_items[0]
             schema.setdefault('minItems', len(prefix_items))
-            if items is None:
+            if items is None:  # pragma: no branch
                 schema.setdefault('maxItems', len(prefix_items))
 
         return schema