pydantic-ai 0.0.23__tar.gz → 0.0.24__tar.gz
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/PKG-INFO +3 -3
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/pyproject.toml +3 -3
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_anthropic.py +7 -7
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_gemini.py +10 -8
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_groq.py +7 -7
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_mistral.py +15 -15
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_openai.py +12 -7
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/.gitignore +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/LICENSE +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/Makefile +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/README.md +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/__init__.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/conftest.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/example_modules/README.md +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/example_modules/bank_database.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/example_modules/fake_database.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/example_modules/weather_service.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/graph/__init__.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/graph/test_graph.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/graph/test_history.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/graph/test_mermaid.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/graph/test_state.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/import_examples.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/__init__.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/mock_async_stream.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_cohere.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_model.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_model_function.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_model_names.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_model_test.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_vertexai.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_agent.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_deps.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_examples.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_format_as_xml.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_live.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_logfire.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_parts_manager.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_streaming.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_tools.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_usage_limits.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/test_utils.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/typed_agent.py +0 -0
- {pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/typed_graph.py +0 -0
{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai
-Version: 0.0.23
+Version: 0.0.24
 Summary: Agent Framework / shim to use Pydantic with LLMs
 Project-URL: Homepage, https://ai.pydantic.dev
 Project-URL: Source, https://github.com/pydantic/pydantic-ai

@@ -32,9 +32,9 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.23
+Requires-Dist: pydantic-ai-slim[anthropic,cohere,groq,mistral,openai,vertexai]==0.0.24
 Provides-Extra: examples
-Requires-Dist: pydantic-ai-examples==0.0.23; extra == 'examples'
+Requires-Dist: pydantic-ai-examples==0.0.24; extra == 'examples'
 Provides-Extra: logfire
 Requires-Dist: logfire>=2.3; extra == 'logfire'
 Description-Content-Type: text/markdown

{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "pydantic-ai"
-version = "0.0.23"
+version = "0.0.24"
 description = "Agent Framework / shim to use Pydantic with LLMs"
 authors = [
     { name = "Samuel Colvin", email = "samuel@pydantic.dev" },

@@ -37,7 +37,7 @@ classifiers = [
 ]
 requires-python = ">=3.9"
 
-dependencies = ["pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.23"]
+dependencies = ["pydantic-ai-slim[openai,vertexai,groq,anthropic,mistral,cohere]==0.0.24"]
 
 [project.urls]
 Homepage = "https://ai.pydantic.dev"

@@ -46,7 +46,7 @@ Documentation = "https://ai.pydantic.dev"
 Changelog = "https://github.com/pydantic/pydantic-ai/releases"
 
 [project.optional-dependencies]
-examples = ["pydantic-ai-examples==0.0.23"]
+examples = ["pydantic-ai-examples==0.0.24"]
 logfire = ["logfire>=2.3"]
 
 [tool.uv.sources]

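The wrapper package pins its siblings exactly: the bump from 0.0.23 to 0.0.24 touches the project version and every pydantic-ai-slim / pydantic-ai-examples pin in lockstep. A small sketch of how that invariant could be checked (my own illustration, not from the repo; tomllib needs Python 3.11+):

    import re
    import tomllib  # stdlib TOML parser, Python 3.11+

    with open('pyproject.toml', 'rb') as f:
        project = tomllib.load(f)['project']

    version = project['version']
    pins = project['dependencies'] + project['optional-dependencies']['examples']
    # every '==X.Y.Z' pin should match the project's own version
    assert all(re.search(r'==([\d.]+)', pin).group(1) == version for pin in pins)
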
{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_anthropic.py

@@ -112,7 +112,7 @@ def completion_message(content: list[ContentBlock], usage: AnthropicUsage) -> An
     return AnthropicMessage(
         id='123',
         content=content,
-        model='claude-3-5-haiku-
+        model='claude-3-5-haiku-123',
         role='assistant',
         stop_reason='end_turn',
         type='message',

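The same renaming runs through the Groq, Mistral, and OpenAI hunks below. The point of the `-123` suffix is that the mocked API response now reports a model id the test never configured, so the expected `ModelResponse.model_name` provably comes from the response rather than being echoed from the requested model. A hedged illustration (the `requested` value is hypothetical, not from the diff):

    # Illustrative only: distinguish the model the client asks for from the
    # model the (mocked) response reports.
    requested = 'claude-3-5-haiku-latest'  # hypothetical configured model name
    reported = 'claude-3-5-haiku-123'      # model id carried by the mock response

    assert reported != requested
    # The updated snapshots pin model_name to the reported value, proving the
    # response is the source of truth:
    expected_model_name = reported
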
@@ -141,13 +141,13 @@ async def test_sync_request_text_response(allow_model_requests: None):
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='claude-3-5-haiku-
+            model_name='claude-3-5-haiku-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='claude-3-5-haiku-
+            model_name='claude-3-5-haiku-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
     ]

@@ -190,7 +190,7 @@ async def test_request_structured_response(allow_model_requests: None):
                     tool_call_id='123',
                 )
             ],
-            model_name='claude-3-5-haiku-
+            model_name='claude-3-5-haiku-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(

@@ -252,7 +252,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='1',
                 )
             ],
-            model_name='claude-3-5-haiku-
+            model_name='claude-3-5-haiku-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(

@@ -273,7 +273,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='2',
                 )
             ],
-            model_name='claude-3-5-haiku-
+            model_name='claude-3-5-haiku-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(

@@ -288,7 +288,7 @@ async def test_request_tool_call(allow_model_requests: None):
         ),
         ModelResponse(
             parts=[TextPart(content='final response')],
-            model_name='claude-3-5-haiku-
+            model_name='claude-3-5-haiku-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
     ]

{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_gemini.py

@@ -440,7 +440,7 @@ def gemini_response(content: _GeminiContent, finish_reason: Literal['STOP'] | No
     candidate = _GeminiCandidates(content=content, index=0, safety_ratings=[])
     if finish_reason:  # pragma: no cover
         candidate['finish_reason'] = finish_reason
-    return _GeminiResponse(candidates=[candidate], usage_metadata=example_usage())
+    return _GeminiResponse(candidates=[candidate], usage_metadata=example_usage(), model_version='gemini-1.5-flash-123')
 
 
 def example_usage() -> _GeminiUsageMetaData:

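Gemini is the one provider here whose mock gains a new field rather than a renamed one: `model_version` on the response payload. The hunks below then expect `model_name='gemini-1.5-flash-123'`, which suggests the model name is read from that field. A sketch of the assumed mapping (the dict shape mirrors `_GeminiResponse`, a TypedDict internal to pydantic_ai.models.gemini; the fallback behavior is my assumption):

    # Assumed derivation based on this diff: prefer the model_version reported
    # by the API response, falling back to the configured model if absent.
    response_payload = {
        'candidates': [],                         # trimmed for the sketch
        'usage_metadata': {},                     # trimmed for the sketch
        'model_version': 'gemini-1.5-flash-123',
    }
    configured_model = 'gemini-1.5-flash'
    model_name = response_payload.get('model_version', configured_model)
    assert model_name == 'gemini-1.5-flash-123'
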
@@ -459,7 +459,9 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
         [
             ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
-                parts=[TextPart(content='Hello world')],
+                parts=[TextPart(content='Hello world')],
+                model_name='gemini-1.5-flash-123',
+                timestamp=IsNow(tz=timezone.utc),
             ),
         ]
     )

@@ -472,13 +474,13 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
             ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='Hello world')],
-                model_name='gemini-1.5-flash',
+                model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
             ),
             ModelRequest(parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))]),
             ModelResponse(
                 parts=[TextPart(content='Hello world')],
-                model_name='gemini-1.5-flash',
+                model_name='gemini-1.5-flash-123',
                 timestamp=IsNow(tz=timezone.utc),
             ),
         ]

@@ -505,7 +507,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient):
                     args={'response': [1, 2, 123]},
                 )
             ],
-            model_name='gemini-1.5-flash',
+            model_name='gemini-1.5-flash-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(

@@ -566,7 +568,7 @@ async def test_request_tool_call(get_gemini_client: GetGeminiClient):
                     args={'loc_name': 'San Fransisco'},
                 )
             ],
-            model_name='gemini-1.5-flash',
+            model_name='gemini-1.5-flash-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(

@@ -589,7 +591,7 @@ async def test_request_tool_call(get_gemini_client: GetGeminiClient):
                     args={'loc_name': 'New York'},
                 ),
             ],
-            model_name='gemini-1.5-flash',
+            model_name='gemini-1.5-flash-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(

@@ -604,7 +606,7 @@ async def test_request_tool_call(get_gemini_client: GetGeminiClient):
         ),
         ModelResponse(
             parts=[TextPart(content='final response')],
-            model_name='gemini-1.5-flash',
+            model_name='gemini-1.5-flash-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
     ]

{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_groq.py

@@ -103,7 +103,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
         id='123',
         choices=[Choice(finish_reason='stop', index=0, message=message)],
         created=1704067200,  # 2024-01-01
-        model='llama-3.3-70b-versatile',
+        model='llama-3.3-70b-versatile-123',
         object='chat.completion',
         usage=usage,
     )

@@ -130,13 +130,13 @@ async def test_request_simple_success(allow_model_requests: None):
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='llama-3.3-70b-versatile',
+            model_name='llama-3.3-70b-versatile-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='llama-3.3-70b-versatile',
+            model_name='llama-3.3-70b-versatile-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
     ]

@@ -187,7 +187,7 @@ async def test_request_structured_response(allow_model_requests: None):
                     tool_call_id='123',
                 )
             ],
-            model_name='llama-3.3-70b-versatile',
+            model_name='llama-3.3-70b-versatile-123',
             timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -273,7 +273,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='1',
                 )
             ],
-            model_name='llama-3.3-70b-versatile',
+            model_name='llama-3.3-70b-versatile-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -294,7 +294,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='2',
                 )
             ],
-            model_name='llama-3.3-70b-versatile',
+            model_name='llama-3.3-70b-versatile-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -309,7 +309,7 @@ async def test_request_tool_call(allow_model_requests: None):
         ),
         ModelResponse(
             parts=[TextPart(content='final response')],
-            model_name='llama-3.3-70b-versatile',
+            model_name='llama-3.3-70b-versatile-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
     ]

{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_mistral.py

@@ -123,7 +123,7 @@ def completion_message(
         id='123',
         choices=[MistralChatCompletionChoice(finish_reason='stop', index=0, message=message)],
         created=1704067200 if with_created else None,  # 2024-01-01
-        model='mistral-large-
+        model='mistral-large-123',
         object='chat.completion',
         usage=usage or MistralUsageInfo(prompt_tokens=1, completion_tokens=1, total_tokens=1),
     )

@@ -217,13 +217,13 @@ async def test_multiple_completions(allow_model_requests: None):
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=IsNow(tz=timezone.utc),
         ),
         ModelRequest(parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='hello again')],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
     ]

@@ -269,19 +269,19 @@ async def test_three_completions(allow_model_requests: None):
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='hello again')],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(parts=[UserPromptPart(content='final message', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='final message')],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
     ]

@@ -396,7 +396,7 @@ async def test_request_model_structured_with_arguments_dict_response(allow_model
                     tool_call_id='123',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -458,7 +458,7 @@ async def test_request_model_structured_with_arguments_str_response(allow_model_
                     tool_call_id='123',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -519,7 +519,7 @@ async def test_request_result_type_with_arguments_str_response(allow_model_reque
                     tool_call_id='123',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -1104,7 +1104,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='1',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -1125,7 +1125,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='2',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -1140,7 +1140,7 @@ async def test_request_tool_call(allow_model_requests: None):
         ),
         ModelResponse(
             parts=[TextPart(content='final response')],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
     ]

@@ -1244,7 +1244,7 @@ async def test_request_tool_call_with_result_type(allow_model_requests: None):
                     tool_call_id='1',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -1265,7 +1265,7 @@ async def test_request_tool_call_with_result_type(allow_model_requests: None):
                     tool_call_id='2',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -1286,7 +1286,7 @@ async def test_request_tool_call_with_result_type(allow_model_requests: None):
                     tool_call_id='1',
                 )
             ],
-            model_name='mistral-large-
+            model_name='mistral-large-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(

{pydantic_ai-0.0.23 → pydantic_ai-0.0.24}/tests/models/test_openai.py

@@ -65,6 +65,11 @@ def test_init_with_base_url():
     m.name()
 
 
+def test_init_with_no_api_key_will_still_setup_client():
+    m = OpenAIModel('llama3.2', base_url='http://localhost:19434/v1')
+    assert str(m.client.base_url) == 'http://localhost:19434/v1/'
+
+
 def test_init_with_non_openai_model():
     m = OpenAIModel('llama3.2-vision:latest', base_url='https://example.com/v1/')
     m.name()

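Besides the model-name renames, test_openai.py gains a genuinely new test: constructing `OpenAIModel` with only a `base_url` and no API key still sets up a working client, which is what local OpenAI-compatible servers need. A usage sketch mirroring the test (the URL and model name are the test's illustrative values; note httpx normalizes the base URL with a trailing slash):

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIModel

    # No OPENAI_API_KEY in the environment is required here: pointing base_url
    # at a local OpenAI-compatible server is enough to build the client.
    model = OpenAIModel('llama3.2', base_url='http://localhost:19434/v1')
    agent = Agent(model)
    # agent.run_sync('hello')  # would call the local server if one is listening
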
@@ -136,7 +141,7 @@ def completion_message(message: ChatCompletionMessage, *, usage: CompletionUsage
         id='123',
         choices=[Choice(finish_reason='stop', index=0, message=message)],
         created=1704067200,  # 2024-01-01
-        model='gpt-4o',
+        model='gpt-4o-123',
         object='chat.completion',
         usage=usage,
     )

@@ -163,13 +168,13 @@ async def test_request_simple_success(allow_model_requests: None):
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='gpt-4o',
+            model_name='gpt-4o-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
         ModelRequest(parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))]),
         ModelResponse(
             parts=[TextPart(content='world')],
-            model_name='gpt-4o',
+            model_name='gpt-4o-123',
             timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
         ),
     ]

@@ -233,7 +238,7 @@ async def test_request_structured_response(allow_model_requests: None):
                     tool_call_id='123',
                 )
             ],
-            model_name='gpt-4o',
+            model_name='gpt-4o-123',
             timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -321,7 +326,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='1',
                 )
             ],
-            model_name='gpt-4o',
+            model_name='gpt-4o-123',
             timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -342,7 +347,7 @@ async def test_request_tool_call(allow_model_requests: None):
                     tool_call_id='2',
                 )
             ],
-            model_name='gpt-4o',
+            model_name='gpt-4o-123',
             timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
         ),
         ModelRequest(

@@ -357,7 +362,7 @@ async def test_request_tool_call(allow_model_requests: None):
         ),
         ModelResponse(
             parts=[TextPart(content='final response')],
-            model_name='gpt-4o',
+            model_name='gpt-4o-123',
             timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
         ),
     ]

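Taken together, the test updates pin down the user-visible effect of this release: each `ModelResponse` in a run's message history carries the model name reported by the provider. The constructor below uses exactly the keyword arguments the snapshots above assert on:

    from datetime import datetime, timezone
    from pydantic_ai.messages import ModelResponse, TextPart

    # The same shape the updated snapshots expect from result.all_messages():
    response = ModelResponse(
        parts=[TextPart(content='world')],
        model_name='gpt-4o-123',
        timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
    )
    assert response.model_name == 'gpt-4o-123'
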
All remaining files listed above are unchanged between the two versions.