pydantic-ai-slim 1.0.17__tar.gz → 1.0.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/PKG-INFO +3 -3
  2. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_parts_manager.py +3 -0
  3. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/__init__.py +2 -0
  4. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/messages.py +7 -0
  5. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/__init__.py +1 -0
  6. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/openai.py +17 -10
  7. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/__init__.py +4 -0
  8. pydantic_ai_slim-1.0.18/pydantic_ai/providers/nebius.py +102 -0
  9. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/.gitignore +0 -0
  10. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/LICENSE +0 -0
  11. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/README.md +0 -0
  12. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/__init__.py +0 -0
  13. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/__main__.py +0 -0
  14. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_a2a.py +0 -0
  15. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_agent_graph.py +0 -0
  16. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_cli.py +0 -0
  17. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_function_schema.py +0 -0
  18. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_griffe.py +0 -0
  19. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_instrumentation.py +0 -0
  20. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_json_schema.py +0 -0
  21. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_mcp.py +0 -0
  22. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_otel_messages.py +0 -0
  23. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_output.py +0 -0
  24. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_run_context.py +0 -0
  25. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_system_prompt.py +0 -0
  26. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_thinking_part.py +0 -0
  27. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_tool_manager.py +0 -0
  28. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/_utils.py +0 -0
  29. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/ag_ui.py +0 -0
  30. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/agent/__init__.py +0 -0
  31. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/agent/abstract.py +0 -0
  32. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/agent/wrapper.py +0 -0
  33. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/builtin_tools.py +0 -0
  34. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/common_tools/__init__.py +0 -0
  35. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/common_tools/duckduckgo.py +0 -0
  36. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/common_tools/tavily.py +0 -0
  37. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/direct.py +0 -0
  38. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/__init__.py +0 -0
  39. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/dbos/__init__.py +0 -0
  40. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/dbos/_agent.py +0 -0
  41. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/dbos/_mcp_server.py +0 -0
  42. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/dbos/_model.py +0 -0
  43. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/dbos/_utils.py +0 -0
  44. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_agent.py +0 -0
  45. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_function_toolset.py +0 -0
  46. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_logfire.py +0 -0
  47. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_mcp_server.py +0 -0
  48. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_model.py +0 -0
  49. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_run_context.py +0 -0
  50. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/durable_exec/temporal/_toolset.py +0 -0
  51. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/exceptions.py +0 -0
  52. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/ext/__init__.py +0 -0
  53. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/ext/aci.py +0 -0
  54. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/ext/langchain.py +0 -0
  55. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/format_prompt.py +0 -0
  56. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/mcp.py +0 -0
  57. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/anthropic.py +0 -0
  58. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/bedrock.py +0 -0
  59. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/cohere.py +0 -0
  60. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/fallback.py +0 -0
  61. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/function.py +0 -0
  62. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/gemini.py +0 -0
  63. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/google.py +0 -0
  64. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/groq.py +0 -0
  65. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/huggingface.py +0 -0
  66. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/instrumented.py +0 -0
  67. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/mcp_sampling.py +0 -0
  68. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/mistral.py +0 -0
  69. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/test.py +0 -0
  70. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/models/wrapper.py +0 -0
  71. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/output.py +0 -0
  72. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/__init__.py +0 -0
  73. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/amazon.py +0 -0
  74. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/anthropic.py +0 -0
  75. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/cohere.py +0 -0
  76. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/deepseek.py +0 -0
  77. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/google.py +0 -0
  78. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/grok.py +0 -0
  79. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/groq.py +0 -0
  80. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/harmony.py +0 -0
  81. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/meta.py +0 -0
  82. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/mistral.py +0 -0
  83. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/moonshotai.py +0 -0
  84. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/openai.py +0 -0
  85. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/profiles/qwen.py +0 -0
  86. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/anthropic.py +0 -0
  87. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/azure.py +0 -0
  88. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/bedrock.py +0 -0
  89. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/cerebras.py +0 -0
  90. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/cohere.py +0 -0
  91. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/deepseek.py +0 -0
  92. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/fireworks.py +0 -0
  93. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/gateway.py +0 -0
  94. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/github.py +0 -0
  95. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/google.py +0 -0
  96. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/google_gla.py +0 -0
  97. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/google_vertex.py +0 -0
  98. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/grok.py +0 -0
  99. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/groq.py +0 -0
  100. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/heroku.py +0 -0
  101. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/huggingface.py +0 -0
  102. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/litellm.py +0 -0
  103. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/mistral.py +0 -0
  104. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/moonshotai.py +0 -0
  105. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/ollama.py +0 -0
  106. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/openai.py +0 -0
  107. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/openrouter.py +0 -0
  108. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/together.py +0 -0
  109. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/providers/vercel.py +0 -0
  110. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/py.typed +0 -0
  111. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/result.py +0 -0
  112. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/retries.py +0 -0
  113. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/run.py +0 -0
  114. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/settings.py +0 -0
  115. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/tools.py +0 -0
  116. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/__init__.py +0 -0
  117. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/_dynamic.py +0 -0
  118. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/abstract.py +0 -0
  119. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/approval_required.py +0 -0
  120. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/combined.py +0 -0
  121. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/external.py +0 -0
  122. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/filtered.py +0 -0
  123. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/function.py +0 -0
  124. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/prefixed.py +0 -0
  125. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/prepared.py +0 -0
  126. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/renamed.py +0 -0
  127. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/toolsets/wrapper.py +0 -0
  128. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pydantic_ai/usage.py +0 -0
  129. {pydantic_ai_slim-1.0.17 → pydantic_ai_slim-1.0.18}/pyproject.toml +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-ai-slim
-Version: 1.0.17
+Version: 1.0.18
 Summary: Agent Framework / shim to use Pydantic with LLMs, slim package
 Project-URL: Homepage, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
 Project-URL: Source, https://github.com/pydantic/pydantic-ai/tree/main/pydantic_ai_slim
@@ -33,7 +33,7 @@ Requires-Dist: genai-prices>=0.0.30
 Requires-Dist: griffe>=1.3.2
 Requires-Dist: httpx>=0.27
 Requires-Dist: opentelemetry-api>=1.28.0
-Requires-Dist: pydantic-graph==1.0.17
+Requires-Dist: pydantic-graph==1.0.18
 Requires-Dist: pydantic>=2.10
 Requires-Dist: typing-inspection>=0.4.0
 Provides-Extra: a2a
@@ -57,7 +57,7 @@ Requires-Dist: dbos>=1.14.0; extra == 'dbos'
 Provides-Extra: duckduckgo
 Requires-Dist: ddgs>=9.0.0; extra == 'duckduckgo'
 Provides-Extra: evals
-Requires-Dist: pydantic-evals==1.0.17; extra == 'evals'
+Requires-Dist: pydantic-evals==1.0.18; extra == 'evals'
 Provides-Extra: google
 Requires-Dist: google-genai>=1.31.0; extra == 'google'
 Provides-Extra: groq

pydantic_ai/_parts_manager.py
@@ -312,6 +312,7 @@ class ModelResponsePartsManager:
         tool_name: str,
         args: str | dict[str, Any] | None,
         tool_call_id: str | None = None,
+        id: str | None = None,
     ) -> ModelResponseStreamEvent:
         """Immediately create or fully-overwrite a ToolCallPart with the given information.
 
@@ -323,6 +324,7 @@ class ModelResponsePartsManager:
             tool_name: The name of the tool being invoked.
             args: The arguments for the tool call, either as a string, a dictionary, or None.
             tool_call_id: An optional string identifier for this tool call.
+            id: An optional identifier for this tool call part.
 
         Returns:
             ModelResponseStreamEvent: A `PartStartEvent` indicating that a new tool call part
@@ -332,6 +334,7 @@ class ModelResponsePartsManager:
             tool_name=tool_name,
             args=args,
             tool_call_id=tool_call_id or _generate_tool_call_id(),
+            id=id,
         )
         if vendor_part_id is None:
             # vendor_part_id is None, so we unconditionally append a new ToolCallPart to the end of the list

pydantic_ai/durable_exec/temporal/__init__.py
@@ -62,6 +62,8 @@ class PydanticAIPlugin(ClientPlugin, WorkerPlugin):
                 'logfire',
                 'rich',
                 'httpx',
+                'anyio',
+                'httpcore',
                 # Imported inside `logfire._internal.json_encoder` when running `logfire.info` inside an activity with attributes to serialize
                 'attrs',
                 # Imported inside `logfire._internal.json_schema` when running `logfire.info` inside an activity with attributes to serialize

pydantic_ai/messages.py
@@ -1052,6 +1052,13 @@ class BaseToolCallPart:
     In case the tool call id is not provided by the model, Pydantic AI will generate a random one.
     """
 
+    _: KW_ONLY
+
+    id: str | None = None
+    """An optional identifier of the tool call part, separate from the tool call ID.
+
+    This is used by some APIs like OpenAI Responses."""
+
     def args_as_dict(self) -> dict[str, Any]:
         """Return the arguments as a Python dictionary.
 
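
Note: `ToolCallPart` (which extends `BaseToolCallPart`) now carries a keyword-only `id` alongside `tool_call_id`. A minimal sketch of constructing a part with both identifiers; the literal values are placeholders:

    from pydantic_ai.messages import ToolCallPart

    # `id` is keyword-only (via KW_ONLY) and defaults to None; it holds the
    # provider-side item identifier (e.g. an OpenAI Responses `fc_...` id),
    # while `tool_call_id` keeps holding the `call_...` identifier.
    part = ToolCallPart(
        'get_weather',
        {'city': 'Paris'},
        tool_call_id='call_abc123',
        id='fc_def456',
    )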

pydantic_ai/models/__init__.py
@@ -691,6 +691,7 @@ def infer_model(model: Model | KnownModelName | str) -> Model: # noqa: C901
         'together',
         'vercel',
         'litellm',
+        'nebius',
     ):
         from .openai import OpenAIChatModel
 
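
Note: with 'nebius' added to the provider list in `infer_model`, the usual `<provider>:<model>` string form should resolve to an `OpenAIChatModel` backed by the Nebius provider. A hedged sketch; the model name is a placeholder:

    from pydantic_ai import Agent

    # Assumes the NEBIUS_API_KEY environment variable is set.
    agent = Agent('nebius:meta-llama/Meta-Llama-3.1-8B-Instruct')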

pydantic_ai/models/openai.py
@@ -284,6 +284,7 @@ class OpenAIChatModel(Model):
             'together',
             'vercel',
             'litellm',
+            'nebius',
         ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
@@ -312,6 +313,7 @@ class OpenAIChatModel(Model):
             'together',
             'vercel',
             'litellm',
+            'nebius',
         ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
@@ -339,6 +341,7 @@ class OpenAIChatModel(Model):
             'together',
             'vercel',
             'litellm',
+            'nebius',
         ]
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
@@ -899,7 +902,7 @@ class OpenAIResponsesModel(Model):
         self,
         model_name: OpenAIModelName,
         *,
-        provider: Literal['openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together']
+        provider: Literal['openai', 'deepseek', 'azure', 'openrouter', 'grok', 'fireworks', 'together', 'nebius']
         | Provider[AsyncOpenAI] = 'openai',
         profile: ModelProfileSpec | None = None,
         settings: ModelSettings | None = None,
@@ -1005,7 +1008,12 @@ class OpenAIResponsesModel(Model):
                 items.append(TextPart(content.text, id=item.id))
             elif isinstance(item, responses.ResponseFunctionToolCall):
                 items.append(
-                    ToolCallPart(item.name, item.arguments, tool_call_id=_combine_tool_call_ids(item.call_id, item.id))
+                    ToolCallPart(
+                        item.name,
+                        item.arguments,
+                        tool_call_id=item.call_id,
+                        id=item.id,
+                    )
                 )
             elif isinstance(item, responses.ResponseCodeInterpreterToolCall):
                 call_part, return_part, file_parts = _map_code_interpreter_tool_call(item, self.system)
@@ -1178,7 +1186,7 @@ class OpenAIResponsesModel(Model):
                 truncation=model_settings.get('openai_truncation', NOT_GIVEN),
                 timeout=model_settings.get('timeout', NOT_GIVEN),
                 service_tier=model_settings.get('openai_service_tier', NOT_GIVEN),
-                previous_response_id=previous_response_id,
+                previous_response_id=previous_response_id or NOT_GIVEN,
                 reasoning=reasoning,
                 user=model_settings.get('openai_user', NOT_GIVEN),
                 text=text or NOT_GIVEN,
@@ -1361,6 +1369,7 @@ class OpenAIResponsesModel(Model):
             elif isinstance(item, ToolCallPart):
                 call_id = _guard_tool_call_id(t=item)
                 call_id, id = _split_combined_tool_call_id(call_id)
+                id = id or item.id
 
                 param = responses.ResponseFunctionToolCallParam(
                     name=item.tool_name,
@@ -1724,7 +1733,8 @@ class OpenAIResponsesStreamedResponse(StreamedResponse):
                     vendor_part_id=chunk.item.id,
                     tool_name=chunk.item.name,
                     args=chunk.item.arguments,
-                    tool_call_id=_combine_tool_call_ids(chunk.item.call_id, chunk.item.id),
+                    tool_call_id=chunk.item.call_id,
+                    id=chunk.item.id,
                 )
             elif isinstance(chunk.item, responses.ResponseReasoningItem):
                 pass
@@ -1963,18 +1973,15 @@ def _map_usage(response: chat.ChatCompletion | ChatCompletionChunk | responses.R
     return u
 
 
-def _combine_tool_call_ids(call_id: str, id: str | None) -> str:
+def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
     # When reasoning, the Responses API requires the `ResponseFunctionToolCall` to be returned with both the `call_id` and `id` fields.
-    # Our `ToolCallPart` has only the `call_id` field, so we combine the two fields into a single string.
-    return f'{call_id}|{id}' if id else call_id
+    # Before our `ToolCallPart` gained the `id` field alongside `tool_call_id` field, we combined the two fields into a single string stored on `tool_call_id`.
 
-
-def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
     if '|' in combined_id:
         call_id, id = combined_id.split('|', 1)
         return call_id, id
     else:
-        return combined_id, None # pragma: no cover
+        return combined_id, None
 
 
 def _map_code_interpreter_tool_call(
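
Note: `_combine_tool_call_ids` is removed; newly built parts store the two identifiers separately, and `_split_combined_tool_call_id` only remains to handle message history that still carries the old combined `call_id|id` string. A standalone sketch of the same split logic (the helper itself is private to `pydantic_ai.models.openai`):

    def split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]:
        # Legacy histories may encode 'call_id|item_id' in tool_call_id;
        # anything without a '|' is already a plain call_id.
        if '|' in combined_id:
            call_id, item_id = combined_id.split('|', 1)
            return call_id, item_id
        return combined_id, None

    assert split_combined_tool_call_id('call_abc|fc_123') == ('call_abc', 'fc_123')
    assert split_combined_tool_call_id('call_abc') == ('call_abc', None)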

pydantic_ai/providers/__init__.py
@@ -142,6 +142,10 @@ def infer_provider_class(provider: str) -> type[Provider[Any]]: # noqa: C901
         from .litellm import LiteLLMProvider
 
         return LiteLLMProvider
+    elif provider == 'nebius':
+        from .nebius import NebiusProvider
+
+        return NebiusProvider
     else:  # pragma: no cover
         raise ValueError(f'Unknown provider: {provider}')
 

pydantic_ai/providers/nebius.py (new file)
@@ -0,0 +1,102 @@
+from __future__ import annotations as _annotations
+
+import os
+from typing import overload
+
+import httpx
+
+from pydantic_ai import ModelProfile
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.models import cached_async_http_client
+from pydantic_ai.profiles.deepseek import deepseek_model_profile
+from pydantic_ai.profiles.google import google_model_profile
+from pydantic_ai.profiles.harmony import harmony_model_profile
+from pydantic_ai.profiles.meta import meta_model_profile
+from pydantic_ai.profiles.mistral import mistral_model_profile
+from pydantic_ai.profiles.moonshotai import moonshotai_model_profile
+from pydantic_ai.profiles.openai import OpenAIJsonSchemaTransformer, OpenAIModelProfile
+from pydantic_ai.profiles.qwen import qwen_model_profile
+from pydantic_ai.providers import Provider
+
+try:
+    from openai import AsyncOpenAI
+except ImportError as _import_error:  # pragma: no cover
+    raise ImportError(
+        'Please install the `openai` package to use the Nebius provider, '
+        'you can use the `openai` optional group — `pip install "pydantic-ai-slim[openai]"`'
+    ) from _import_error
+
+
+class NebiusProvider(Provider[AsyncOpenAI]):
+    """Provider for Nebius AI Studio API."""
+
+    @property
+    def name(self) -> str:
+        return 'nebius'
+
+    @property
+    def base_url(self) -> str:
+        return 'https://api.studio.nebius.com/v1'
+
+    @property
+    def client(self) -> AsyncOpenAI:
+        return self._client
+
+    def model_profile(self, model_name: str) -> ModelProfile | None:
+        provider_to_profile = {
+            'meta-llama': meta_model_profile,
+            'deepseek-ai': deepseek_model_profile,
+            'qwen': qwen_model_profile,
+            'google': google_model_profile,
+            'openai': harmony_model_profile,  # used for gpt-oss models on Nebius
+            'mistralai': mistral_model_profile,
+            'moonshotai': moonshotai_model_profile,
+        }
+
+        profile = None
+
+        try:
+            model_name = model_name.lower()
+            provider, model_name = model_name.split('/', 1)
+        except ValueError:
+            raise UserError(f"Model name must be in 'provider/model' format, got: {model_name!r}")
+        if provider in provider_to_profile:
+            profile = provider_to_profile[provider](model_name)
+
+        # As NebiusProvider is always used with OpenAIChatModel, which used to unconditionally use OpenAIJsonSchemaTransformer,
+        # we need to maintain that behavior unless json_schema_transformer is set explicitly
+        return OpenAIModelProfile(json_schema_transformer=OpenAIJsonSchemaTransformer).update(profile)
+
+    @overload
+    def __init__(self) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str) -> None: ...
+
+    @overload
+    def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ...
+
+    @overload
+    def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ...
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        openai_client: AsyncOpenAI | None = None,
+        http_client: httpx.AsyncClient | None = None,
+    ) -> None:
+        api_key = api_key or os.getenv('NEBIUS_API_KEY')
+        if not api_key and openai_client is None:
+            raise UserError(
+                'Set the `NEBIUS_API_KEY` environment variable or pass it via '
+                '`NebiusProvider(api_key=...)` to use the Nebius AI Studio provider.'
+            )
+
+        if openai_client is not None:
+            self._client = openai_client
+        elif http_client is not None:
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
+        else:
+            http_client = cached_async_http_client(provider='nebius')
+            self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client)
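
Note: a minimal usage sketch for the new provider with `OpenAIChatModel`; the model name is a placeholder and must follow the `provider/model` format that `model_profile` expects:

    from pydantic_ai import Agent
    from pydantic_ai.models.openai import OpenAIChatModel
    from pydantic_ai.providers.nebius import NebiusProvider

    # Pass the key explicitly, or rely on the NEBIUS_API_KEY environment variable.
    model = OpenAIChatModel(
        'meta-llama/Meta-Llama-3.1-8B-Instruct',
        provider=NebiusProvider(api_key='your-nebius-api-key'),
    )
    agent = Agent(model)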