mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/openai/responses/provider.py
@@ -0,0 +1,469 @@
+ """OpenAI Responses API provider implementation."""
+
+ from collections.abc import Sequence
+ from typing_extensions import Unpack
+
+ from openai import AsyncOpenAI, OpenAI
+
+ from ....context import Context, DepsT
+ from ....formatting import Format, FormattableT
+ from ....messages import Message
+ from ....responses import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncResponse,
+     AsyncStreamResponse,
+     ContextResponse,
+     ContextStreamResponse,
+     Response,
+     StreamResponse,
+ )
+ from ....tools import (
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncTool,
+     AsyncToolkit,
+     ContextTool,
+     ContextToolkit,
+     Tool,
+     Toolkit,
+ )
+ from ...base import BaseProvider, Params
+ from ..model_id import OpenAIModelId, model_name
+ from . import _utils
+
+
+ class OpenAIResponsesProvider(BaseProvider[OpenAI]):
+     """The provider for the OpenAI Responses API."""
+
+     id = "openai:responses"
+     default_scope = "openai/"
+
+     def __init__(
+         self,
+         *,
+         api_key: str | None = None,
+         base_url: str | None = None,
+     ) -> None:
+         """Initialize the OpenAI Responses provider."""
+         self.client = OpenAI(api_key=api_key, base_url=base_url)
+         self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+
+     def _call(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` by synchronously calling the OpenAI Responses API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.Response` object containing the LLM-generated content.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = self.client.responses.create(**kwargs)
+
+         assistant_message, finish_reason, usage = _utils.decode_response(
+             openai_response, model_id, self.id
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return Response(
+             raw=openai_response,
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             usage=usage,
+             format=format,
+         )
+
+     async def _call_async(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI Responses API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncResponse` object containing the LLM-generated content.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = await self.async_client.responses.create(**kwargs)
+
+         assistant_message, finish_reason, usage = _utils.decode_response(
+             openai_response, model_id, self.id
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return AsyncResponse(
+             raw=openai_response,
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             usage=usage,
+             format=format,
+         )
+
+     def _stream(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Generate a `llm.StreamResponse` by synchronously streaming from the OpenAI Responses API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             A `llm.StreamResponse` object containing the LLM-generated content stream.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = self.client.responses.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_stream(
+             openai_stream,
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return StreamResponse(
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     async def _stream_async(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI Responses API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncStreamResponse` object containing the LLM-generated content stream.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = await self.async_client.responses.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_async_stream(
+             openai_stream,
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return AsyncStreamResponse(
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     def _context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
+         """Generate a `llm.ContextResponse` by synchronously calling the OpenAI Responses API with context.
+
+         Args:
+             ctx: The context object containing dependencies.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             A `llm.ContextResponse` object containing the LLM-generated content and context.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = self.client.responses.create(**kwargs)
+
+         assistant_message, finish_reason, usage = _utils.decode_response(
+             openai_response, model_id, self.id
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return ContextResponse(
+             raw=openai_response,
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             usage=usage,
+             format=format,
+         )
+
+     async def _context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI Responses API with context.
+
+         Args:
+             ctx: The context object containing dependencies.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextResponse` object containing the LLM-generated content and context.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = await self.async_client.responses.create(**kwargs)
+
+         assistant_message, finish_reason, usage = _utils.decode_response(
+             openai_response, model_id, self.id
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return AsyncContextResponse(
+             raw=openai_response,
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             usage=usage,
+             format=format,
+         )
+
+     def _context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+         """Generate a `llm.ContextStreamResponse` by synchronously streaming from the OpenAI Responses API with context.
+
+         Args:
+             ctx: The context object containing dependencies.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             A `llm.ContextStreamResponse` object containing the LLM-generated content stream and context.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = self.client.responses.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_stream(
+             openai_stream,
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return ContextStreamResponse(
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     async def _context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> (
+         AsyncContextStreamResponse[DepsT]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI Responses API with context.
+
+         Args:
+             ctx: The context object containing dependencies.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextStreamResponse` object containing the LLM-generated content stream and context.
+         """
+         messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = await self.async_client.responses.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_async_stream(
+             openai_stream,
+         )
+         provider_model_name = model_name(model_id, "responses")
+
+         return AsyncContextStreamResponse(
+             provider_id=self.id,
+             model_id=model_id,
+             provider_model_name=provider_model_name,
+             params=params,
+             tools=tools,
+             input_messages=messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
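For orientation, here is a minimal sketch of how this provider class might be wired up through the registry introduced later in this diff. It assumes `OpenAIResponsesProvider` is re-exported via `llm.providers` and that `llm.register_provider` accepts provider instances, as the `register_provider` docstring below suggests; the scope and key are illustrative.

```python
from mirascope import llm

# Hypothetical wiring: route all "openai/" model IDs explicitly through
# the Responses API rather than the default OpenAIProvider routing.
provider = llm.providers.OpenAIResponsesProvider(api_key="sk-...")  # key is illustrative
llm.register_provider(provider, scope="openai/")
```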
mirascope/llm/providers/provider_id.py
@@ -0,0 +1,23 @@
+ """Identifiers for all registered providers."""
+
+ from typing import Literal, TypeAlias, get_args
+
+ KnownProviderId: TypeAlias = Literal[
+     "anthropic",  # Anthropic provider via AnthropicProvider
+     "anthropic-beta",  # Anthropic beta provider via AnthropicBetaProvider
+     "google",  # Google provider via GoogleProvider
+     "mlx",  # Local inference powered by `mlx-lm`, via MLXProvider
+     "ollama",  # Ollama provider via OllamaProvider
+     "openai",  # OpenAI provider via OpenAIProvider (prefers Responses routing when available)
+     "together",  # Together AI provider via TogetherProvider
+ ]
+ KNOWN_PROVIDER_IDS = get_args(KnownProviderId)
+
+ ProviderId = KnownProviderId | str
+
+ OpenAICompletionsCompatibleProviderId: TypeAlias = Literal[
+     "ollama",  # Ollama (OpenAI-compatible)
+     "openai",  # OpenAI via OpenAIProvider (routes to completions)
+     "openai:completions",  # OpenAI Completions API directly
+     "together",  # Together AI (OpenAI-compatible)
+ ]
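Because `KNOWN_PROVIDER_IDS` is derived from the `Literal` via `get_args`, the alias doubles as a runtime source of truth. A small sketch of how a caller might validate an arbitrary ID against it (the helper below is hypothetical, not part of the package):

```python
# Hypothetical helper; assumes the module path shown in this diff.
from mirascope.llm.providers.provider_id import KNOWN_PROVIDER_IDS


def is_known_provider(provider_id: str) -> bool:
    # get_args(KnownProviderId) yields the Literal members as a runtime tuple,
    # so membership testing mirrors the static type exactly.
    return provider_id in KNOWN_PROVIDER_IDS
```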
mirascope/llm/providers/provider_registry.py
@@ -0,0 +1,169 @@
+ """Provider registry for managing provider instances and scopes."""
+
+ from typing import overload
+
+ from ..exceptions import NoRegisteredProviderError
+ from .base import Provider
+ from .load_provider import load_provider
+ from .provider_id import ProviderId
+
+ # Global registry mapping scopes to providers
+ # Scopes are matched by prefix (longest match wins)
+ PROVIDER_REGISTRY: dict[str, Provider] = {}
+
+ # Default auto-registration mapping for built-in providers
+ # These providers will be automatically registered on first use
+ DEFAULT_AUTO_REGISTER_SCOPES: dict[str, ProviderId] = {
+     "anthropic/": "anthropic",
+     "google/": "google",
+     "mlx-community/": "mlx",
+     "ollama/": "ollama",
+     "openai/": "openai",
+     "together/": "together",
+ }
+
+
+ @overload
+ def register_provider(
+     provider: Provider,
+     scope: str | list[str] | None = None,
+ ) -> Provider:
+     """Register a provider instance with scope(s).
+
+     Args:
+         provider: Provider instance to register.
+         scope: Scope string or list of scopes (e.g., "anthropic/", ["anthropic/", "openai/"]).
+             If None, uses the provider's default_scope.
+     """
+     ...
+
+
+ @overload
+ def register_provider(
+     provider: ProviderId,
+     scope: str | list[str] | None = None,
+     *,
+     api_key: str | None = None,
+     base_url: str | None = None,
+ ) -> Provider:
+     """Register a provider by ID with scope(s).
+
+     Args:
+         provider: Provider ID string (e.g., "anthropic", "openai").
+         scope: Scope string or list of scopes (e.g., "anthropic/", ["anthropic/", "openai/"]).
+             If None, uses the provider's default_scope.
+         api_key: API key for authentication.
+         base_url: Base URL for the API.
+     """
+     ...
+
+
+ def register_provider(
+     provider: ProviderId | Provider,
+     scope: str | list[str] | None = None,
+     *,
+     api_key: str | None = None,
+     base_url: str | None = None,
+ ) -> Provider:
+     """Register a provider with scope(s) in the global registry.
+
+     Scopes use prefix matching on model IDs:
+     - "anthropic/" matches "anthropic/*"
+     - "anthropic/claude-4-5" matches "anthropic/claude-4-5*"
+     - "anthropic/claude-4-5-sonnet" matches exactly "anthropic/claude-4-5-sonnet"
+
+     When multiple scopes match a model_id, the longest match wins.
+
+     Args:
+         provider: Either a provider ID string or a provider instance.
+         scope: Scope string or list of scopes for prefix matching on model IDs.
+             If None, uses the provider's default_scope attribute.
+             Can be a single string or a list of strings.
+         api_key: API key for authentication (only used if provider is a string).
+         base_url: Base URL for the API (only used if provider is a string).
+
+     Example:
+         ```python
+         # Register with default scope
+         llm.register_provider("anthropic", api_key="key")
+
+         # Register for specific models
+         llm.register_provider("openai", scope="openai/gpt-4")
+
+         # Register for multiple scopes
+         llm.register_provider("aws-bedrock", scope=["anthropic/", "openai/"])
+
+         # Register a custom instance
+         custom = llm.providers.AnthropicProvider(api_key="team-key")
+         llm.register_provider(custom, scope="anthropic/claude-4-5-sonnet")
+         ```
+     """
+     if isinstance(provider, str):
+         provider = load_provider(provider, api_key=api_key, base_url=base_url)
+
+     if scope is None:
+         scope = provider.default_scope
+
+     scopes = [scope] if isinstance(scope, str) else scope
+     for s in scopes:
+         PROVIDER_REGISTRY[s] = provider
+
+     return provider
+
+
+ def get_provider_for_model(model_id: str) -> Provider:
+     """Get the provider for a model_id based on the registry.
+
+     Uses longest prefix matching to find the most specific provider for the model.
+     If no explicit registration is found, checks for auto-registration defaults
+     and automatically registers the provider on first use.
+
+     Args:
+         model_id: The full model ID (e.g., "anthropic/claude-4-5-sonnet").
+
+     Returns:
+         The provider instance registered for this model.
+
+     Raises:
+         NoRegisteredProviderError: If no provider is registered or available for this model.
+
+     Example:
+         ```python
+         # Assuming providers are registered:
+         # - "anthropic/" -> AnthropicProvider()
+         # - "anthropic/claude-4-5-sonnet" -> CustomProvider()
+
+         provider = get_provider_for_model("anthropic/claude-4-5-sonnet")
+         # Returns CustomProvider (longest match)
+
+         provider = get_provider_for_model("anthropic/claude-3-opus")
+         # Returns AnthropicProvider (matches "anthropic/" prefix)
+
+         # Auto-registration on first use:
+         provider = get_provider_for_model("openai/gpt-4")
+         # Automatically loads and registers OpenAIProvider() for "openai/"
+         ```
+     """
+     # Try explicit registry first (longest match wins)
+     matching_scopes = [
+         scope for scope in PROVIDER_REGISTRY if model_id.startswith(scope)
+     ]
+     if matching_scopes:
+         best_scope = max(matching_scopes, key=len)
+         return PROVIDER_REGISTRY[best_scope]
+
+     # Fall back to auto-registration
+     matching_defaults = [
+         scope for scope in DEFAULT_AUTO_REGISTER_SCOPES if model_id.startswith(scope)
+     ]
+     if matching_defaults:
+         best_scope = max(matching_defaults, key=len)
+         provider_id = DEFAULT_AUTO_REGISTER_SCOPES[best_scope]
+         provider = load_provider(provider_id)
+         # Auto-register for future calls
+         PROVIDER_REGISTRY[best_scope] = provider
+         return provider
+
+     # No provider found
+     raise NoRegisteredProviderError(model_id)
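The longest-prefix rule above is the heart of scope resolution. A dependency-free sketch of the same matching logic, using the scopes from the docstring example:

```python
# Standalone illustration of longest-prefix scope matching.
registry: dict[str, str] = {
    "anthropic/": "AnthropicProvider",
    "anthropic/claude-4-5-sonnet": "CustomProvider",
}


def resolve(model_id: str) -> str:
    matches = [scope for scope in registry if model_id.startswith(scope)]
    if not matches:
        raise LookupError(f"no provider registered for {model_id!r}")
    return registry[max(matches, key=len)]  # longest match wins


assert resolve("anthropic/claude-4-5-sonnet") == "CustomProvider"
assert resolve("anthropic/claude-3-opus") == "AnthropicProvider"
```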
mirascope/llm/providers/together/__init__.py
@@ -0,0 +1,19 @@
+ """Together AI provider implementation."""
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from .provider import TogetherProvider
+ else:
+     try:
+         from .provider import TogetherProvider
+     except ImportError:  # pragma: no cover
+         from .._missing_import_stubs import (
+             create_provider_stub,
+         )
+
+         TogetherProvider = create_provider_stub("openai", "TogetherProvider")
+
+ __all__ = [
+     "TogetherProvider",
+ ]
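This `TYPE_CHECKING`/`try`-`except` shape is the package's guard for optional provider dependencies: type checkers always resolve the real class, while at runtime a missing extra is swapped for a stub that fails loudly on use. A generic, self-contained sketch of the pattern (`heavy_extra` and the inline stub are illustrative; the internals of `create_provider_stub` are not shown in this diff):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from heavy_extra import RealThing  # type checkers always see the real class
else:
    try:
        from heavy_extra import RealThing
    except ImportError:

        class RealThing:  # stand-in for what create_provider_stub produces
            def __init__(self, *args: object, **kwargs: object) -> None:
                raise ImportError("install the 'heavy_extra' extra to use RealThing")
```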