mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
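The renames above (items 120-169 and 247-251) show the mirascope.llm.clients package being restructured into mirascope.llm.providers, with each provider's clients.py replaced by a provider.py and base/client.py becoming base/base_provider.py. Below is a minimal post-upgrade sanity check; it uses only module paths taken directly from the file list, and assumes nothing about the new package's exported names:

import importlib

# Old -> new module paths, read directly off the renames in the file list.
MOVES = {
    "mirascope.llm.clients.base": "mirascope.llm.providers.base",
    "mirascope.llm.clients.anthropic": "mirascope.llm.providers.anthropic",
    "mirascope.llm.clients.openai.responses": "mirascope.llm.providers.openai.responses",
}

for old, new in MOVES.items():
    try:
        importlib.import_module(new)
        print(f"ok: {old} -> {new}")
    except ModuleNotFoundError:
        print(f"not found (still on 2.0.0a2?): {new}")

The deletions of mirascope/graphs and mirascope/llm/agents, alongside the new mirascope/ops tree, suggest 2.0.0a4 also drops the agent/graph modules in favor of tracing and versioning tooling.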
--- a/mirascope/llm/clients/openai/responses/clients.py
+++ /dev/null
@@ -1,832 +0,0 @@
-"""OpenAI Responses API client implementation."""
-
-import os
-from collections.abc import Sequence
-from contextvars import ContextVar
-from functools import lru_cache
-from typing import overload
-from typing_extensions import Unpack
-
-from openai import AsyncOpenAI, OpenAI
-
-from ....context import Context, DepsT
-from ....formatting import Format, FormattableT
-from ....messages import Message
-from ....responses import (
-    AsyncContextResponse,
-    AsyncContextStreamResponse,
-    AsyncResponse,
-    AsyncStreamResponse,
-    ContextResponse,
-    ContextStreamResponse,
-    Response,
-    StreamResponse,
-)
-from ....tools import (
-    AsyncContextTool,
-    AsyncContextToolkit,
-    AsyncTool,
-    AsyncToolkit,
-    ContextTool,
-    ContextToolkit,
-    Tool,
-    Toolkit,
-)
-from ...base import BaseClient, Params
-from . import _utils
-from .model_ids import OpenAIResponsesModelId
-
-OPENAI_RESPONSES_CLIENT_CONTEXT: ContextVar["OpenAIResponsesClient | None"] = (
-    ContextVar("OPENAI_RESPONSES_CLIENT_CONTEXT", default=None)
-)
-
-
-@lru_cache(maxsize=256)
-def _openai_responses_singleton(
-    api_key: str | None, base_url: str | None
-) -> "OpenAIResponsesClient":
-    """Return a cached `OpenAIResponsesClient` instance for the given parameters."""
-    return OpenAIResponsesClient(api_key=api_key, base_url=base_url)
-
-
-def client(
-    *, api_key: str | None = None, base_url: str | None = None
-) -> "OpenAIResponsesClient":
-    """Return an `OpenAIResponsesClient`."""
-    api_key = api_key or os.getenv("OPENAI_API_KEY")
-    base_url = base_url or os.getenv("OPENAI_BASE_URL")
-    return _openai_responses_singleton(api_key, base_url)
-
-
-def get_client() -> "OpenAIResponsesClient":
-    """Get the current `OpenAIResponsesClient` from context."""
-    current_client = OPENAI_RESPONSES_CLIENT_CONTEXT.get()
-    if current_client is None:
-        current_client = client()
-        OPENAI_RESPONSES_CLIENT_CONTEXT.set(current_client)
-    return current_client
-
-
-class OpenAIResponsesClient(BaseClient[OpenAIResponsesModelId, OpenAI]):
-    """The client for the OpenAI Responses API."""
-
-    @property
-    def _context_var(self) -> ContextVar["OpenAIResponsesClient | None"]:
-        return OPENAI_RESPONSES_CLIENT_CONTEXT
-
-    def __init__(
-        self, *, api_key: str | None = None, base_url: str | None = None
-    ) -> None:
-        """Initialize the OpenAI Responses client."""
-        self.client = OpenAI(api_key=api_key, base_url=base_url)
-        self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> Response:
-        """Generate an `llm.Response` without a response format."""
-        ...
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> Response[FormattableT]:
-        """Generate an `llm.Response` with a response format."""
-        ...
-
-    @overload
-    def call(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` with optional response format."""
-        ...
-
-    def call(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> Response | Response[FormattableT]:
-        """Generate an `llm.Response` by synchronously calling the OpenAI Responses API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.Response` object containing the LLM-generated content.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_response = self.client.responses.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return Response(
-            raw=openai_response,
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse:
-        """Generate an `llm.AsyncResponse` without a response format."""
-        ...
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` with a response format."""
-        ...
-
-    @overload
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` with optional response format."""
-        ...
-
-    async def call_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI Responses API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            An `llm.AsyncResponse` object containing the LLM-generated content.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_response = await self.async_client.responses.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return AsyncResponse(
-            raw=openai_response,
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse:
-        """Generate a `llm.StreamResponse` without a response format."""
-        ...
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> StreamResponse[FormattableT]:
-        """Generate a `llm.StreamResponse` with a response format."""
-        ...
-
-    @overload
-    def stream(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Generate a `llm.StreamResponse` with optional response format."""
-        ...
-
-    def stream(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> StreamResponse | StreamResponse[FormattableT]:
-        """Generate a `llm.StreamResponse` by synchronously streaming from the OpenAI Responses API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            A `llm.StreamResponse` object containing the LLM-generated content stream.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = self.client.responses.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_stream(
-            openai_stream,
-        )
-
-        return StreamResponse(
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse:
-        """Generate a `llm.AsyncStreamResponse` without a response format."""
-        ...
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse[FormattableT]:
-        """Generate a `llm.AsyncStreamResponse` with a response format."""
-        ...
-
-    @overload
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Generate a `llm.AsyncStreamResponse` with optional response format."""
-        ...
-
-    async def stream_async(
-        self,
-        *,
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-        """Generate a `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI Responses API.
-
-        Args:
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            A `llm.AsyncStreamResponse` object containing the LLM-generated content stream.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = await self.async_client.responses.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_async_stream(
-            openai_stream,
-        )
-
-        return AsyncStreamResponse(
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT]:
-        """Generate a `llm.ContextResponse` without a response format."""
-        ...
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT, FormattableT]:
-        """Generate a `llm.ContextResponse` with a response format."""
-        ...
-
-    @overload
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
-        """Generate a `llm.ContextResponse` with optional response format."""
-        ...
-
-    def context_call(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
-        """Generate a `llm.ContextResponse` by synchronously calling the OpenAI Responses API with context.
-
-        Args:
-            ctx: The context object containing dependencies.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            A `llm.ContextResponse` object containing the LLM-generated content and context.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_response = self.client.responses.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return ContextResponse(
-            raw=openai_response,
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT]:
-        """Generate a `llm.AsyncContextResponse` without a response format."""
-        ...
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT, FormattableT]:
-        """Generate a `llm.AsyncContextResponse` with a response format."""
-        ...
-
-    @overload
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate a `llm.AsyncContextResponse` with optional response format."""
-        ...
-
-    async def context_call_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
-        """Generate a `llm.AsyncContextResponse` by asynchronously calling the OpenAI Responses API with context.
-
-        Args:
-            ctx: The context object containing dependencies.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            A `llm.AsyncContextResponse` object containing the LLM-generated content and context.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_response = await self.async_client.responses.create(**kwargs)
-
-        assistant_message, finish_reason = _utils.decode_response(
-            openai_response, model_id
-        )
-
-        return AsyncContextResponse(
-            raw=openai_response,
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            assistant_message=assistant_message,
-            finish_reason=finish_reason,
-            format=format,
-        )
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT]:
-        """Generate a `llm.ContextStreamResponse` without a response format."""
-        ...
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT, FormattableT]:
-        """Generate a `llm.ContextStreamResponse` with a response format."""
-        ...
-
-    @overload
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Generate a `llm.ContextStreamResponse` with optional response format."""
-        ...
-
-    def context_stream(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[Tool | ContextTool[DepsT]]
-        | ContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-        """Generate a `llm.ContextStreamResponse` by synchronously streaming from the OpenAI Responses API with context.
-
-        Args:
-            ctx: The context object containing dependencies.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            A `llm.ContextStreamResponse` object containing the LLM-generated content stream and context.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = self.client.responses.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_stream(
-            openai_stream,
-        )
-
-        return ContextStreamResponse(
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: None = None,
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse[DepsT]:
-        """Generate a `llm.AsyncContextStreamResponse` without a response format."""
-        ...
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT],
-        **params: Unpack[Params],
-    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
-        """Generate a `llm.AsyncContextStreamResponse` with a response format."""
-        ...
-
-    @overload
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> (
-        AsyncContextStreamResponse[DepsT]
-        | AsyncContextStreamResponse[DepsT, FormattableT]
-    ):
-        """Generate a `llm.AsyncContextStreamResponse` with optional response format."""
-        ...
-
-    async def context_stream_async(
-        self,
-        *,
-        ctx: Context[DepsT],
-        model_id: OpenAIResponsesModelId,
-        messages: Sequence[Message],
-        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-        | AsyncContextToolkit[DepsT]
-        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
-        **params: Unpack[Params],
-    ) -> (
-        AsyncContextStreamResponse[DepsT]
-        | AsyncContextStreamResponse[DepsT, FormattableT]
-    ):
-        """Generate a `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI Responses API with context.
-
-        Args:
-            ctx: The context object containing dependencies.
-            model_id: Model identifier to use.
-            messages: Messages to send to the LLM.
-            tools: Optional tools that the model may invoke.
-            format: Optional response format specifier.
-            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-        Returns:
-            A `llm.AsyncContextStreamResponse` object containing the LLM-generated content stream and context.
-        """
-        messages, format, kwargs = _utils.encode_request(
-            model_id=model_id,
-            messages=messages,
-            tools=tools,
-            format=format,
-            params=params,
-        )
-
-        openai_stream = await self.async_client.responses.create(
-            **kwargs,
-            stream=True,
-        )
-
-        chunk_iterator = _utils.decode_async_stream(
-            openai_stream,
-        )
-
-        return AsyncContextStreamResponse(
-            provider="openai:responses",
-            model_id=model_id,
-            params=params,
-            tools=tools,
-            input_messages=messages,
-            chunk_iterator=chunk_iterator,
-            format=format,
-        )
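For reference, the deleted module managed client instances with an lru_cache-memoized constructor keyed on (api_key, base_url) plus a ContextVar holding the active client. A self-contained sketch of that pattern, with _Client as a hypothetical stand-in for OpenAIResponsesClient:

from contextvars import ContextVar
from functools import lru_cache


class _Client:
    # Stand-in for the removed OpenAIResponsesClient; not a Mirascope type.
    def __init__(self, api_key: str | None, base_url: str | None) -> None:
        self.api_key, self.base_url = api_key, base_url


_CLIENT_CONTEXT: ContextVar[_Client | None] = ContextVar(
    "_CLIENT_CONTEXT", default=None
)


@lru_cache(maxsize=256)
def _singleton(api_key: str | None, base_url: str | None) -> _Client:
    # One cached instance per (api_key, base_url) pair, as in the deleted
    # _openai_responses_singleton above.
    return _Client(api_key, base_url)


def get_client() -> _Client:
    # Check the context first, then fall back to the cached default,
    # mirroring the deleted get_client.
    current = _CLIENT_CONTEXT.get()
    if current is None:
        current = _singleton(None, None)
        _CLIENT_CONTEXT.set(current)
    return current


assert get_client() is get_client()  # resolved once, then reused from context
assert _singleton("k", None) is _singleton("k", None)  # memoized per parameters

In 2.0.0a4 this per-client machinery disappears; the new providers/provider_registry.py and providers/load_provider.py presumably centralize the equivalent lookup.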