mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
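The rename entries above (`mirascope/llm/{clients → providers}/…`) show the `mirascope.llm.clients` package becoming `mirascope.llm.providers` in this release, with per-provider `provider.py` modules replacing the old `clients.py` modules. A minimal check of that move, assuming only that the wheel's importable packages mirror the file paths listed here (the public re-exports inside each `__init__.py` are not shown in this diff):

import importlib.util

# True on 2.0.0a2, False on 2.0.0a4: the clients package is removed (see below).
has_clients = importlib.util.find_spec("mirascope.llm.clients") is not None

# False on 2.0.0a2, True on 2.0.0a4: the providers package replaces it.
has_providers = importlib.util.find_spec("mirascope.llm.providers") is not None

print(f"clients={has_clients} providers={has_providers}")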
mirascope/llm/clients/anthropic/clients.py DELETED
@@ -1,819 +0,0 @@
- """Anthropic client implementation."""
-
- import os
- from collections.abc import Sequence
- from contextvars import ContextVar
- from functools import lru_cache
- from typing import overload
- from typing_extensions import Unpack
-
- from anthropic import Anthropic, AsyncAnthropic
-
- from ...context import Context, DepsT
- from ...formatting import Format, FormattableT
- from ...messages import Message
- from ...responses import (
-     AsyncContextResponse,
-     AsyncContextStreamResponse,
-     AsyncResponse,
-     AsyncStreamResponse,
-     ContextResponse,
-     ContextStreamResponse,
-     Response,
-     StreamResponse,
- )
- from ...tools import (
-     AsyncContextTool,
-     AsyncContextToolkit,
-     AsyncTool,
-     AsyncToolkit,
-     ContextTool,
-     ContextToolkit,
-     Tool,
-     Toolkit,
- )
- from ..base import BaseClient, Params
- from . import _utils
- from .model_ids import AnthropicModelId
-
- ANTHROPIC_CLIENT_CONTEXT: ContextVar["AnthropicClient | None"] = ContextVar(
-     "ANTHROPIC_CLIENT_CONTEXT", default=None
- )
-
-
- @lru_cache(maxsize=256)
- def _anthropic_singleton(
-     api_key: str | None, base_url: str | None
- ) -> "AnthropicClient":
-     """Return a cached Anthropic client instance for the given parameters."""
-     return AnthropicClient(api_key=api_key, base_url=base_url)
-
-
- def client(
-     *, api_key: str | None = None, base_url: str | None = None
- ) -> "AnthropicClient":
-     """Create or retrieve an Anthropic client with the given parameters.
-
-     If a client has already been created with these parameters, it will be
-     retrieved from cache and returned.
-
-     Args:
-         api_key: API key for authentication. If None, uses ANTHROPIC_API_KEY env var.
-         base_url: Base URL for the API. If None, uses ANTHROPIC_BASE_URL env var.
-
-     Returns:
-         An Anthropic client instance.
-     """
-     api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
-     base_url = base_url or os.getenv("ANTHROPIC_BASE_URL")
-     return _anthropic_singleton(api_key, base_url)
-
-
- def get_client() -> "AnthropicClient":
-     """Retrieve the current Anthropic client from context, or a global default.
-
-     Returns:
-         The current Anthropic client from context if available, otherwise
-         a global default client based on environment variables.
-     """
-     ctx_client = ANTHROPIC_CLIENT_CONTEXT.get()
-     return ctx_client or client()
-
-
- class AnthropicClient(BaseClient[AnthropicModelId, Anthropic]):
-     """The client for the Anthropic LLM model."""
-
-     @property
-     def _context_var(self) -> ContextVar["AnthropicClient | None"]:
-         return ANTHROPIC_CLIENT_CONTEXT
-
-     def __init__(
-         self, *, api_key: str | None = None, base_url: str | None = None
-     ) -> None:
-         """Initialize the Anthropic client."""
-         self.client = Anthropic(api_key=api_key, base_url=base_url)
-         self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)
-
-     @overload
-     def call(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> Response:
-         """Generate an `llm.Response` without a response format."""
-         ...
-
-     @overload
-     def call(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> Response[FormattableT]:
-         """Generate an `llm.Response` with a response format."""
-         ...
-
-     @overload
-     def call(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> Response | Response[FormattableT]:
-         """Generate an `llm.Response` with an optional response format."""
-         ...
-
-     def call(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> Response | Response[FormattableT]:
-         """Generate an `llm.Response` by synchronously calling the Anthropic Messages API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.Response` object containing the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_response = self.client.messages.create(**kwargs)
-
-         assistant_message, finish_reason = _utils.decode_response(
-             anthropic_response, model_id
-         )
-
-         return Response(
-             raw=anthropic_response,
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, None]:
-         """Generate an `llm.ContextResponse` without a response format."""
-         ...
-
-     @overload
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextResponse` with a response format."""
-         ...
-
-     @overload
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextResponse` with an optional response format."""
-         ...
-
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.ContextResponse` object containing the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_response = self.client.messages.create(**kwargs)
-
-         assistant_message, finish_reason = _utils.decode_response(
-             anthropic_response, model_id
-         )
-
-         return ContextResponse(
-             raw=anthropic_response,
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     async def call_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncResponse:
-         """Generate an `llm.AsyncResponse` without a response format."""
-         ...
-
-     @overload
-     async def call_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncResponse[FormattableT]:
-         """Generate an `llm.AsyncResponse` with a response format."""
-         ...
-
-     @overload
-     async def call_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncResponse | AsyncResponse[FormattableT]:
-         """Generate an `llm.AsyncResponse` with an optional response format."""
-         ...
-
-     async def call_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncResponse | AsyncResponse[FormattableT]:
-         """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncResponse` object containing the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_response = await self.async_client.messages.create(**kwargs)
-
-         assistant_message, finish_reason = _utils.decode_response(
-             anthropic_response, model_id
-         )
-
-         return AsyncResponse(
-             raw=anthropic_response,
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, None]:
-         """Generate an `llm.AsyncContextResponse` without a response format."""
-         ...
-
-     @overload
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextResponse` with a response format."""
-         ...
-
-     @overload
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextResponse` with an optional response format."""
-         ...
-
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncContextResponse` object containing the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_response = await self.async_client.messages.create(**kwargs)
-
-         assistant_message, finish_reason = _utils.decode_response(
-             anthropic_response, model_id
-         )
-
-         return AsyncContextResponse(
-             raw=anthropic_response,
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     def stream(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> StreamResponse:
-         """Stream an `llm.StreamResponse` without a response format."""
-         ...
-
-     @overload
-     def stream(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> StreamResponse[FormattableT]:
-         """Stream an `llm.StreamResponse` with a response format."""
-         ...
-
-     @overload
-     def stream(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> StreamResponse | StreamResponse[FormattableT]:
-         """Stream an `llm.StreamResponse` with an optional response format."""
-         ...
-
-     def stream(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> StreamResponse | StreamResponse[FormattableT]:
-         """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.StreamResponse` object for iterating over the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_stream = self.client.messages.stream(**kwargs)
-
-         chunk_iterator = _utils.decode_stream(anthropic_stream)
-
-         return StreamResponse(
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
-
-     @overload
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT]:
-         """Stream an `llm.ContextStreamResponse` without a response format."""
-         ...
-
-     @overload
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.ContextStreamResponse` with a response format."""
-         ...
-
-     @overload
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.ContextStreamResponse` with an optional response format."""
-         ...
-
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_stream = self.client.messages.stream(**kwargs)
-
-         chunk_iterator = _utils.decode_stream(anthropic_stream)
-
-         return ContextStreamResponse(
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
-
-     @overload
-     async def stream_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse:
-         """Stream an `llm.AsyncStreamResponse` without a response format."""
-         ...
-
-     @overload
-     async def stream_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse[FormattableT]:
-         """Stream an `llm.AsyncStreamResponse` with a response format."""
-         ...
-
-     @overload
-     async def stream_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-         """Stream an `llm.AsyncStreamResponse` with an optional response format."""
-         ...
-
-     async def stream_async(
-         self,
-         *,
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_stream = self.async_client.messages.stream(**kwargs)
-
-         chunk_iterator = _utils.decode_async_stream(anthropic_stream)
-
-         return AsyncStreamResponse(
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
-
-     @overload
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse[DepsT]:
-         """Stream an `llm.AsyncContextStreamResponse` without a response format."""
-         ...
-
-     @overload
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.AsyncContextStreamResponse` with a response format."""
-         ...
-
-     @overload
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
-         ...
-
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: AnthropicModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
-         """
-         input_messages, format, kwargs = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         anthropic_stream = self.async_client.messages.stream(**kwargs)
-
-         chunk_iterator = _utils.decode_async_stream(anthropic_stream)
-
-         return AsyncContextStreamResponse(
-             provider="anthropic",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
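
The module removed above implemented, per its own docstrings, a cached and context-aware client factory plus sync/async call and stream methods. A minimal usage sketch of that 2.0.0a2-era API, assuming `client` and `get_client` were re-exported from `mirascope.llm.clients.anthropic` (that package's `__init__.py` is also deleted in this release and its contents are not shown); none of this exists in 2.0.0a4:

# 2.0.0a2 only; the import path is an assumption noted above.
from mirascope.llm.clients.anthropic import client, get_client

a = client(api_key="sk-ant-...")  # builds an AnthropicClient for these parameters
b = client(api_key="sk-ant-...")  # same (api_key, base_url) -> the lru_cached instance
assert a is b

current = get_client()  # context-local client if one is set, else an env-based default

# Requests then went through the methods deleted above, e.g.
#   a.call(model_id=..., messages=..., tools=..., format=...)
# which encoded the request, invoked client.messages.create(...), and decoded
# the result into an llm.Response.

In 2.0.0a4 the equivalent surface lives under mirascope/llm/providers/anthropic/ (see provider.py and beta_provider.py in the file list above); its public API is not part of this hunk.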