mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
@@ -1,853 +0,0 @@
- """Google client implementation."""
-
- import os
- from collections.abc import Sequence
- from contextvars import ContextVar
- from functools import lru_cache
- from typing import overload
- from typing_extensions import Unpack
-
- from google.genai import Client
- from google.genai.types import HttpOptions
-
- from ...context import Context, DepsT
- from ...formatting import Format, FormattableT
- from ...messages import Message
- from ...responses import (
-     AsyncContextResponse,
-     AsyncContextStreamResponse,
-     AsyncResponse,
-     AsyncStreamResponse,
-     ContextResponse,
-     ContextStreamResponse,
-     Response,
-     StreamResponse,
- )
- from ...tools import (
-     AsyncContextTool,
-     AsyncContextToolkit,
-     AsyncTool,
-     AsyncToolkit,
-     ContextTool,
-     ContextToolkit,
-     Tool,
-     Toolkit,
- )
- from ..base import BaseClient, Params
- from . import _utils
- from .model_ids import GoogleModelId
-
- GOOGLE_CLIENT_CONTEXT: ContextVar["GoogleClient | None"] = ContextVar(
-     "GOOGLE_CLIENT_CONTEXT", default=None
- )
-
-
- @lru_cache(maxsize=256)
- def _google_singleton(api_key: str | None, base_url: str | None) -> "GoogleClient":
-     """Return a cached Google client instance for the given parameters."""
-     return GoogleClient(api_key=api_key, base_url=base_url)
-
-
- def client(
-     *, api_key: str | None = None, base_url: str | None = None
- ) -> "GoogleClient":
-     """Create or retrieve a Google client with the given parameters.
-
-     If a client has already been created with these parameters, it will be
-     retrieved from cache and returned.
-
-     Args:
-         api_key: API key for authentication. If None, uses GOOGLE_API_KEY env var.
-         base_url: Base URL for the API. If None, uses GOOGLE_BASE_URL env var.
-
-     Returns:
-         A Google client instance.
-     """
-     api_key = api_key or os.getenv("GOOGLE_API_KEY")
-     base_url = base_url or os.getenv("GOOGLE_BASE_URL")
-     return _google_singleton(api_key, base_url)
-
-
- def get_client() -> "GoogleClient":
-     """Retrieve the current Google client from context, or a global default.
-
-     Returns:
-         The current Google client from context if available, otherwise
-         a global default client based on environment variables.
-     """
-     ctx_client = GOOGLE_CLIENT_CONTEXT.get()
-     return ctx_client or client()
-
-
- class GoogleClient(BaseClient[GoogleModelId, Client]):
-     """The client for the Google LLM model."""
-
-     @property
-     def _context_var(self) -> ContextVar["GoogleClient | None"]:
-         return GOOGLE_CLIENT_CONTEXT
-
-     def __init__(
-         self, *, api_key: str | None = None, base_url: str | None = None
-     ) -> None:
-         """Initialize the Google client."""
-         http_options = None
-         if base_url:
-             http_options = HttpOptions(base_url=base_url)
-
-         self.client = Client(api_key=api_key, http_options=http_options)
-
-     @overload
-     def call(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> Response:
-         """Generate an `llm.Response` without a response format."""
-         ...
-
-     @overload
-     def call(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> Response[FormattableT]:
-         """Generate an `llm.Response` with a response format."""
-         ...
-
-     @overload
-     def call(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> Response | Response[FormattableT]:
-         """Generate an `llm.Response` with an optional response format."""
-         ...
-
-     def call(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> Response | Response[FormattableT]:
-         """Generate an `llm.Response` by synchronously calling the Google GenAI API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.Response` object containing the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_response = self.client.models.generate_content(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         assistant_message, finish_reason = _utils.decode_response(
-             google_response, model_id
-         )
-
-         return Response(
-             raw=google_response,
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, None]:
-         """Generate an `llm.ContextResponse` without a response format."""
-         ...
-
-     @overload
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextResponse` with a response format."""
-         ...
-
-     @overload
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextResponse` with an optional response format."""
-         ...
-
-     def context_call(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextResponse` by synchronously calling the Google GenAI API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.ContextResponse` object containing the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_response = self.client.models.generate_content(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         assistant_message, finish_reason = _utils.decode_response(
-             google_response, model_id
-         )
-
-         return ContextResponse(
-             raw=google_response,
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     async def call_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncResponse:
-         """Generate an `llm.AsyncResponse` without a response format."""
-         ...
-
-     @overload
-     async def call_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncResponse[FormattableT]:
-         """Generate an `llm.AsyncResponse` with a response format."""
-         ...
-
-     @overload
-     async def call_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncResponse | AsyncResponse[FormattableT]:
-         """Generate an `llm.AsyncResponse` with an optional response format."""
-         ...
-
-     async def call_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncResponse | AsyncResponse[FormattableT]:
-         """Generate an `llm.AsyncResponse` by asynchronously calling the Google GenAI API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncResponse` object containing the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_response = await self.client.aio.models.generate_content(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         assistant_message, finish_reason = _utils.decode_response(
-             google_response, model_id
-         )
-
-         return AsyncResponse(
-             raw=google_response,
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, None]:
-         """Generate an `llm.AsyncContextResponse` without a response format."""
-         ...
-
-     @overload
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextResponse` with a response format."""
-         ...
-
-     @overload
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextResponse` with an optional response format."""
-         ...
-
-     async def context_call_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Google GenAI API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncContextResponse` object containing the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_response = await self.client.aio.models.generate_content(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         assistant_message, finish_reason = _utils.decode_response(
-             google_response, model_id
-         )
-
-         return AsyncContextResponse(
-             raw=google_response,
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             assistant_message=assistant_message,
-             finish_reason=finish_reason,
-             format=format,
-         )
-
-     @overload
-     def stream(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> StreamResponse:
-         """Stream an `llm.StreamResponse` without a response format."""
-         ...
-
-     @overload
-     def stream(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> StreamResponse[FormattableT]:
-         """Stream an `llm.StreamResponse` with a response format."""
-         ...
-
-     @overload
-     def stream(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> StreamResponse | StreamResponse[FormattableT]:
-         """Stream an `llm.StreamResponse` with an optional response format."""
-         ...
-
-     def stream(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool] | Toolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> StreamResponse | StreamResponse[FormattableT]:
-         """Generate an `llm.StreamResponse` by synchronously streaming from the Google GenAI API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.StreamResponse` object for iterating over the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_stream = self.client.models.generate_content_stream(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         chunk_iterator = _utils.decode_stream(google_stream)
-
-         return StreamResponse(
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
-
-     @overload
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT]:
-         """Stream an `llm.ContextStreamResponse` without a response format."""
-         ...
-
-     @overload
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.ContextStreamResponse` with a response format."""
-         ...
-
-     @overload
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.ContextStreamResponse` with an optional response format."""
-         ...
-
-     def context_stream(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[Tool | ContextTool[DepsT]]
-         | ContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
-         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Google GenAI API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_stream = self.client.models.generate_content_stream(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         chunk_iterator = _utils.decode_stream(google_stream)
-
-         return ContextStreamResponse(
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
-
-     @overload
-     async def stream_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse:
-         """Stream an `llm.AsyncStreamResponse` without a response format."""
-         ...
-
-     @overload
-     async def stream_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse[FormattableT]:
-         """Stream an `llm.AsyncStreamResponse` with a response format."""
-         ...
-
-     @overload
-     async def stream_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-         """Stream an `llm.AsyncStreamResponse` with an optional response format."""
-         ...
-
-     async def stream_async(
-         self,
-         *,
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
-         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Google GenAI API.
-
-         Args:
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_stream = await self.client.aio.models.generate_content_stream(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         chunk_iterator = _utils.decode_async_stream(google_stream)
-
-         return AsyncStreamResponse(
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
-
-     @overload
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse[DepsT]:
-         """Stream an `llm.AsyncContextStreamResponse` without a response format."""
-         ...
-
-     @overload
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT],
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.AsyncContextStreamResponse` with a response format."""
-         ...
-
-     @overload
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None,
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-         """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
-         ...
-
-     async def context_stream_async(
-         self,
-         *,
-         ctx: Context[DepsT],
-         model_id: GoogleModelId,
-         messages: Sequence[Message],
-         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
-         | AsyncContextToolkit[DepsT]
-         | None = None,
-         format: type[FormattableT] | Format[FormattableT] | None = None,
-         **params: Unpack[Params],
-     ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
-         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Google GenAI API.
-
-         Args:
-             ctx: Context object with dependencies for tools.
-             model_id: Model identifier to use.
-             messages: Messages to send to the LLM.
-             tools: Optional tools that the model may invoke.
-             format: Optional response format specifier.
-             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
-
-         Returns:
-             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
-         """
-         input_messages, format, contents, config = _utils.encode_request(
-             model_id=model_id,
-             messages=messages,
-             tools=tools,
-             format=format,
-             params=params,
-         )
-
-         google_stream = await self.client.aio.models.generate_content_stream(
-             model=model_id,
-             contents=contents,
-             config=config,
-         )
-
-         chunk_iterator = _utils.decode_async_stream(google_stream)
-
-         return AsyncContextStreamResponse(
-             provider="google",
-             model_id=model_id,
-             params=params,
-             tools=tools,
-             input_messages=input_messages,
-             chunk_iterator=chunk_iterator,
-             format=format,
-         )
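
For reference, a minimal sketch of how the removed module's public helpers were used, based only on the code and docstrings above; the import path assumes `client` and `get_client` were re-exported from the package's `__init__.py`, which this diff does not show. In 2.0.0a4 this module is superseded by mirascope/llm/providers/google/provider.py.

    # Hypothetical usage against mirascope 2.0.0a2; the import path is an assumption.
    from mirascope.llm.clients.google import client, get_client

    # client() falls back to the GOOGLE_API_KEY / GOOGLE_BASE_URL env vars and
    # returns an lru_cache-backed singleton per (api_key, base_url) pair.
    google_client = client()

    # get_client() prefers a client set in GOOGLE_CLIENT_CONTEXT; with no
    # context override, it returns the same cached default instance.
    assert get_client() is google_client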