mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/calls/calls.py

@@ -5,6 +5,7 @@ from typing import Generic, overload
 
 from ..context import Context, DepsT
 from ..formatting import FormattableT
+from ..models import Model, use_model
 from ..prompts import (
     AsyncContextPrompt,
     AsyncPrompt,
@@ -21,19 +22,37 @@ from ..responses import (
     Response,
     StreamResponse,
 )
-from ..tools import (
-    AsyncContextToolkit,
-    AsyncToolkit,
-    ContextToolkit,
-    Toolkit,
-)
 from ..types import P
-from .base_call import BaseCall
 
 
 @dataclass
-class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT]):
-    """A class for generating responses using LLMs."""
+class BaseCall:
+    """Base class for all Call types with shared model functionality."""
+
+    default_model: Model
+    """The default model that will be used if no model is set in context."""
+
+    @property
+    def model(self) -> Model:
+        """The model used for generating responses. May be overridden via `with llm.model(...)`."""
+        return use_model(self.default_model)
+
+
+@dataclass
+class Call(BaseCall, Generic[P, FormattableT]):
+    """A call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating a `MessageTemplate` with `llm.call`. The decorated function
+    becomes directly callable to generate responses, with the `Model` bundled in.
+
+    A `Call` is essentially: `MessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `call(*args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using the `with llm.model(...)` context manager.
+    """
+
+    prompt: Prompt[P, FormattableT]
+    """The underlying Prompt instance that generates messages with tools and format."""
 
     @overload
     def __call__(
@@ -63,10 +82,7 @@ class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT])
         self, *args: P.args, **kwargs: P.kwargs
    ) -> Response | Response[FormattableT]:
         """Generates a response using the LLM."""
-        messages = self.fn(*args, **kwargs)
-        return self.model.call(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.call(self.model, *args, **kwargs)
 
     @overload
     def stream(
@@ -82,18 +98,24 @@ class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT])
         self, *args: P.args, **kwargs: P.kwargs
    ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generates a streaming response using the LLM."""
-        messages = self.fn(*args, **kwargs)
-        return self.model.stream(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.stream(self.model, *args, **kwargs)
 
 
 @dataclass
-class AsyncCall(
-    BaseCall[P, AsyncPrompt, AsyncToolkit, FormattableT],
-    Generic[P, FormattableT],
-):
-    """A class for generating responses using LLMs asynchronously."""
+class AsyncCall(BaseCall, Generic[P, FormattableT]):
+    """An async call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating an async `MessageTemplate` with `llm.call`. The decorated async
+    function becomes directly callable to generate responses asynchronously, with the `Model` bundled in.
+
+    An `AsyncCall` is essentially: async `MessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `await call(*args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using the `with llm.model(...)` context manager.
+    """
+
+    prompt: AsyncPrompt[P, FormattableT]
+    """The underlying AsyncPrompt instance that generates messages with tools and format."""
 
     @overload
     async def __call__(
@@ -108,7 +130,7 @@ class AsyncCall(
     async def __call__(
         self, *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generates a Asyncresponse using the LLM asynchronously."""
+        """Generates a response using the LLM asynchronously."""
         return await self.call(*args, **kwargs)
 
     @overload
@@ -125,10 +147,7 @@ class AsyncCall(
         self, *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generates a response using the LLM asynchronously."""
-        messages = await self.fn(*args, **kwargs)
-        return await self.model.call_async(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.call(self.model, *args, **kwargs)
 
     @overload
     async def stream(
@@ -144,18 +163,25 @@ class AsyncCall(
         self, *args: P.args, **kwargs: P.kwargs
    ) -> AsyncStreamResponse[FormattableT] | AsyncStreamResponse:
         """Generates a streaming response using the LLM asynchronously."""
-        messages = await self.fn(*args, **kwargs)
-        return await self.model.stream_async(
-            messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.stream(self.model, *args, **kwargs)
 
 
 @dataclass
-class ContextCall(
-    BaseCall[P, ContextPrompt, ContextToolkit[DepsT], FormattableT],
-    Generic[P, DepsT, FormattableT],
-):
-    """A class for generating responses using LLMs."""
+class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
+    """A context-aware call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating a `ContextMessageTemplate` with `llm.call`. The decorated function
+    (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
+    responses with context dependencies, with the `Model` bundled in.
+
+    A `ContextCall` is essentially: `ContextMessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `call(ctx, *args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using the `with llm.model(...)` context manager.
+    """
+
+    prompt: ContextPrompt[P, DepsT, FormattableT]
+    """The underlying ContextPrompt instance that generates messages with tools and format."""
 
     @overload
     def __call__(
@@ -199,10 +225,7 @@ class ContextCall(
         self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generates a response using the LLM."""
-        messages = self.fn(ctx, *args, **kwargs)
-        return self.model.context_call(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.call(self.model, ctx, *args, **kwargs)
 
     @overload
     def stream(
@@ -226,18 +249,25 @@ class ContextCall(
         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
     ):
         """Generates a streaming response using the LLM."""
-        messages = self.fn(ctx, *args, **kwargs)
-        return self.model.context_stream(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return self.prompt.stream(self.model, ctx, *args, **kwargs)
 
 
 @dataclass
-class AsyncContextCall(
-    BaseCall[P, AsyncContextPrompt, AsyncContextToolkit[DepsT], FormattableT],
-    Generic[P, DepsT, FormattableT],
-):
-    """A class for generating responses using LLMs asynchronously."""
+class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
+    """An async context-aware call that directly generates LLM responses without requiring a model argument.
+
+    Created by decorating an async `ContextMessageTemplate` with `llm.call`. The decorated async
+    function (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
+    responses asynchronously with context dependencies, with the `Model` bundled in.
+
+    An `AsyncContextCall` is essentially: async `ContextMessageTemplate` + tools + format + `Model`.
+    It can be invoked directly: `await call(ctx, *args, **kwargs)` (no model argument needed).
+
+    The model can be overridden at runtime using the `with llm.model(...)` context manager.
+    """
+
+    prompt: AsyncContextPrompt[P, DepsT, FormattableT]
+    """The underlying AsyncContextPrompt instance that generates messages with tools and format."""
 
     @overload
     async def __call__(
@@ -281,10 +311,7 @@ class AsyncContextCall(
         self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generates a response using the LLM asynchronously."""
-        messages = await self.fn(ctx, *args, **kwargs)
-        return await self.model.context_call_async(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.call(self.model, ctx, *args, **kwargs)
 
     @overload
     async def stream(
@@ -309,7 +336,4 @@ class AsyncContextCall(
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
         """Generates a streaming response using the LLM asynchronously."""
-        messages = await self.fn(ctx, *args, **kwargs)
-        return await self.model.context_stream_async(
-            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
-        )
+        return await self.prompt.stream(self.model, ctx, *args, **kwargs)
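
The net effect of this refactor is that every `Call` variant now delegates message generation to a `Prompt` and resolves its model through `BaseCall.model`. A minimal usage sketch, assuming the 2.0.0a4 public API re-exports shown in the docstrings above; the model ID strings and the exact `llm.model(...)` signature are illustrative assumptions, not taken from this diff:

```python
from mirascope import llm


@llm.call("openai/gpt-4")
def recommend_book(genre: str):
    return f"Please recommend a book in {genre}."


# Invoked directly: BaseCall.model resolves use_model(self.default_model),
# so the bundled default model is used when no context model is set.
response = recommend_book("fantasy")

# Overriding at runtime: the context model replaces the decorated default.
# (Assumed: llm.model(...) accepts a model ID string, per the docstrings.)
with llm.model("anthropic/claude-sonnet-4-0"):
    response = recommend_book("fantasy")
```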

mirascope/llm/calls/decorator.py

@@ -4,29 +4,24 @@ from __future__ import annotations
 
 from collections.abc import Sequence
 from dataclasses import dataclass
-from typing import Generic, Literal, cast, overload
+from typing import Generic, cast, overload
 from typing_extensions import Unpack
 
-from ..clients import (
-    AnthropicModelId,
-    GoogleModelId,
-    ModelId,
-    OpenAICompletionsModelId,
-    OpenAIResponsesModelId,
-    Params,
-    Provider,
-)
 from ..context import DepsT
 from ..formatting import Format, FormattableT
 from ..models import Model
 from ..prompts import (
-    AsyncContextPromptable,
-    AsyncPromptable,
-    ContextPromptable,
-    Promptable,
-    _utils as _prompt_utils,
-    prompt,
+    AsyncContextMessageTemplate,
+    AsyncContextPrompt,
+    AsyncMessageTemplate,
+    AsyncPrompt,
+    ContextMessageTemplate,
+    ContextPrompt,
+    MessageTemplate,
+    Prompt,
+    _utils,
 )
+from ..providers import ModelId, Params
 from ..tools import (
     AsyncContextTool,
     AsyncContextToolkit,
@@ -44,16 +39,32 @@ from .calls import AsyncCall, AsyncContextCall, Call, ContextCall
 
 @dataclass(kw_only=True)
 class CallDecorator(Generic[ToolT, FormattableT]):
-    """A decorator for converting prompts to calls."""
+    """Decorator for converting a `MessageTemplate` into a `Call`.
+
+    Takes a raw prompt function that returns message content and wraps it with tools,
+    format, and a model to create a `Call` that can be invoked directly without needing
+    to pass a model argument.
+
+    The decorator automatically detects whether the function is async or context-aware
+    and creates the appropriate `Call` variant (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`).
+
+    Conceptually: `CallDecorator` = `PromptDecorator` + `Model`
+    Result: `Call` = `MessageTemplate` + tools + format + `Model`
+    """
 
     model: Model
+    """The default model to use with this call. May be overridden."""
+
     tools: Sequence[ToolT] | None
+    """The tools that are included in the prompt, if any."""
+
     format: type[FormattableT] | Format[FormattableT] | None
+    """The structured output format of the prompt, if any."""
 
     @overload
     def __call__(
         self: CallDecorator[AsyncTool | AsyncContextTool[DepsT], FormattableT],
-        fn: AsyncContextPromptable[P, DepsT],
+        fn: AsyncContextMessageTemplate[P, DepsT],
    ) -> AsyncContextCall[P, DepsT, FormattableT]:
         """Decorate an async context prompt into an AsyncContextCall."""
         ...
@@ -61,31 +72,31 @@ class CallDecorator(Generic[ToolT, FormattableT]):
     @overload
     def __call__(
         self: CallDecorator[Tool | ContextTool[DepsT], FormattableT],
-        fn: ContextPromptable[P, DepsT],
+        fn: ContextMessageTemplate[P, DepsT],
    ) -> ContextCall[P, DepsT, FormattableT]:
         """Decorate a context prompt into a ContextCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncPromptable[P]
+        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncMessageTemplate[P]
    ) -> AsyncCall[P, FormattableT]:
         """Decorate an async prompt into an AsyncCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[Tool, FormattableT], fn: Promptable[P]
+        self: CallDecorator[Tool, FormattableT], fn: MessageTemplate[P]
    ) -> Call[P, FormattableT]:
         """Decorate a prompt into a Call."""
         ...
 
     def __call__(
         self,
-        fn: ContextPromptable[P, DepsT]
-        | AsyncContextPromptable[P, DepsT]
-        | Promptable[P]
-        | AsyncPromptable[P],
+        fn: ContextMessageTemplate[P, DepsT]
+        | AsyncContextMessageTemplate[P, DepsT]
+        | MessageTemplate[P]
+        | AsyncMessageTemplate[P],
    ) -> (
         ContextCall[P, DepsT, FormattableT]
         | AsyncContextCall[P, DepsT, FormattableT]
@@ -93,123 +104,122 @@ class CallDecorator(Generic[ToolT, FormattableT]):
         | AsyncCall[P, FormattableT]
     ):
         """Decorates a prompt into a Call or ContextCall."""
-        is_context = _prompt_utils.is_context_promptable(fn)
-        is_async = _prompt_utils.is_async_promptable(fn)
+        is_context = _utils.is_context_promptable(fn)
+        is_async = _utils.is_async_promptable(fn)
 
         if is_context and is_async:
             tools = cast(
                 Sequence[AsyncTool | AsyncContextTool[DepsT]] | None, self.tools
             )
+            prompt = AsyncContextPrompt(
+                fn=fn,
+                toolkit=AsyncContextToolkit(tools=tools),
+                format=self.format,
+            )
             return AsyncContextCall(
-                fn=prompt(fn),
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=AsyncContextToolkit(tools=tools),
             )
         elif is_context:
             tools = cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
+            prompt = ContextPrompt(
+                fn=fn,
+                toolkit=ContextToolkit(tools=tools),
+                format=self.format,
+            )
             return ContextCall(
-                fn=prompt(fn),
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=ContextToolkit(tools=tools),
             )
         elif is_async:
             tools = cast(Sequence[AsyncTool] | None, self.tools)
+            prompt = AsyncPrompt(
+                fn=fn, toolkit=AsyncToolkit(tools=tools), format=self.format
+            )
             return AsyncCall(
-                fn=prompt(fn),
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=AsyncToolkit(tools=tools),
             )
         else:
             tools = cast(Sequence[Tool] | None, self.tools)
+            prompt = Prompt(fn=fn, toolkit=Toolkit(tools=tools), format=self.format)
             return Call(
-                fn=prompt(fn),
+                prompt=prompt,
                 default_model=self.model,
-                format=self.format,
-                toolkit=Toolkit(tools=tools),
             )
 
 
 @overload
 def call(
+    model: ModelId,
     *,
-    provider: Literal["anthropic"],
-    model_id: AnthropicModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using Anthropic models."""
-    ...
-
+    """Decorator for converting prompt functions into LLM calls.
 
-@overload
-def call(
-    *,
-    provider: Literal["google"],
-    model_id: GoogleModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using Google models."""
+    This overload accepts a model ID string and allows additional params.
+    """
     ...
 
 
 @overload
 def call(
+    model: Model,
     *,
-    provider: Literal["openai:completions"],
-    model_id: OpenAICompletionsModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using OpenAI models."""
-    ...
-
+    """Decorator for converting prompt functions into LLM calls.
 
-@overload
-def call(
-    *,
-    provider: Literal["openai:responses", "openai"],
-    model_id: OpenAIResponsesModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using OpenAI models (Responses API)."""
-    ...
-
-
-@overload
-def call(
-    *,
-    provider: Provider,
-    model_id: ModelId,
-    tools: list[ToolT] | None = None,
-    format: type[FormattableT] | Format[FormattableT] | None = None,
-    **params: Unpack[Params],
-) -> CallDecorator[ToolT, FormattableT]:
-    """Decorate a prompt into a Call using a generic provider and model."""
+    This overload accepts a Model instance and does not allow additional params.
+    """
     ...
 
 
 def call(
+    model: ModelId | Model,
     *,
-    provider: Provider,
-    model_id: ModelId,
-    tools: list[ToolT] | None = None,
+    tools: Sequence[ToolT] | None = None,
     format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
) -> CallDecorator[ToolT, FormattableT]:
-    """Returns a decorator for turning prompt template functions into generations.
-
-    This decorator creates a `Call` or `ContextCall` that can be used with prompt functions.
-    If the first parameter is typed as `llm.Context[T]`, it creates a ContextCall.
-    Otherwise, it creates a regular Call.
+    """Decorates a `MessageTemplate` to create a `Call` that can be invoked directly.
+
+    The `llm.call` decorator is the most convenient way to use Mirascope. It transforms
+    a raw prompt function (that returns message content) into a `Call` object that bundles
+    the function with tools, format, and a model. The resulting `Call` can be invoked
+    directly to generate LLM responses without needing to pass a model argument.
+
+    The decorator automatically detects the function type:
+    - If the first parameter is named `'ctx'` with type `llm.Context[T]` (or a subclass thereof),
+      creates a `ContextCall`
+    - If the function is async, creates an `AsyncCall` or `AsyncContextCall`
+    - Otherwise, creates a regular `Call`
+
+    The model specified in the decorator can be overridden at runtime using the
+    `llm.model()` context manager. When overridden, the context model completely
+    replaces the decorated model, including all parameters.
+
+    Conceptual flow:
+    - `MessageTemplate`: raw function returning content
+    - `@llm.prompt`: `MessageTemplate` → `Prompt`
+      Includes tools and format, if applicable. Can be called by providing a `Model`.
+    - `@llm.call`: `MessageTemplate` → `Call`. Includes a model, tools, and format. The
+      model may be created on the fly from a model identifier and optional params, or
+      provided outright.
+
+    Args:
+        model: A model ID string (e.g., "openai/gpt-4") or a `Model` instance
+        tools: Optional `Sequence` of tools to make available to the LLM
+        format: Optional response format class (`BaseModel`) or `Format` instance
+        **params: Additional call parameters (temperature, max_tokens, etc.)
+            Only available when passing a model ID string
+
+    Returns:
+        A `CallDecorator` that converts prompt functions into `Call` variants
+        (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`)
 
     Example:
 
@@ -217,15 +227,12 @@ def call(
     ```python
     from mirascope import llm
 
-    @llm.call(
-        provider="openai:completions",
-        model_id="gpt-4o-mini",
-    )
-    def answer_question(question: str) -> str:
-        return f"Answer this question: {question}"
+    @llm.call("openai/gpt-4")
+    def recommend_book(genre: str):
+        return f"Please recommend a book in {genre}."
 
-    response: llm.Response = answer_question("What is the capital of France?")
-    print(response)
+    response: llm.Response = recommend_book("fantasy")
+    print(response.pretty())
     ```
 
     Example:
@@ -236,20 +243,19 @@ def call(
     from mirascope import llm
 
     @dataclass
-    class Personality:
-        vibe: str
-
-    @llm.call(
-        provider="openai:completions",
-        model_id="gpt-4o-mini",
-    )
-    def answer_question(ctx: llm.Context[Personality], question: str) -> str:
-        return f"Your vibe is {ctx.deps.vibe}. Answer this question: {question}"
-
-    ctx = llm.Context(deps=Personality(vibe="snarky"))
-    response = answer_question(ctx, "What is the capital of France?")
-    print(response)
+    class User:
+        name: str
+        age: int
+
+    @llm.call("openai/gpt-4")
+    def recommend_book(ctx: llm.Context[User], genre: str):
+        return f"Recommend a {genre} book for {ctx.deps.name}, age {ctx.deps.age}."
+
+    ctx = llm.Context(deps=User(name="Alice", age=15))
+    response = recommend_book(ctx, "fantasy")
+    print(response.pretty())
     ```
     """
-    model = Model(provider=provider, model_id=model_id, **params)
+    if isinstance(model, str):
+        model = Model(model, **params)
     return CallDecorator(model=model, tools=tools, format=format)
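
Taken together, the new `call` signature replaces the old `provider=`/`model_id=` keywords with a single positional `model` argument. A sketch of both overloads under that reading; the model IDs and the `temperature` value are illustrative, and `Model` is assumed to be importable from `mirascope.llm`:

```python
from mirascope import llm
from mirascope.llm import Model


# Overload 1: a model ID string, with additional params allowed.
@llm.call("openai/gpt-4", temperature=0.7)
def summarize(text: str):
    return f"Summarize: {text}"


# Overload 2: a pre-built Model instance; **params are not accepted here,
# since the Model already carries its own parameters.
model = Model("openai/gpt-4", temperature=0.7)


@llm.call(model)
def translate(text: str):
    return f"Translate to French: {text}"
```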

mirascope/llm/content/__init__.py

@@ -2,6 +2,7 @@
 
 from typing import TypeAlias
 
+from ..types import Jsonable
 from .audio import Audio, Base64AudioSource
 from .document import (
     Base64DocumentSource,
@@ -16,11 +17,11 @@ from .tool_call import ToolCall, ToolCallChunk, ToolCallEndChunk, ToolCallStartC
 from .tool_output import ToolOutput
 
 ContentPart: TypeAlias = (
-    Text | Image | Audio | Document | ToolOutput | ToolCall | Thought
+    Text | Image | Audio | Document | ToolOutput[Jsonable] | ToolCall | Thought
)
"""Content parts that may be included in a Message."""
 
-UserContentPart: TypeAlias = Text | Image | Audio | Document | ToolOutput
+UserContentPart: TypeAlias = Text | Image | Audio | Document | ToolOutput[Jsonable]
"""Content parts that can be included in a UserMessage."""
 
 AssistantContentPart: TypeAlias = Text | ToolCall | Thought
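
`Jsonable` itself (imported from `mirascope/llm/types`) is not shown in this diff; the change only pins `ToolOutput` to JSON-serializable payloads in the content unions. A typical recursive definition of such a type, offered purely as an assumption for illustration:

```python
from typing import TypeAlias

# Hypothetical shape of Jsonable; the actual definition lives in
# mirascope/llm/types and may differ.
Jsonable: TypeAlias = (
    None | bool | int | float | str | list["Jsonable"] | dict[str, "Jsonable"]
)
```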

mirascope/llm/context/_utils.py

@@ -1,11 +1,12 @@
 import inspect
+import typing
 from collections.abc import Callable
-from typing import get_origin
+from typing import Any, get_origin
 
 from .context import Context
 
 
-def first_param_is_context(fn: Callable) -> bool:
+def first_param_is_context(fn: Callable[..., Any]) -> bool:
     """Returns whether the first argument to a function is `ctx: Context`.
 
     Also returns true if the first argument is a subclass of `Context`.
@@ -21,8 +22,20 @@ def first_param_is_context(fn: Callable) -> bool:
     else:
         first_param = params[0]
 
-    type_is_context = get_origin(first_param.annotation) is Context
-    subclass_of_context = isinstance(first_param.annotation, type) and issubclass(
-        first_param.annotation, Context
+    if first_param.name != "ctx":
+        return False
+
+    try:
+        hints = typing.get_type_hints(fn)
+        annotation = hints.get(first_param.name)
+    except (NameError, AttributeError, TypeError):
+        annotation = first_param.annotation
+
+    if annotation is None or annotation is inspect.Parameter.empty:
+        return False
+
+    type_is_context = get_origin(annotation) is Context
+    subclass_of_context = isinstance(annotation, type) and issubclass(
+        annotation, Context
     )
-    return first_param.name == "ctx" and (type_is_context or subclass_of_context)
+    return type_is_context or subclass_of_context
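
The switch from reading `first_param.annotation` to `typing.get_type_hints` matters under PEP 563 (`from __future__ import annotations`), where `inspect` sees annotations as plain strings; note that `decorator.py` above enables exactly that future import. A self-contained sketch with a stand-in `Context` class showing the difference:

```python
from __future__ import annotations

import inspect
import typing
from typing import Generic, TypeVar

DepsT = TypeVar("DepsT")


class Context(Generic[DepsT]): ...


def fn(ctx: Context[int], question: str) -> str: ...


# Under postponed evaluation the raw annotation is just a string, so
# get_origin() on it could never match Context:
print(repr(inspect.signature(fn).parameters["ctx"].annotation))  # 'Context[int]'

# typing.get_type_hints() evaluates the string back to the real object,
# which get_origin() resolves to Context as intended:
print(typing.get_type_hints(fn)["ctx"])  # __main__.Context[int]
```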