mirascope 2.0.0a6__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (226)
  1. mirascope/api/_generated/__init__.py +186 -5
  2. mirascope/api/_generated/annotations/client.py +38 -6
  3. mirascope/api/_generated/annotations/raw_client.py +366 -47
  4. mirascope/api/_generated/annotations/types/annotations_create_response.py +19 -6
  5. mirascope/api/_generated/annotations/types/annotations_get_response.py +19 -6
  6. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +22 -7
  7. mirascope/api/_generated/annotations/types/annotations_update_response.py +19 -6
  8. mirascope/api/_generated/api_keys/__init__.py +12 -2
  9. mirascope/api/_generated/api_keys/client.py +107 -6
  10. mirascope/api/_generated/api_keys/raw_client.py +486 -38
  11. mirascope/api/_generated/api_keys/types/__init__.py +7 -1
  12. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
  13. mirascope/api/_generated/client.py +36 -0
  14. mirascope/api/_generated/docs/raw_client.py +71 -9
  15. mirascope/api/_generated/environment.py +3 -3
  16. mirascope/api/_generated/environments/__init__.py +6 -0
  17. mirascope/api/_generated/environments/client.py +158 -9
  18. mirascope/api/_generated/environments/raw_client.py +620 -52
  19. mirascope/api/_generated/environments/types/__init__.py +10 -0
  20. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
  21. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
  22. mirascope/api/_generated/{organizations/types/organizations_credits_response.py → environments/types/environments_get_analytics_response_top_models_item.py} +6 -3
  23. mirascope/api/_generated/errors/__init__.py +6 -0
  24. mirascope/api/_generated/errors/bad_request_error.py +5 -2
  25. mirascope/api/_generated/errors/conflict_error.py +5 -2
  26. mirascope/api/_generated/errors/payment_required_error.py +15 -0
  27. mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
  28. mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
  29. mirascope/api/_generated/functions/__init__.py +10 -0
  30. mirascope/api/_generated/functions/client.py +222 -8
  31. mirascope/api/_generated/functions/raw_client.py +975 -134
  32. mirascope/api/_generated/functions/types/__init__.py +28 -4
  33. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
  34. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
  35. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
  36. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
  37. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
  38. mirascope/api/_generated/health/raw_client.py +74 -10
  39. mirascope/api/_generated/organization_invitations/__init__.py +33 -0
  40. mirascope/api/_generated/organization_invitations/client.py +546 -0
  41. mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
  42. mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
  43. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
  44. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
  45. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
  46. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
  47. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
  48. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
  49. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
  50. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
  51. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
  52. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
  53. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
  54. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
  55. mirascope/api/_generated/organization_memberships/__init__.py +19 -0
  56. mirascope/api/_generated/organization_memberships/client.py +302 -0
  57. mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
  58. mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
  59. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
  60. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
  61. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
  62. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
  63. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
  64. mirascope/api/_generated/organizations/__init__.py +26 -2
  65. mirascope/api/_generated/organizations/client.py +442 -20
  66. mirascope/api/_generated/organizations/raw_client.py +1763 -164
  67. mirascope/api/_generated/organizations/types/__init__.py +48 -2
  68. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
  69. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
  70. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
  71. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
  72. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
  73. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
  74. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
  75. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
  76. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
  77. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
  78. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
  79. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
  80. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
  81. mirascope/api/_generated/project_memberships/__init__.py +25 -0
  82. mirascope/api/_generated/project_memberships/client.py +437 -0
  83. mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
  84. mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
  85. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
  86. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
  87. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
  88. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
  89. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
  90. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
  91. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
  92. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
  93. mirascope/api/_generated/projects/raw_client.py +415 -58
  94. mirascope/api/_generated/reference.md +2767 -397
  95. mirascope/api/_generated/tags/__init__.py +19 -0
  96. mirascope/api/_generated/tags/client.py +504 -0
  97. mirascope/api/_generated/tags/raw_client.py +1288 -0
  98. mirascope/api/_generated/tags/types/__init__.py +17 -0
  99. mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
  100. mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
  101. mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
  102. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
  103. mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
  104. mirascope/api/_generated/token_cost/__init__.py +7 -0
  105. mirascope/api/_generated/token_cost/client.py +160 -0
  106. mirascope/api/_generated/token_cost/raw_client.py +264 -0
  107. mirascope/api/_generated/token_cost/types/__init__.py +8 -0
  108. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
  109. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
  110. mirascope/api/_generated/traces/__init__.py +20 -0
  111. mirascope/api/_generated/traces/client.py +543 -0
  112. mirascope/api/_generated/traces/raw_client.py +1366 -96
  113. mirascope/api/_generated/traces/types/__init__.py +28 -0
  114. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +6 -0
  115. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
  116. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
  117. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -2
  118. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
  119. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
  120. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
  121. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
  122. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
  123. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
  124. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
  125. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
  126. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +10 -1
  127. mirascope/api/_generated/types/__init__.py +32 -2
  128. mirascope/api/_generated/types/bad_request_error_body.py +50 -0
  129. mirascope/api/_generated/types/date.py +3 -0
  130. mirascope/api/_generated/types/immutable_resource_error.py +22 -0
  131. mirascope/api/_generated/types/internal_server_error_body.py +3 -3
  132. mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
  133. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
  134. mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
  135. mirascope/api/_generated/types/rate_limit_error.py +31 -0
  136. mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
  137. mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
  138. mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
  139. mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
  140. mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
  141. mirascope/api/settings.py +19 -1
  142. mirascope/llm/__init__.py +53 -10
  143. mirascope/llm/calls/__init__.py +2 -1
  144. mirascope/llm/calls/calls.py +3 -1
  145. mirascope/llm/calls/decorator.py +21 -7
  146. mirascope/llm/content/tool_output.py +22 -5
  147. mirascope/llm/exceptions.py +284 -71
  148. mirascope/llm/formatting/__init__.py +17 -0
  149. mirascope/llm/formatting/format.py +112 -35
  150. mirascope/llm/formatting/output_parser.py +178 -0
  151. mirascope/llm/formatting/partial.py +80 -7
  152. mirascope/llm/formatting/primitives.py +192 -0
  153. mirascope/llm/formatting/types.py +20 -8
  154. mirascope/llm/messages/__init__.py +3 -0
  155. mirascope/llm/messages/_utils.py +34 -0
  156. mirascope/llm/models/__init__.py +5 -0
  157. mirascope/llm/models/models.py +137 -69
  158. mirascope/llm/{providers/base → models}/params.py +7 -57
  159. mirascope/llm/models/thinking_config.py +61 -0
  160. mirascope/llm/prompts/_utils.py +0 -32
  161. mirascope/llm/prompts/decorator.py +16 -5
  162. mirascope/llm/prompts/prompts.py +131 -68
  163. mirascope/llm/providers/__init__.py +1 -4
  164. mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
  165. mirascope/llm/providers/anthropic/_utils/beta_decode.py +18 -9
  166. mirascope/llm/providers/anthropic/_utils/beta_encode.py +62 -13
  167. mirascope/llm/providers/anthropic/_utils/decode.py +18 -9
  168. mirascope/llm/providers/anthropic/_utils/encode.py +26 -7
  169. mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
  170. mirascope/llm/providers/anthropic/beta_provider.py +64 -18
  171. mirascope/llm/providers/anthropic/provider.py +91 -33
  172. mirascope/llm/providers/base/__init__.py +0 -4
  173. mirascope/llm/providers/base/_utils.py +55 -6
  174. mirascope/llm/providers/base/base_provider.py +116 -37
  175. mirascope/llm/providers/google/_utils/__init__.py +2 -0
  176. mirascope/llm/providers/google/_utils/decode.py +20 -7
  177. mirascope/llm/providers/google/_utils/encode.py +26 -7
  178. mirascope/llm/providers/google/_utils/errors.py +3 -2
  179. mirascope/llm/providers/google/provider.py +64 -18
  180. mirascope/llm/providers/mirascope/_utils.py +13 -17
  181. mirascope/llm/providers/mirascope/provider.py +49 -18
  182. mirascope/llm/providers/mlx/_utils.py +7 -2
  183. mirascope/llm/providers/mlx/encoding/base.py +5 -2
  184. mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
  185. mirascope/llm/providers/mlx/mlx.py +23 -6
  186. mirascope/llm/providers/mlx/provider.py +42 -13
  187. mirascope/llm/providers/openai/_utils/errors.py +2 -2
  188. mirascope/llm/providers/openai/completions/_utils/encode.py +20 -16
  189. mirascope/llm/providers/openai/completions/base_provider.py +40 -11
  190. mirascope/llm/providers/openai/provider.py +40 -10
  191. mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
  192. mirascope/llm/providers/openai/responses/_utils/decode.py +19 -6
  193. mirascope/llm/providers/openai/responses/_utils/encode.py +22 -10
  194. mirascope/llm/providers/openai/responses/provider.py +56 -18
  195. mirascope/llm/providers/provider_registry.py +93 -19
  196. mirascope/llm/responses/__init__.py +6 -1
  197. mirascope/llm/responses/_utils.py +102 -12
  198. mirascope/llm/responses/base_response.py +5 -2
  199. mirascope/llm/responses/base_stream_response.py +115 -25
  200. mirascope/llm/responses/response.py +2 -1
  201. mirascope/llm/responses/root_response.py +89 -17
  202. mirascope/llm/responses/stream_response.py +6 -9
  203. mirascope/llm/tools/decorator.py +9 -4
  204. mirascope/llm/tools/tool_schema.py +12 -6
  205. mirascope/llm/tools/toolkit.py +35 -27
  206. mirascope/llm/tools/tools.py +45 -20
  207. mirascope/ops/__init__.py +4 -0
  208. mirascope/ops/_internal/configuration.py +82 -31
  209. mirascope/ops/_internal/exporters/exporters.py +64 -11
  210. mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
  211. mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
  212. mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
  213. mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1242
  214. mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
  215. mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
  216. mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
  217. mirascope/ops/_internal/protocols.py +83 -1
  218. mirascope/ops/_internal/traced_calls.py +4 -0
  219. mirascope/ops/_internal/traced_functions.py +118 -8
  220. mirascope/ops/_internal/tracing.py +78 -1
  221. mirascope/ops/_internal/utils.py +52 -4
  222. {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/METADATA +12 -11
  223. mirascope-2.0.1.dist-info/RECORD +423 -0
  224. {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
  225. mirascope-2.0.0a6.dist-info/RECORD +0 -316
  226. {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0
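
The largest single addition is entry 214, the new mirascope/ops/_internal/instrumentation/llm/model.py, shown in the hunk below. It adds OpenTelemetry GenAI spans around `mirascope.llm.Model` methods by saving each original method and swapping in a wrapped equivalent. As orientation, a minimal standalone sketch of emitting one GenAI-style client span with the public OpenTelemetry API; the span name and attributes here are illustrative stand-ins, not the exact ones the module emits:

    # Illustrative only: one client-style span around a fake model call.
    from opentelemetry import trace

    tracer = trace.get_tracer("demo-instrumentation")

    def call_model(prompt: str) -> str:
        with tracer.start_as_current_span("chat demo-model") as span:
            span.set_attribute("gen_ai.request.model", "demo-model")
            reply = f"echo: {prompt}"  # stand-in for the provider call
            span.set_attribute("gen_ai.usage.output_tokens", len(reply.split()))
            return reply

    print(call_model("hello"))
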
@@ -0,0 +1,1798 @@
+ """OpenTelemetry GenAI instrumentation for `mirascope.llm.Model` methods."""
+
+ from __future__ import annotations
+
+ import weakref
+ from collections.abc import AsyncIterator, Iterator, Sequence
+ from contextlib import AbstractContextManager
+ from functools import wraps
+ from types import TracebackType
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     cast,
+     overload,
+ )
+
+ from opentelemetry import trace as otel_trace
+
+ from .....llm import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncResponse,
+     AsyncStreamResponse,
+     AsyncTool,
+     AsyncToolkit,
+     Context,
+     ContextResponse,
+     ContextStreamResponse,
+     ContextTool,
+     ContextToolkit,
+     DepsT,
+     FormattableT,
+     Message,
+     Model,
+     OutputParser,
+     Response,
+     RootResponse,
+     StreamResponse,
+     StreamResponseChunk,
+     Tool,
+     Toolkit,
+     UserContent,
+ )
+ from .....llm.messages import promote_to_messages, user
+ from .common import (
+     FormatParam,
+     SpanContext,
+     ToolsParam,
+     attach_response,
+     attach_response_async,
+     record_dropped_params,
+     start_model_span,
+ )
+
+ if TYPE_CHECKING:
+     from opentelemetry.trace import Span
+
+
+ # =============================================================================
+ # Original method references and wrapped state flags
+ # =============================================================================
+
+ _ORIGINAL_MODEL_CALL = Model.call
+ _MODEL_CALL_WRAPPED = False
+ _ORIGINAL_MODEL_CALL_ASYNC = Model.call_async
+ _MODEL_CALL_ASYNC_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_CALL = Model.context_call
+ _MODEL_CONTEXT_CALL_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_CALL_ASYNC = Model.context_call_async
+ _MODEL_CONTEXT_CALL_ASYNC_WRAPPED = False
+ _ORIGINAL_MODEL_STREAM = Model.stream
+ _MODEL_STREAM_WRAPPED = False
+ _ORIGINAL_MODEL_STREAM_ASYNC = Model.stream_async
+ _MODEL_STREAM_ASYNC_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_STREAM = Model.context_stream
+ _MODEL_CONTEXT_STREAM_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_STREAM_ASYNC = Model.context_stream_async
+ _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED = False
+
+ # Resume method originals and flags
+ _ORIGINAL_MODEL_RESUME = Model.resume
+ _MODEL_RESUME_WRAPPED = False
+ _ORIGINAL_MODEL_RESUME_ASYNC = Model.resume_async
+ _MODEL_RESUME_ASYNC_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_RESUME = Model.context_resume
+ _MODEL_CONTEXT_RESUME_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_RESUME_ASYNC = Model.context_resume_async
+ _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED = False
+ _ORIGINAL_MODEL_RESUME_STREAM = Model.resume_stream
+ _MODEL_RESUME_STREAM_WRAPPED = False
+ _ORIGINAL_MODEL_RESUME_STREAM_ASYNC = Model.resume_stream_async
+ _MODEL_RESUME_STREAM_ASYNC_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM = Model.context_resume_stream
+ _MODEL_CONTEXT_RESUME_STREAM_WRAPPED = False
+ _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM_ASYNC = Model.context_resume_stream_async
+ _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED = False
+
+
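
Each patched method keeps a module-level pair: the original captured at import time and a `_*_WRAPPED` flag, which makes wrapping idempotent and fully reversible. A toy sketch of the same save/guard/restore pattern, using a stand-in `Client` class rather than `Model`:

    # Toy version of the save-original / guard-flag / restore pattern.
    class Client:
        def call(self) -> str:
            return "original"

    _ORIGINAL_CALL = Client.call
    _CALL_WRAPPED = False

    def _instrumented_call(self: Client) -> str:
        return f"instrumented({_ORIGINAL_CALL(self)})"

    def wrap_call() -> None:
        global _CALL_WRAPPED
        if _CALL_WRAPPED:  # a second wrap is a no-op, so the original is never lost
            return
        Client.call = _instrumented_call
        _CALL_WRAPPED = True

    def unwrap_call() -> None:
        global _CALL_WRAPPED
        if not _CALL_WRAPPED:
            return
        Client.call = _ORIGINAL_CALL
        _CALL_WRAPPED = False

    wrap_call()
    wrap_call()  # idempotent
    assert Client().call() == "instrumented(original)"
    unwrap_call()
    assert Client().call() == "original"
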
+ # =============================================================================
+ # Model.call instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: None = None,
+ ) -> Response: ...
+
+
+ @overload
+ def _instrumented_model_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> Response[FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> Response | Response[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_CALL)
+ def _instrumented_model_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: FormatParam = None,
+ ) -> Response | Response[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.call`."""
+     messages = promote_to_messages(content)
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+     ) as span_ctx:
+         response = _ORIGINAL_MODEL_CALL(
+             self,
+             content,
+             tools=tools,
+             format=format,
+         )
+         if span_ctx.span is not None:
+             attach_response(
+                 span_ctx.span,
+                 response,
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return response
+
+
+ def wrap_model_call() -> None:
+     """Returns None. Replaces `Model.call` with the instrumented wrapper."""
+     global _MODEL_CALL_WRAPPED
+     if _MODEL_CALL_WRAPPED:
+         return
+     Model.call = _instrumented_model_call
+     _MODEL_CALL_WRAPPED = True
+
+
+ def unwrap_model_call() -> None:
+     """Returns None. Restores the original `Model.call` implementation."""
+     global _MODEL_CALL_WRAPPED
+     if not _MODEL_CALL_WRAPPED:
+         return
+     Model.call = _ORIGINAL_MODEL_CALL
+     _MODEL_CALL_WRAPPED = False
+
+
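
Note that the concrete wrapper is decorated with `@wraps(_ORIGINAL_MODEL_CALL)`, so after patching, `Model.call` still reports the original's name and docstring, and the original stays reachable through `__wrapped__`. A small self-contained demonstration of what `functools.wraps` preserves:

    import functools

    def original(x: int) -> int:
        """Adds one."""
        return x + 1

    @functools.wraps(original)
    def wrapper(x: int) -> int:
        return original(x) * 2

    assert wrapper.__name__ == "original"
    assert wrapper.__doc__ == "Adds one."
    assert wrapper.__wrapped__ is original  # inspect.unwrap can recover it
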
+ # =============================================================================
+ # Model.call_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: None = None,
+ ) -> AsyncResponse: ...
+
+
+ @overload
+ async def _instrumented_model_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> AsyncResponse[FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> AsyncResponse | AsyncResponse[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_CALL_ASYNC)
+ async def _instrumented_model_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: FormatParam = None,
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.call_async`."""
+     messages = promote_to_messages(content)
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=True,
+     ) as span_ctx:
+         response = await _ORIGINAL_MODEL_CALL_ASYNC(
+             self,
+             content,
+             tools=tools,
+             format=format,
+         )
+         if span_ctx.span is not None:
+             await attach_response_async(
+                 span_ctx.span,
+                 response,
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return response
+
+
+ def wrap_model_call_async() -> None:
+     """Returns None. Replaces `Model.call_async` with the instrumented wrapper."""
+     global _MODEL_CALL_ASYNC_WRAPPED
+     if _MODEL_CALL_ASYNC_WRAPPED:
+         return
+     Model.call_async = _instrumented_model_call_async
+     _MODEL_CALL_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_call_async() -> None:
+     """Returns None. Restores the original `Model.call_async` implementation."""
+     global _MODEL_CALL_ASYNC_WRAPPED
+     if not _MODEL_CALL_ASYNC_WRAPPED:
+         return
+     Model.call_async = _ORIGINAL_MODEL_CALL_ASYNC
+     _MODEL_CALL_ASYNC_WRAPPED = False
+
+
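
The async wrapper has the same shape as the sync one: open the span, await the original, attach response attributes, then let the context manager close the span; it additionally passes `activate=True` so the span is made current for the duration of the call. A minimal sketch of a span staying current across an `await`, using only the public OpenTelemetry API (names are illustrative):

    import asyncio
    from opentelemetry import trace

    tracer = trace.get_tracer("demo")

    async def fake_provider_call() -> str:
        await asyncio.sleep(0)  # the span below stays current across this await
        return "ok"

    async def instrumented() -> str:
        with tracer.start_as_current_span("chat demo-model") as span:
            result = await fake_provider_call()
            span.set_attribute("gen_ai.response.finish_reasons", ["stop"])
            return result

    asyncio.run(instrumented())
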
+ # =============================================================================
+ # Model.context_call instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_context_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: None = None,
+ ) -> ContextResponse[DepsT, None]: ...
+
+
+ @overload
+ def _instrumented_model_context_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> ContextResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_context_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_CALL)
+ def _instrumented_model_context_call(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: FormatParam = None,
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.context_call`."""
+     messages = promote_to_messages(content)
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=True,
+     ) as span_ctx:
+         response = _ORIGINAL_MODEL_CONTEXT_CALL(
+             self,
+             content,
+             ctx=ctx,
+             tools=tools,
+             format=format,
+         )
+         if span_ctx.span is not None:
+             attach_response(
+                 span_ctx.span,
+                 response,
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return response
+
+
+ def wrap_model_context_call() -> None:
+     """Returns None. Replaces `Model.context_call` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_CALL_WRAPPED
+     if _MODEL_CONTEXT_CALL_WRAPPED:
+         return
+     Model.context_call = _instrumented_model_context_call
+     _MODEL_CONTEXT_CALL_WRAPPED = True
+
+
+ def unwrap_model_context_call() -> None:
+     """Returns None. Restores the original `Model.context_call` implementation."""
+     global _MODEL_CONTEXT_CALL_WRAPPED
+     if not _MODEL_CONTEXT_CALL_WRAPPED:
+         return
+     Model.context_call = _ORIGINAL_MODEL_CONTEXT_CALL
+     _MODEL_CONTEXT_CALL_WRAPPED = False
+
+
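
The context variants only add a keyword-only `ctx: Context[DepsT]` that is forwarded untouched, so the caller's dependency type flows through the wrapper unchanged. A generic sketch of that pass-through typing, with a stand-in `Context` rather than mirascope's:

    from dataclasses import dataclass
    from typing import Generic, TypeVar

    DepsT = TypeVar("DepsT")

    @dataclass
    class Context(Generic[DepsT]):  # stand-in, not mirascope's Context
        deps: DepsT

    def with_logging(ctx: Context[DepsT], value: str) -> tuple[DepsT, str]:
        # Forwarding ctx unchanged keeps DepsT intact for the caller.
        print(f"deps={ctx.deps!r}")
        return ctx.deps, value

    deps, out = with_logging(Context(deps={"user_id": 42}), "hello")
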
+ # =============================================================================
+ # Model.context_call_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_context_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: None = None,
+ ) -> AsyncContextResponse[DepsT, None]: ...
+
+
+ @overload
+ async def _instrumented_model_context_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> AsyncContextResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_context_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_CALL_ASYNC)
+ async def _instrumented_model_context_call_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: FormatParam = None,
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.context_call_async`."""
+     messages = promote_to_messages(content)
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=True,
+     ) as span_ctx:
+         response = await _ORIGINAL_MODEL_CONTEXT_CALL_ASYNC(
+             self,
+             content,
+             ctx=ctx,
+             tools=tools,
+             format=format,
+         )
+         if span_ctx.span is not None:
+             await attach_response_async(
+                 span_ctx.span,
+                 response,
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return response
+
+
+ def wrap_model_context_call_async() -> None:
+     """Returns None. Replaces `Model.context_call_async` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_CALL_ASYNC_WRAPPED
+     if _MODEL_CONTEXT_CALL_ASYNC_WRAPPED:
+         return
+     Model.context_call_async = _instrumented_model_context_call_async
+     _MODEL_CONTEXT_CALL_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_context_call_async() -> None:
+     """Returns None. Restores the original `Model.context_call_async` implementation."""
+     global _MODEL_CONTEXT_CALL_ASYNC_WRAPPED
+     if not _MODEL_CONTEXT_CALL_ASYNC_WRAPPED:
+         return
+     Model.context_call_async = _ORIGINAL_MODEL_CONTEXT_CALL_ASYNC
+     _MODEL_CONTEXT_CALL_ASYNC_WRAPPED = False
+
+
+ # =============================================================================
+ # Stream span handler helpers
+ # =============================================================================
+
+
+ def _attach_stream_span_handlers(
+     *,
+     response: ContextStreamResponse[DepsT, FormattableT | None]
+     | StreamResponse[FormattableT | None],
+     span_cm: AbstractContextManager[SpanContext],
+     span: Span,
+     request_messages: Sequence[Message],
+ ) -> None:
+     """Returns None. Closes the span when streaming completes."""
+     chunk_iterator: Iterator[StreamResponseChunk] = response._chunk_iterator
+
+     response_ref = weakref.ref(response)
+     closed = False
+
+     def _close_span(
+         exc_type: type[BaseException] | None,
+         exc: BaseException | None,
+         tb: TracebackType | None,
+     ) -> None:
+         nonlocal closed
+         if closed:
+             return
+         closed = True
+         response_obj = response_ref()
+         if response_obj is not None:
+             attach_response(
+                 span,
+                 response_obj,
+                 request_messages=request_messages,
+             )
+         span_cm.__exit__(exc_type, exc, tb)
+
+     def _wrapped_iterator() -> Iterator[StreamResponseChunk]:
+         with otel_trace.use_span(span, end_on_exit=False):
+             try:
+                 yield from chunk_iterator
+             except Exception as exc:  # noqa: BLE001
+                 _close_span(type(exc), exc, exc.__traceback__)
+                 raise
+             else:
+                 _close_span(None, None, None)
+             finally:
+                 _close_span(None, None, None)
+
+     response._chunk_iterator = _wrapped_iterator()
+
+
+ def _attach_async_stream_span_handlers(
+     *,
+     response: AsyncContextStreamResponse[DepsT, FormattableT | None],
+     span_cm: AbstractContextManager[SpanContext],
+     span: Span,
+     request_messages: Sequence[Message],
+ ) -> None:
+     """Returns None. Closes the span when async streaming completes."""
+     chunk_iterator: AsyncIterator[StreamResponseChunk] = response._chunk_iterator
+
+     response_ref = weakref.ref(response)
+     closed = False
+
+     def _close_span(
+         exc_type: type[BaseException] | None,
+         exc: BaseException | None,
+         tb: TracebackType | None,
+     ) -> None:
+         nonlocal closed
+         if closed:
+             return
+         closed = True
+         response_obj = response_ref()
+         if response_obj is not None:
+             attach_response(
+                 span,
+                 response_obj,
+                 request_messages=request_messages,
+             )
+         span_cm.__exit__(exc_type, exc, tb)
+
+     async def _wrapped_iterator() -> AsyncIterator[StreamResponseChunk]:
+         try:
+             async for chunk in chunk_iterator:
+                 yield chunk
+         except Exception as exc:  # noqa: BLE001
+             _close_span(type(exc), exc, exc.__traceback__)
+             raise
+         else:
+             _close_span(None, None, None)
+         finally:
+             _close_span(None, None, None)
+
+     response._chunk_iterator = _wrapped_iterator()
+
+
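
Streaming cannot end the span when the wrapper returns, because no tokens have been consumed yet; instead the helpers above swap the response's private `_chunk_iterator` for a generator that closes the span exactly once, whether the consumer exhausts the stream, abandons it, or the provider raises. The `closed` flag plus the `finally` clause make the close idempotent, and the `weakref` avoids a reference cycle between the response and its own wrapped iterator. A standalone sketch of the close-once wrapping pattern:

    from collections.abc import Iterator

    def wrap_with_cleanup(source: Iterator[str], on_close) -> Iterator[str]:
        closed = False

        def _close_once() -> None:
            nonlocal closed
            if not closed:
                closed = True
                on_close()

        def _wrapped() -> Iterator[str]:
            try:
                yield from source
            finally:
                # Runs on exhaustion, on an exception, and on early
                # abandonment (GeneratorExit), but fires on_close only once.
                _close_once()

        return _wrapped()

    events: list[str] = []
    stream = wrap_with_cleanup(iter(["a", "b", "c"]), lambda: events.append("closed"))
    next(stream)    # consume one chunk
    stream.close()  # abandon early; cleanup still runs exactly once
    assert events == ["closed"]
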
+ # =============================================================================
+ # Model.stream instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: None = None,
+ ) -> StreamResponse: ...
+
+
+ @overload
+ def _instrumented_model_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> StreamResponse[FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> StreamResponse | StreamResponse[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_STREAM)
+ def _instrumented_model_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[Tool] | Toolkit | None = None,
+     format: FormatParam = None,
+ ) -> StreamResponse | StreamResponse[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.stream`."""
+     messages = promote_to_messages(content)
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         response = _ORIGINAL_MODEL_STREAM(
+             self,
+             content,
+             tools=tools,
+             format=format,
+         )
+         span_cm.__exit__(None, None, None)
+         return response
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             response = _ORIGINAL_MODEL_STREAM(
+                 self,
+                 content,
+                 tools=tools,
+                 format=format,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_stream_span_handlers(
+             response=response,
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return response
+
+
+ def wrap_model_stream() -> None:
+     """Returns None. Replaces `Model.stream` with the instrumented wrapper."""
+     global _MODEL_STREAM_WRAPPED
+     if _MODEL_STREAM_WRAPPED:
+         return
+     Model.stream = _instrumented_model_stream
+     _MODEL_STREAM_WRAPPED = True
+
+
+ def unwrap_model_stream() -> None:
+     """Returns None. Restores the original `Model.stream` implementation."""
+     global _MODEL_STREAM_WRAPPED
+     if not _MODEL_STREAM_WRAPPED:
+         return
+     Model.stream = _ORIGINAL_MODEL_STREAM
+     _MODEL_STREAM_WRAPPED = False
+
+
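
`Model.stream` cannot use a `with` block because the span must outlive the function call, so the wrapper drives the context manager by hand: `__enter__` up front, then `__exit__` either on the no-span fast path, on an exception (forwarded as the `(type, value, traceback)` triple), or later from the stream handlers. A sketch of that manual protocol:

    from contextlib import contextmanager

    @contextmanager
    def managed(name: str):
        print(f"open {name}")
        try:
            yield name
        finally:
            print(f"close {name}")

    cm = managed("span")
    value = cm.__enter__()  # open now...
    try:
        result = value.upper()
    except Exception as exc:
        cm.__exit__(type(exc), exc, exc.__traceback__)  # forward the failure
        raise
    # ...but close later, once the long-lived work is actually done.
    cm.__exit__(None, None, None)
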
+ # =============================================================================
+ # Model.stream_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: None = None,
+ ) -> AsyncStreamResponse: ...
+
+
+ @overload
+ async def _instrumented_model_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> AsyncStreamResponse[FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_STREAM_ASYNC)
+ async def _instrumented_model_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+     format: FormatParam = None,
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.stream_async`."""
+     messages = promote_to_messages(content)
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         response = await _ORIGINAL_MODEL_STREAM_ASYNC(
+             self,
+             content,
+             tools=tools,
+             format=format,
+         )
+         span_cm.__exit__(None, None, None)
+         return response
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             response = await _ORIGINAL_MODEL_STREAM_ASYNC(
+                 self,
+                 content,
+                 tools=tools,
+                 format=format,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_async_stream_span_handlers(
+             response=cast(
+                 "AsyncContextStreamResponse[Any, FormattableT | None]", response
+             ),
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return response
+
+
+ def wrap_model_stream_async() -> None:
+     """Returns None. Replaces `Model.stream_async` with the instrumented wrapper."""
+     global _MODEL_STREAM_ASYNC_WRAPPED
+     if _MODEL_STREAM_ASYNC_WRAPPED:
+         return
+     Model.stream_async = _instrumented_model_stream_async
+     _MODEL_STREAM_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_stream_async() -> None:
+     """Returns None. Restores the original `Model.stream_async` implementation."""
+     global _MODEL_STREAM_ASYNC_WRAPPED
+     if not _MODEL_STREAM_ASYNC_WRAPPED:
+         return
+     Model.stream_async = _ORIGINAL_MODEL_STREAM_ASYNC
+     _MODEL_STREAM_ASYNC_WRAPPED = False
+
+
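
The async stream wrapper hands its response to the context-typed helper via `typing.cast`; the cast is purely a static-typing assertion and returns its argument unchanged at runtime. For example:

    from typing import cast

    value: object = [1, 2, 3]
    ints = cast("list[int]", value)  # no runtime check or conversion
    assert ints is value
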
+ # =============================================================================
+ # Model.context_stream instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_context_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: None = None,
+ ) -> ContextStreamResponse[DepsT, None]: ...
+
+
+ @overload
+ def _instrumented_model_context_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> ContextStreamResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_context_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> (
+     ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+ ): ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_STREAM)
+ def _instrumented_model_context_stream(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[Tool | ContextTool[DepsT]] | ContextToolkit[DepsT] | None = None,
+     format: FormatParam = None,
+ ) -> ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.context_stream`."""
+     messages = promote_to_messages(content)
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         response = _ORIGINAL_MODEL_CONTEXT_STREAM(
+             self,
+             content,
+             ctx=ctx,
+             tools=tools,
+             format=format,
+         )
+         span_cm.__exit__(None, None, None)
+         return response
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             response = _ORIGINAL_MODEL_CONTEXT_STREAM(
+                 self,
+                 content,
+                 ctx=ctx,
+                 tools=tools,
+                 format=format,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_stream_span_handlers(
+             response=response,
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return response
+
+
+ def wrap_model_context_stream() -> None:
+     """Returns None. Replaces `Model.context_stream` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_STREAM_WRAPPED
+     if _MODEL_CONTEXT_STREAM_WRAPPED:
+         return
+     Model.context_stream = _instrumented_model_context_stream
+     _MODEL_CONTEXT_STREAM_WRAPPED = True
+
+
+ def unwrap_model_context_stream() -> None:
+     """Returns None. Restores the original `Model.context_stream` implementation."""
+     global _MODEL_CONTEXT_STREAM_WRAPPED
+     if not _MODEL_CONTEXT_STREAM_WRAPPED:
+         return
+     Model.context_stream = _ORIGINAL_MODEL_CONTEXT_STREAM
+     _MODEL_CONTEXT_STREAM_WRAPPED = False
+
+
+ # =============================================================================
+ # Model.context_stream_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_context_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: None = None,
+ ) -> AsyncContextStreamResponse[DepsT, None]: ...
+
+
+ @overload
+ async def _instrumented_model_context_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: type[FormattableT] | FormatParam,
+ ) -> AsyncContextStreamResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_context_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+ ) -> (
+     AsyncContextStreamResponse[DepsT, None]
+     | AsyncContextStreamResponse[DepsT, FormattableT]
+ ): ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_STREAM_ASYNC)
+ async def _instrumented_model_context_stream_async(
+     self: Model,
+     content: UserContent | Sequence[Message],
+     *,
+     ctx: Context[DepsT],
+     tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+     | AsyncContextToolkit[DepsT]
+     | None = None,
+     format: FormatParam = None,
+ ) -> (
+     AsyncContextStreamResponse[DepsT, None]
+     | AsyncContextStreamResponse[DepsT, FormattableT]
+ ):
+     """Returns a GenAI-instrumented result of `Model.context_stream_async`."""
+     messages = promote_to_messages(content)
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=tools,
+         format=format,
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         response = await _ORIGINAL_MODEL_CONTEXT_STREAM_ASYNC(
+             self,
+             content,
+             ctx=ctx,
+             tools=tools,
+             format=format,
+         )
+         span_cm.__exit__(None, None, None)
+         return response
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             response = await _ORIGINAL_MODEL_CONTEXT_STREAM_ASYNC(
+                 self,
+                 content,
+                 ctx=ctx,
+                 tools=tools,
+                 format=format,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_async_stream_span_handlers(
+             response=response,
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return response
+
+
+ def wrap_model_context_stream_async() -> None:
+     """Returns None. Replaces `Model.context_stream_async` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED
+     if _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED:
+         return
+     Model.context_stream_async = _instrumented_model_context_stream_async
+     _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_context_stream_async() -> None:
+     """Returns None. Restores the original `Model.context_stream_async` implementation."""
+     global _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED
+     if not _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED:
+         return
+     Model.context_stream_async = _ORIGINAL_MODEL_CONTEXT_STREAM_ASYNC
+     _MODEL_CONTEXT_STREAM_ASYNC_WRAPPED = False
+
+
+ # =============================================================================
+ # Model.resume instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_resume(
+     self: Model,
+     *,
+     response: Response,
+     content: UserContent,
+ ) -> Response: ...
+
+
+ @overload
+ def _instrumented_model_resume(
+     self: Model,
+     *,
+     response: Response[FormattableT],
+     content: UserContent,
+ ) -> Response[FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_resume(
+     self: Model,
+     *,
+     response: Response | Response[FormattableT],
+     content: UserContent,
+ ) -> Response | Response[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_RESUME)
+ def _instrumented_model_resume(
+     self: Model,
+     *,
+     response: Response | Response[FormattableT],
+     content: UserContent,
+ ) -> Response | Response[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.resume`."""
+     messages = list(response.messages) + [user(content)]
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+     ) as span_ctx:
+         result = _ORIGINAL_MODEL_RESUME(
+             self,
+             response=response,
+             content=content,
+         )
+         if span_ctx.span is not None:
+             attach_response(
+                 span_ctx.span,
+                 cast("RootResponse[Any, FormattableT | None]", result),
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return result
+
+
+ def wrap_model_resume() -> None:
+     """Returns None. Replaces `Model.resume` with the instrumented wrapper."""
+     global _MODEL_RESUME_WRAPPED
+     if _MODEL_RESUME_WRAPPED:
+         return
+     Model.resume = _instrumented_model_resume
+     _MODEL_RESUME_WRAPPED = True
+
+
+ def unwrap_model_resume() -> None:
+     """Returns None. Restores the original `Model.resume` implementation."""
+     global _MODEL_RESUME_WRAPPED
+     if not _MODEL_RESUME_WRAPPED:
+         return
+     Model.resume = _ORIGINAL_MODEL_RESUME
+     _MODEL_RESUME_WRAPPED = False
+
+
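
The resume wrappers rebuild the request history themselves: the prior response's messages plus one new user turn built with the `user(...)` helper imported above, so the span records the full conversation being resumed. Schematically, with plain dicts standing in for mirascope's `Message` objects:

    def user(content: str) -> dict[str, str]:  # stand-in for the imported helper
        return {"role": "user", "content": content}

    prior_messages = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello! How can I help?"},
    ]

    # Mirrors `messages = list(response.messages) + [user(content)]` above.
    messages = list(prior_messages) + [user("Summarize our chat")]
    assert messages[-1]["role"] == "user"
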
+ # =============================================================================
+ # Model.resume_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_resume_async(
+     self: Model,
+     *,
+     response: AsyncResponse,
+     content: UserContent,
+ ) -> AsyncResponse: ...
+
+
+ @overload
+ async def _instrumented_model_resume_async(
+     self: Model,
+     *,
+     response: AsyncResponse[FormattableT],
+     content: UserContent,
+ ) -> AsyncResponse[FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_resume_async(
+     self: Model,
+     *,
+     response: AsyncResponse | AsyncResponse[FormattableT],
+     content: UserContent,
+ ) -> AsyncResponse | AsyncResponse[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_RESUME_ASYNC)
+ async def _instrumented_model_resume_async(
+     self: Model,
+     *,
+     response: AsyncResponse | AsyncResponse[FormattableT],
+     content: UserContent,
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.resume_async`."""
+     messages = list(response.messages) + [user(content)]
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=True,
+     ) as span_ctx:
+         result = await _ORIGINAL_MODEL_RESUME_ASYNC(
+             self,
+             response=response,
+             content=content,
+         )
+         if span_ctx.span is not None:
+             await attach_response_async(
+                 span_ctx.span,
+                 cast("RootResponse[Any, FormattableT | None]", result),
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return result
+
+
+ def wrap_model_resume_async() -> None:
+     """Returns None. Replaces `Model.resume_async` with the instrumented wrapper."""
+     global _MODEL_RESUME_ASYNC_WRAPPED
+     if _MODEL_RESUME_ASYNC_WRAPPED:
+         return
+     Model.resume_async = _instrumented_model_resume_async
+     _MODEL_RESUME_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_resume_async() -> None:
+     """Returns None. Restores the original `Model.resume_async` implementation."""
+     global _MODEL_RESUME_ASYNC_WRAPPED
+     if not _MODEL_RESUME_ASYNC_WRAPPED:
+         return
+     Model.resume_async = _ORIGINAL_MODEL_RESUME_ASYNC
+     _MODEL_RESUME_ASYNC_WRAPPED = False
+
+
+ # =============================================================================
+ # Model.context_resume instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_context_resume(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextResponse[DepsT, None],
+     content: UserContent,
+ ) -> ContextResponse[DepsT, None]: ...
+
+
+ @overload
+ def _instrumented_model_context_resume(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> ContextResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_context_resume(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_RESUME)
+ def _instrumented_model_context_resume(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.context_resume`."""
+     messages = list(response.messages) + [user(content)]
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=True,
+     ) as span_ctx:
+         result = _ORIGINAL_MODEL_CONTEXT_RESUME(
+             self,
+             ctx=ctx,
+             response=response,
+             content=content,
+         )
+         if span_ctx.span is not None:
+             attach_response(
+                 span_ctx.span,
+                 cast("RootResponse[Any, FormattableT | None]", result),
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return result
+
+
+ def wrap_model_context_resume() -> None:
+     """Returns None. Replaces `Model.context_resume` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_RESUME_WRAPPED
+     if _MODEL_CONTEXT_RESUME_WRAPPED:
+         return
+     Model.context_resume = _instrumented_model_context_resume
+     _MODEL_CONTEXT_RESUME_WRAPPED = True
+
+
+ def unwrap_model_context_resume() -> None:
+     """Returns None. Restores the original `Model.context_resume` implementation."""
+     global _MODEL_CONTEXT_RESUME_WRAPPED
+     if not _MODEL_CONTEXT_RESUME_WRAPPED:
+         return
+     Model.context_resume = _ORIGINAL_MODEL_CONTEXT_RESUME
+     _MODEL_CONTEXT_RESUME_WRAPPED = False
+
+
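Because `@wraps` copies `__wrapped__` onto the wrapper and the unwrap helper reassigns the saved original, the restore semantics can be checked directly. A minimal sketch, assuming test code with access to this module's private globals; all names come from the code above:

    wrap_model_context_resume()
    wrap_model_context_resume()   # guarded by the flag: the second call is a no-op
    assert Model.context_resume.__wrapped__ is _ORIGINAL_MODEL_CONTEXT_RESUME

    unwrap_model_context_resume()
    assert Model.context_resume is _ORIGINAL_MODEL_CONTEXT_RESUME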
+ # =============================================================================
+ # Model.context_resume_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_context_resume_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextResponse[DepsT, None],
+     content: UserContent,
+ ) -> AsyncContextResponse[DepsT, None]: ...
+
+
+ @overload
+ async def _instrumented_model_context_resume_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> AsyncContextResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_context_resume_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextResponse[DepsT, None]
+     | AsyncContextResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_RESUME_ASYNC)
+ async def _instrumented_model_context_resume_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextResponse[DepsT, None]
+     | AsyncContextResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.context_resume_async`."""
+     messages = list(response.messages) + [user(content)]
+     with start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=True,
+     ) as span_ctx:
+         result = await _ORIGINAL_MODEL_CONTEXT_RESUME_ASYNC(
+             self,
+             ctx=ctx,
+             response=response,
+             content=content,
+         )
+         if span_ctx.span is not None:
+             await attach_response_async(
+                 span_ctx.span,
+                 cast("RootResponse[Any, FormattableT | None]", result),
+                 request_messages=messages,
+             )
+             record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+         return result
+
+
+ def wrap_model_context_resume_async() -> None:
+     """Returns None. Replaces `Model.context_resume_async` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED
+     if _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED:
+         return
+     Model.context_resume_async = _instrumented_model_context_resume_async
+     _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_context_resume_async() -> None:
+     """Returns None. Restores the original `Model.context_resume_async` implementation."""
+     global _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED
+     if not _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED:
+         return
+     Model.context_resume_async = _ORIGINAL_MODEL_CONTEXT_RESUME_ASYNC
+     _MODEL_CONTEXT_RESUME_ASYNC_WRAPPED = False
+
+
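Each wrapper rebuilds the request as the prior turn's history plus the new user turn, and reuses the toolkit and format captured on the response, so the resumed call is traced with the same inputs the provider actually sees. A sketch of that reconstruction; the string content is illustrative:

    # `user` wraps content in a user-role message; `response.messages` is the
    # recorded history from the turn being resumed.
    messages = list(response.messages) + [user("Explain that more simply.")]
    # The span then reports these messages plus the response's toolkit and
    # format, mirroring what the original resume call sends to the provider.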
+ # =============================================================================
+ # Model.resume_stream instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_resume_stream(
+     self: Model,
+     *,
+     response: StreamResponse,
+     content: UserContent,
+ ) -> StreamResponse: ...
+
+
+ @overload
+ def _instrumented_model_resume_stream(
+     self: Model,
+     *,
+     response: StreamResponse[FormattableT],
+     content: UserContent,
+ ) -> StreamResponse[FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_resume_stream(
+     self: Model,
+     *,
+     response: StreamResponse | StreamResponse[FormattableT],
+     content: UserContent,
+ ) -> StreamResponse | StreamResponse[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_RESUME_STREAM)
+ def _instrumented_model_resume_stream(
+     self: Model,
+     *,
+     response: StreamResponse | StreamResponse[FormattableT],
+     content: UserContent,
+ ) -> StreamResponse | StreamResponse[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.resume_stream`."""
+     messages = list(response.messages) + [user(content)]
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         result = _ORIGINAL_MODEL_RESUME_STREAM(
+             self,
+             response=response,
+             content=content,
+         )
+         span_cm.__exit__(None, None, None)
+         return result
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             result = _ORIGINAL_MODEL_RESUME_STREAM(
+                 self,
+                 response=response,
+                 content=content,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_stream_span_handlers(
+             response=cast("StreamResponse[FormattableT | None]", result),
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return result
+
+
+ def wrap_model_resume_stream() -> None:
+     """Returns None. Replaces `Model.resume_stream` with the instrumented wrapper."""
+     global _MODEL_RESUME_STREAM_WRAPPED
+     if _MODEL_RESUME_STREAM_WRAPPED:
+         return
+     Model.resume_stream = _instrumented_model_resume_stream
+     _MODEL_RESUME_STREAM_WRAPPED = True
+
+
+ def unwrap_model_resume_stream() -> None:
+     """Returns None. Restores the original `Model.resume_stream` implementation."""
+     global _MODEL_RESUME_STREAM_WRAPPED
+     if not _MODEL_RESUME_STREAM_WRAPPED:
+         return
+     Model.resume_stream = _ORIGINAL_MODEL_RESUME_STREAM
+     _MODEL_RESUME_STREAM_WRAPPED = False
+
+
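The streaming wrappers cannot use a plain `with` block: the span must stay open after the wrapper returns, until the caller finishes consuming the stream. That is why `span_cm.__enter__()` is called manually, `use_span(..., end_on_exit=False)` only makes the span current for the duration of the original call, and closing is delegated to the attached handlers. A toy illustration of the deferred-close pattern with a plain context manager; all names here are illustrative, not mirascope's:

    from collections.abc import Iterator
    from contextlib import contextmanager

    @contextmanager
    def toy_span() -> Iterator[str]:
        print("span opened")
        try:
            yield "span"
        finally:
            print("span closed")

    def start_streaming() -> Iterator[int]:
        cm = toy_span()
        cm.__enter__()  # open the span now; it must outlive this function
        def gen() -> Iterator[int]:
            try:
                yield from range(3)
            finally:
                cm.__exit__(None, None, None)  # close once the stream is drained
        return gen()

    for chunk in start_streaming():
        print(chunk)  # 0, 1, 2 between "span opened" and "span closed"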
+ # =============================================================================
+ # Model.resume_stream_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_resume_stream_async(
+     self: Model,
+     *,
+     response: AsyncStreamResponse,
+     content: UserContent,
+ ) -> AsyncStreamResponse: ...
+
+
+ @overload
+ async def _instrumented_model_resume_stream_async(
+     self: Model,
+     *,
+     response: AsyncStreamResponse[FormattableT],
+     content: UserContent,
+ ) -> AsyncStreamResponse[FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_resume_stream_async(
+     self: Model,
+     *,
+     response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+     content: UserContent,
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]: ...
+
+
+ @wraps(_ORIGINAL_MODEL_RESUME_STREAM_ASYNC)
+ async def _instrumented_model_resume_stream_async(
+     self: Model,
+     *,
+     response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+     content: UserContent,
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.resume_stream_async`."""
+     messages = list(response.messages) + [user(content)]
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         result = await _ORIGINAL_MODEL_RESUME_STREAM_ASYNC(
+             self,
+             response=response,
+             content=content,
+         )
+         span_cm.__exit__(None, None, None)
+         return result
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             result = await _ORIGINAL_MODEL_RESUME_STREAM_ASYNC(
+                 self,
+                 response=response,
+                 content=content,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_async_stream_span_handlers(
+             response=cast("AsyncStreamResponse[FormattableT | None]", result),
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return result
+
+
+ def wrap_model_resume_stream_async() -> None:
+     """Returns None. Replaces `Model.resume_stream_async` with the instrumented wrapper."""
+     global _MODEL_RESUME_STREAM_ASYNC_WRAPPED
+     if _MODEL_RESUME_STREAM_ASYNC_WRAPPED:
+         return
+     Model.resume_stream_async = _instrumented_model_resume_stream_async
+     _MODEL_RESUME_STREAM_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_resume_stream_async() -> None:
+     """Returns None. Restores the original `Model.resume_stream_async` implementation."""
+     global _MODEL_RESUME_STREAM_ASYNC_WRAPPED
+     if not _MODEL_RESUME_STREAM_ASYNC_WRAPPED:
+         return
+     Model.resume_stream_async = _ORIGINAL_MODEL_RESUME_STREAM_ASYNC
+     _MODEL_RESUME_STREAM_ASYNC_WRAPPED = False
+
+
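`_attach_async_stream_span_handlers` is defined earlier in this module; from its call site, its job is to end the span once the async stream is exhausted or fails. A hypothetical sketch of that shape (the real implementation may differ; the wrapping-generator approach here is an assumption):

    from collections.abc import AsyncIterator
    from contextlib import AbstractContextManager

    async def traced_stream(
        stream: AsyncIterator[bytes],
        span_cm: AbstractContextManager[object],
    ) -> AsyncIterator[bytes]:
        # Hypothetical: re-yield chunks, closing the span context manager on
        # completion or error, mirroring the deferred-close pattern above.
        try:
            async for chunk in stream:
                yield chunk
        except Exception as exc:
            span_cm.__exit__(type(exc), exc, exc.__traceback__)
            raise
        else:
            span_cm.__exit__(None, None, None)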
+ # =============================================================================
+ # Model.context_resume_stream instrumentation
+ # =============================================================================
+
+
+ @overload
+ def _instrumented_model_context_resume_stream(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextStreamResponse[DepsT, None],
+     content: UserContent,
+ ) -> ContextStreamResponse[DepsT, None]: ...
+
+
+ @overload
+ def _instrumented_model_context_resume_stream(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextStreamResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> ContextStreamResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ def _instrumented_model_context_resume_stream(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextStreamResponse[DepsT, None]
+     | ContextStreamResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> (
+     ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+ ): ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_RESUME_STREAM)
+ def _instrumented_model_context_resume_stream(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: ContextStreamResponse[DepsT, None]
+     | ContextStreamResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]:
+     """Returns a GenAI-instrumented result of `Model.context_resume_stream`."""
+     messages = list(response.messages) + [user(content)]
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         result = _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM(
+             self,
+             ctx=ctx,
+             response=response,
+             content=content,
+         )
+         span_cm.__exit__(None, None, None)
+         return result
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             result = _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM(
+                 self,
+                 ctx=ctx,
+                 response=response,
+                 content=content,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_stream_span_handlers(
+             response=cast("ContextStreamResponse[Any, FormattableT | None]", result),
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return result
+
+
+ def wrap_model_context_resume_stream() -> None:
+     """Returns None. Replaces `Model.context_resume_stream` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_RESUME_STREAM_WRAPPED
+     if _MODEL_CONTEXT_RESUME_STREAM_WRAPPED:
+         return
+     Model.context_resume_stream = _instrumented_model_context_resume_stream
+     _MODEL_CONTEXT_RESUME_STREAM_WRAPPED = True
+
+
+ def unwrap_model_context_resume_stream() -> None:
+     """Returns None. Restores the original `Model.context_resume_stream` implementation."""
+     global _MODEL_CONTEXT_RESUME_STREAM_WRAPPED
+     if not _MODEL_CONTEXT_RESUME_STREAM_WRAPPED:
+         return
+     Model.context_resume_stream = _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM
+     _MODEL_CONTEXT_RESUME_STREAM_WRAPPED = False
+
+
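The three-overload ladder repeats for every method so static checkers keep the `None` vs `FormattableT` distinction instead of collapsing everything to the union. A minimal reproduction of the pattern with stand-in types (not mirascope's):

    from typing import Generic, TypeVar, overload

    T = TypeVar("T")

    class Resp(Generic[T]): ...

    @overload
    def resume(r: Resp[None]) -> Resp[None]: ...
    @overload
    def resume(r: Resp[T]) -> Resp[T]: ...
    @overload
    def resume(r: Resp[None] | Resp[T]) -> Resp[None] | Resp[T]: ...
    def resume(r: Resp[None] | Resp[T]) -> Resp[None] | Resp[T]:
        return r  # runtime body; only the overloads affect static checking

The first two overloads echo the caller's concrete type parameter back; the third exists for callers that already hold the union.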
+ # =============================================================================
+ # Model.context_resume_stream_async instrumentation
+ # =============================================================================
+
+
+ @overload
+ async def _instrumented_model_context_resume_stream_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextStreamResponse[DepsT, None],
+     content: UserContent,
+ ) -> AsyncContextStreamResponse[DepsT, None]: ...
+
+
+ @overload
+ async def _instrumented_model_context_resume_stream_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextStreamResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> AsyncContextStreamResponse[DepsT, FormattableT]: ...
+
+
+ @overload
+ async def _instrumented_model_context_resume_stream_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextStreamResponse[DepsT, None]
+     | AsyncContextStreamResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> (
+     AsyncContextStreamResponse[DepsT, None]
+     | AsyncContextStreamResponse[DepsT, FormattableT]
+ ): ...
+
+
+ @wraps(_ORIGINAL_MODEL_CONTEXT_RESUME_STREAM_ASYNC)
+ async def _instrumented_model_context_resume_stream_async(
+     self: Model,
+     *,
+     ctx: Context[DepsT],
+     response: AsyncContextStreamResponse[DepsT, None]
+     | AsyncContextStreamResponse[DepsT, FormattableT],
+     content: UserContent,
+ ) -> (
+     AsyncContextStreamResponse[DepsT, None]
+     | AsyncContextStreamResponse[DepsT, FormattableT]
+ ):
+     """Returns a GenAI-instrumented result of `Model.context_resume_stream_async`."""
+     messages = list(response.messages) + [user(content)]
+     span_cm = start_model_span(
+         self,
+         messages=messages,
+         tools=cast(ToolsParam, response.toolkit),
+         format=cast(FormatParam, response.format),
+         activate=False,
+     )
+     span_ctx = span_cm.__enter__()
+     if span_ctx.span is None:
+         result = await _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM_ASYNC(
+             self,
+             ctx=ctx,
+             response=response,
+             content=content,
+         )
+         span_cm.__exit__(None, None, None)
+         return result
+
+     try:
+         with otel_trace.use_span(span_ctx.span, end_on_exit=False):
+             result = await _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM_ASYNC(
+                 self,
+                 ctx=ctx,
+                 response=response,
+                 content=content,
+             )
+     except Exception as exc:
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     record_dropped_params(span_ctx.span, span_ctx.dropped_params)
+
+     try:
+         _attach_async_stream_span_handlers(
+             response=cast(
+                 "AsyncContextStreamResponse[Any, FormattableT | None]", result
+             ),
+             span_cm=span_cm,
+             span=span_ctx.span,
+             request_messages=messages,
+         )
+     except Exception as exc:  # pragma: no cover
+         span_cm.__exit__(type(exc), exc, exc.__traceback__)
+         raise
+
+     return result
+
+
+ def wrap_model_context_resume_stream_async() -> None:
+     """Returns None. Replaces `Model.context_resume_stream_async` with the instrumented wrapper."""
+     global _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED
+     if _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED:
+         return
+     Model.context_resume_stream_async = _instrumented_model_context_resume_stream_async
+     _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED = True
+
+
+ def unwrap_model_context_resume_stream_async() -> None:
+     """Returns None. Restores the original `Model.context_resume_stream_async` implementation."""
+     global _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED
+     if not _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED:
+         return
+     Model.context_resume_stream_async = _ORIGINAL_MODEL_CONTEXT_RESUME_STREAM_ASYNC
+     _MODEL_CONTEXT_RESUME_STREAM_ASYNC_WRAPPED = False
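The seven wrap/unwrap pairs are independent and idempotent, so callers that want all resume variants traced could apply them as a set. A hypothetical aggregator (not present in the diff; only the wrap/unwrap names below come from this module):

    # Hypothetical convenience wrappers over the pairs defined above.
    _RESUME_PATCHES = (
        (wrap_model_resume_async, unwrap_model_resume_async),
        (wrap_model_context_resume, unwrap_model_context_resume),
        (wrap_model_context_resume_async, unwrap_model_context_resume_async),
        (wrap_model_resume_stream, unwrap_model_resume_stream),
        (wrap_model_resume_stream_async, unwrap_model_resume_stream_async),
        (wrap_model_context_resume_stream, unwrap_model_context_resume_stream),
        (wrap_model_context_resume_stream_async, unwrap_model_context_resume_stream_async),
    )

    def instrument_resume_methods() -> None:
        for wrap, _ in _RESUME_PATCHES:
            wrap()

    def uninstrument_resume_methods() -> None:
        for _, unwrap in _RESUME_PATCHES:
            unwrap()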