mirascope 2.0.1__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mirascope/_stubs.py +39 -18
- mirascope/_utils.py +34 -0
- mirascope/api/_generated/__init__.py +4 -0
- mirascope/api/_generated/organization_invitations/client.py +2 -2
- mirascope/api/_generated/organization_invitations/raw_client.py +2 -2
- mirascope/api/_generated/project_memberships/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/client.py +91 -0
- mirascope/api/_generated/project_memberships/raw_client.py +239 -0
- mirascope/api/_generated/project_memberships/types/__init__.py +4 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
- mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
- mirascope/api/_generated/reference.md +73 -1
- mirascope/llm/__init__.py +19 -0
- mirascope/llm/calls/calls.py +28 -21
- mirascope/llm/calls/decorator.py +17 -24
- mirascope/llm/formatting/__init__.py +2 -2
- mirascope/llm/formatting/format.py +2 -4
- mirascope/llm/formatting/types.py +19 -2
- mirascope/llm/models/models.py +66 -146
- mirascope/llm/prompts/decorator.py +5 -16
- mirascope/llm/prompts/prompts.py +35 -38
- mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -7
- mirascope/llm/providers/anthropic/_utils/beta_encode.py +22 -16
- mirascope/llm/providers/anthropic/_utils/decode.py +45 -7
- mirascope/llm/providers/anthropic/_utils/encode.py +28 -15
- mirascope/llm/providers/anthropic/beta_provider.py +33 -69
- mirascope/llm/providers/anthropic/provider.py +52 -91
- mirascope/llm/providers/base/_utils.py +4 -9
- mirascope/llm/providers/base/base_provider.py +89 -205
- mirascope/llm/providers/google/_utils/decode.py +51 -1
- mirascope/llm/providers/google/_utils/encode.py +38 -21
- mirascope/llm/providers/google/provider.py +33 -69
- mirascope/llm/providers/mirascope/provider.py +25 -61
- mirascope/llm/providers/mlx/encoding/base.py +3 -6
- mirascope/llm/providers/mlx/encoding/transformers.py +4 -8
- mirascope/llm/providers/mlx/mlx.py +9 -21
- mirascope/llm/providers/mlx/provider.py +33 -69
- mirascope/llm/providers/openai/completions/_utils/encode.py +39 -20
- mirascope/llm/providers/openai/completions/base_provider.py +34 -75
- mirascope/llm/providers/openai/provider.py +25 -61
- mirascope/llm/providers/openai/responses/_utils/decode.py +31 -2
- mirascope/llm/providers/openai/responses/_utils/encode.py +32 -17
- mirascope/llm/providers/openai/responses/provider.py +34 -75
- mirascope/llm/responses/__init__.py +2 -1
- mirascope/llm/responses/base_stream_response.py +4 -0
- mirascope/llm/responses/response.py +8 -12
- mirascope/llm/responses/stream_response.py +8 -12
- mirascope/llm/responses/usage.py +44 -0
- mirascope/llm/tools/__init__.py +24 -0
- mirascope/llm/tools/provider_tools.py +18 -0
- mirascope/llm/tools/tool_schema.py +11 -4
- mirascope/llm/tools/toolkit.py +24 -6
- mirascope/llm/tools/types.py +112 -0
- mirascope/llm/tools/web_search_tool.py +32 -0
- mirascope/ops/__init__.py +19 -1
- mirascope/ops/_internal/closure.py +4 -1
- mirascope/ops/_internal/exporters/exporters.py +13 -46
- mirascope/ops/_internal/exporters/utils.py +37 -0
- mirascope/ops/_internal/instrumentation/__init__.py +20 -0
- mirascope/ops/_internal/instrumentation/llm/common.py +19 -49
- mirascope/ops/_internal/instrumentation/llm/model.py +61 -82
- mirascope/ops/_internal/instrumentation/llm/serialize.py +36 -12
- mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
- mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
- mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
- mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
- mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
- mirascope/ops/_internal/traced_calls.py +14 -0
- mirascope/ops/_internal/traced_functions.py +7 -2
- mirascope/ops/_internal/utils.py +12 -4
- mirascope/ops/_internal/versioned_functions.py +1 -1
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/METADATA +96 -68
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/RECORD +75 -64
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/WHEEL +0 -0
- {mirascope-2.0.1.dist-info → mirascope-2.1.0.dist-info}/licenses/LICENSE +0 -0
```diff
--- a/mirascope/ops/_internal/instrumentation/llm/common.py
+++ b/mirascope/ops/_internal/instrumentation/llm/common.py
@@ -17,10 +17,11 @@ from opentelemetry.semconv.attributes import error_attributes as ErrorAttributes
 from opentelemetry.trace import SpanKind, Status, StatusCode
 
 from .....llm import (
-    AnyToolFn,
+    AnyTools,
     AnyToolSchema,
     BaseToolkit,
     Format,
+    FormatSpec,
     FormattableT,
     Jsonable,
     Message,
@@ -28,9 +29,9 @@ from .....llm import (
     ModelId,
     Params,
     ProviderId,
+    ProviderTool,
     RootResponse,
     ToolkitT,
-    ToolSchema,
 )
 from ...configuration import get_tracer
 from ...utils import json_dumps
@@ -45,6 +46,7 @@ from .serialize import (
     serialize_mirascope_cost,
     serialize_mirascope_messages,
     serialize_mirascope_usage,
+    serialize_tools,
 )
 
 logger = logging.getLogger(__name__)
@@ -64,10 +66,7 @@ else:
     Tracer = None
 
 
-ToolsParam: TypeAlias = (
-    Sequence[ToolSchema[AnyToolFn]] | BaseToolkit[AnyToolSchema] | None
-)
-FormatParam: TypeAlias = Format[FormattableT] | None
+FormatParam: TypeAlias = FormatSpec[FormattableT] | None
 ParamsDict: TypeAlias = Mapping[str, str | int | float | bool | Sequence[str] | None]
 SpanAttributes: TypeAlias = Mapping[str, AttributeValue]
 AttributeSetter: TypeAlias = Callable[[str, AttributeValue], None]
@@ -180,53 +179,13 @@ def _assign_request_message_attributes(
     )
 
 
-def _collect_tool_schemas(
-    tools: Sequence[ToolSchema[AnyToolFn]] | BaseToolkit[AnyToolSchema],
-) -> list[ToolSchema[AnyToolFn]]:
-    """Collect ToolSchema instances from a tools parameter."""
-    iterable = list(tools.tools) if isinstance(tools, BaseToolkit) else list(tools)
-    schemas: list[ToolSchema[AnyToolFn]] = []
-    for tool in iterable:
-        if isinstance(tool, ToolSchema):
-            schemas.append(tool)
-    return schemas
-
-
-def _serialize_tool_definitions(
-    tools: ToolsParam,
-    format: FormatParam = None,
-) -> str | None:
-    """Serialize tool definitions to JSON for span attributes."""
-    if tools is None:
-        tool_schemas: list[ToolSchema[AnyToolFn]] = []
-    else:
-        tool_schemas = _collect_tool_schemas(tools)
-
-    if isinstance(format, Format) and format.mode == "tool":
-        tool_schemas.append(format.create_tool_schema())
-
-    if not tool_schemas:
-        return None
-    definitions: list[dict[str, str | int | bool | dict[str, str | int | bool]]] = []
-    for tool in tool_schemas:
-        tool_def: dict[str, str | int | bool | dict[str, str | int | bool]] = {
-            "name": tool.name,
-            "description": tool.description,
-            "parameters": tool.parameters.model_dump(by_alias=True, mode="json"),
-        }
-        if tool.strict is not None:
-            tool_def["strict"] = tool.strict
-        definitions.append(tool_def)
-    return json_dumps(definitions)
-
-
 def _build_request_attributes(
     *,
     operation: str,
     provider: ProviderId,
     model_id: ModelId,
     messages: Sequence[Message],
-    tools: ToolsParam,
+    tools: AnyTools | None,
     format: FormatParam,
     params: ParamsDict,
 ) -> dict[str, AttributeValue]:
@@ -244,7 +203,18 @@ def _build_request_attributes(
         messages=messages,
     )
 
-    tool_payload = _serialize_tool_definitions(tools, format)
+    tool_schemas: list[AnyToolSchema | ProviderTool] = []
+    if tools is None:
+        tool_schemas = []
+    elif isinstance(tools, BaseToolkit):
+        tool_schemas = list(tools.tools)
+    else:
+        tool_schemas = list(tools)
+
+    if isinstance(format, Format) and format.mode == "tool":
+        tool_schemas.append(format.create_tool_schema())
+
+    tool_payload = serialize_tools(tool_schemas)
     if tool_payload:
         # The incubating semconv module does not yet expose a constant for this key.
         attrs["gen_ai.tool.definitions"] = tool_payload
@@ -477,7 +447,7 @@ def start_model_span(
     model: Model,
     *,
     messages: Sequence[Message],
-    tools: ToolsParam,
+    tools: AnyTools | None,
     format: FormatParam,
     activate: bool = True,
 ) -> Iterator[SpanContext]:
```
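The net effect in `common.py`: the private `_collect_tool_schemas` / `_serialize_tool_definitions` helpers are gone, `tools` is typed with the new `AnyTools` alias, and flattening now happens inline before delegating to the shared `serialize_tools` from `serialize.py`. Below is a minimal standalone sketch of that flattening logic; `FakeToolkit` and `FakeFormat` are illustrative stand-ins, not mirascope's actual types:

```python
from collections.abc import Sequence
from dataclasses import dataclass, field


# Stand-ins for mirascope's BaseToolkit / Format; names and fields here are
# illustrative only, not the library's real definitions.
@dataclass
class FakeToolkit:
    tools: list[object] = field(default_factory=list)


@dataclass
class FakeFormat:
    mode: str = "tool"

    def create_tool_schema(self) -> object:
        return {"name": "format_tool"}


def flatten_tools(
    tools: Sequence[object] | FakeToolkit | None,
    format: FakeFormat | None,
) -> list[object]:
    """Mirror the inlined collection step: toolkit or sequence -> flat list,
    plus the synthetic tool injected when format.mode == "tool"."""
    schemas = list(tools.tools) if isinstance(tools, FakeToolkit) else list(tools or [])
    if format is not None and format.mode == "tool":
        schemas.append(format.create_tool_schema())
    return schemas


assert flatten_tools(None, None) == []
assert len(flatten_tools(FakeToolkit(tools=[object()]), FakeFormat())) == 2
```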
```diff
--- a/mirascope/ops/_internal/instrumentation/llm/model.py
+++ b/mirascope/ops/_internal/instrumentation/llm/model.py
@@ -19,35 +19,30 @@ from opentelemetry import trace as otel_trace
 from .....llm import (
     AsyncContextResponse,
     AsyncContextStreamResponse,
-    …
-    AsyncContextToolkit,
+    AsyncContextTools,
     AsyncResponse,
     AsyncStreamResponse,
-    …
-    AsyncToolkit,
+    AsyncTools,
     Context,
     ContextResponse,
     ContextStreamResponse,
-    …
-    ContextToolkit,
+    ContextTools,
     DepsT,
+    FormatSpec,
     FormattableT,
     Message,
     Model,
-    OutputParser,
     Response,
     RootResponse,
     StreamResponse,
     StreamResponseChunk,
-    …
-    Toolkit,
+    Tools,
     UserContent,
 )
 from .....llm.messages import promote_to_messages, user
 from .common import (
     FormatParam,
     SpanContext,
-    ToolsParam,
     attach_response,
     attach_response_async,
     record_dropped_params,
@@ -108,7 +103,7 @@ def _instrumented_model_call(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: Tools | None = None,
     format: None = None,
 ) -> Response: ...
 
@@ -118,8 +113,8 @@ def _instrumented_model_call(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: Tools | None = None,
+    format: FormatSpec[FormattableT],
 ) -> Response[FormattableT]: ...
 
 
@@ -128,8 +123,8 @@ def _instrumented_model_call(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: Tools | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> Response | Response[FormattableT]: ...
 
 
@@ -138,7 +133,7 @@ def _instrumented_model_call(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: Tools | None = None,
     format: FormatParam = None,
 ) -> Response | Response[FormattableT]:
     """Returns a GenAI-instrumented result of `Model.call`."""
@@ -193,7 +188,7 @@ async def _instrumented_model_call_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: AsyncTools | None = None,
     format: None = None,
 ) -> AsyncResponse: ...
 
@@ -203,8 +198,8 @@ async def _instrumented_model_call_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: AsyncTools | None = None,
+    format: FormatSpec[FormattableT],
 ) -> AsyncResponse[FormattableT]: ...
 
 
@@ -213,8 +208,8 @@ async def _instrumented_model_call_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: AsyncTools | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> AsyncResponse | AsyncResponse[FormattableT]: ...
 
 
@@ -223,7 +218,7 @@ async def _instrumented_model_call_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: AsyncTools | None = None,
     format: FormatParam = None,
 ) -> AsyncResponse | AsyncResponse[FormattableT]:
     """Returns a GenAI-instrumented result of `Model.call_async`."""
@@ -280,7 +275,7 @@ def _instrumented_model_context_call(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
+    tools: ContextTools[DepsT] | None = None,
     format: None = None,
 ) -> ContextResponse[DepsT, None]: ...
 
@@ -291,8 +286,8 @@ def _instrumented_model_context_call(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
-    format: …,
+    tools: ContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT],
 ) -> ContextResponse[DepsT, FormattableT]: ...
 
 
@@ -302,8 +297,8 @@ def _instrumented_model_context_call(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
-    format: …,
+    tools: ContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]: ...
 
 
@@ -313,7 +308,7 @@ def _instrumented_model_context_call(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
+    tools: ContextTools[DepsT] | None = None,
     format: FormatParam = None,
 ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
     """Returns a GenAI-instrumented result of `Model.context_call`."""
@@ -371,9 +366,7 @@ async def _instrumented_model_context_call_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
+    tools: AsyncContextTools[DepsT] | None = None,
     format: None = None,
 ) -> AsyncContextResponse[DepsT, None]: ...
 
@@ -384,10 +377,8 @@ async def _instrumented_model_context_call_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
-    format: type[FormattableT] | FormatParam,
+    tools: AsyncContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT],
 ) -> AsyncContextResponse[DepsT, FormattableT]: ...
 
 
@@ -397,10 +388,8 @@ async def _instrumented_model_context_call_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
-    format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+    tools: AsyncContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]: ...
 
 
@@ -410,9 +399,7 @@ async def _instrumented_model_context_call_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
+    tools: AsyncContextTools[DepsT] | None = None,
     format: FormatParam = None,
 ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
     """Returns a GenAI-instrumented result of `Model.context_call_async`."""
@@ -567,7 +554,7 @@ def _instrumented_model_stream(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: Tools | None = None,
     format: None = None,
 ) -> StreamResponse: ...
 
@@ -577,8 +564,8 @@ def _instrumented_model_stream(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: Tools | None = None,
+    format: FormatSpec[FormattableT],
 ) -> StreamResponse[FormattableT]: ...
 
 
@@ -587,8 +574,8 @@ def _instrumented_model_stream(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: Tools | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> StreamResponse | StreamResponse[FormattableT]: ...
 
 
@@ -597,7 +584,7 @@ def _instrumented_model_stream(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: Tools | None = None,
     format: FormatParam = None,
 ) -> StreamResponse | StreamResponse[FormattableT]:
     """Returns a GenAI-instrumented result of `Model.stream`."""
@@ -676,7 +663,7 @@ async def _instrumented_model_stream_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: AsyncTools | None = None,
     format: None = None,
 ) -> AsyncStreamResponse: ...
 
@@ -686,8 +673,8 @@ async def _instrumented_model_stream_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: AsyncTools | None = None,
+    format: FormatSpec[FormattableT],
 ) -> AsyncStreamResponse[FormattableT]: ...
 
 
@@ -696,8 +683,8 @@ async def _instrumented_model_stream_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
-    format: …,
+    tools: AsyncTools | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]: ...
 
 
@@ -706,7 +693,7 @@ async def _instrumented_model_stream_async(
     self: Model,
     content: UserContent | Sequence[Message],
     *,
-    tools: …,
+    tools: AsyncTools | None = None,
     format: FormatParam = None,
 ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
     """Returns a GenAI-instrumented result of `Model.stream_async`."""
@@ -788,7 +775,7 @@ def _instrumented_model_context_stream(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
+    tools: ContextTools[DepsT] | None = None,
     format: None = None,
 ) -> ContextStreamResponse[DepsT, None]: ...
 
@@ -799,8 +786,8 @@ def _instrumented_model_context_stream(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
-    format: …,
+    tools: ContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT],
 ) -> ContextStreamResponse[DepsT, FormattableT]: ...
 
 
@@ -810,8 +797,8 @@ def _instrumented_model_context_stream(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
-    format: …,
+    tools: ContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> (
     ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
 ): ...
@@ -823,7 +810,7 @@ def _instrumented_model_context_stream(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …,
+    tools: ContextTools[DepsT] | None = None,
     format: FormatParam = None,
 ) -> ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]:
     """Returns a GenAI-instrumented result of `Model.context_stream`."""
@@ -905,9 +892,7 @@ async def _instrumented_model_context_stream_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
+    tools: AsyncContextTools[DepsT] | None = None,
     format: None = None,
 ) -> AsyncContextStreamResponse[DepsT, None]: ...
 
@@ -918,10 +903,8 @@ async def _instrumented_model_context_stream_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
-    format: type[FormattableT] | FormatParam,
+    tools: AsyncContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT],
 ) -> AsyncContextStreamResponse[DepsT, FormattableT]: ...
 
 
@@ -931,10 +914,8 @@ async def _instrumented_model_context_stream_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
-    format: type[FormattableT] | FormatParam | OutputParser[FormattableT] | None = None,
+    tools: AsyncContextTools[DepsT] | None = None,
+    format: FormatSpec[FormattableT] | None = None,
 ) -> (
     AsyncContextStreamResponse[DepsT, None]
     | AsyncContextStreamResponse[DepsT, FormattableT]
@@ -947,9 +928,7 @@ async def _instrumented_model_context_stream_async(
     content: UserContent | Sequence[Message],
     *,
     ctx: Context[DepsT],
-    tools: …
-    | AsyncContextToolkit[DepsT]
-    | None = None,
+    tools: AsyncContextTools[DepsT] | None = None,
     format: FormatParam = None,
 ) -> (
     AsyncContextStreamResponse[DepsT, None]
```
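Every `tools` parameter above collapses a spelled-out union (elided as `…` where the source truncated it) into one of four aliases: `Tools`, `AsyncTools`, `ContextTools[DepsT]`, or `AsyncContextTools[DepsT]`, and the `format` overloads now take a single `FormatSpec[FormattableT]`. A standalone sketch of how alias-based overloads like these resolve for callers; every type below is a stand-in, not mirascope's definition:

```python
from collections.abc import Sequence
from typing import TypeAlias, TypeVar, overload

FormattableT = TypeVar("FormattableT")

ToolSchema: TypeAlias = object             # stand-in for a single tool schema
Toolkit: TypeAlias = list[ToolSchema]      # stand-in for a toolkit container
Tools: TypeAlias = Sequence[ToolSchema] | Toolkit  # one alias replaces the union


class Response:
    """Stand-in for llm.Response."""


@overload
def call(*, tools: Tools | None = None, format: None = None) -> Response: ...
@overload
def call(*, tools: Tools | None = None, format: type[FormattableT]) -> FormattableT: ...
def call(*, tools: Tools | None = None, format: type | None = None):
    # Runtime behavior is irrelevant here; the point is overload resolution.
    return format() if format is not None else Response()


plain = call()             # a type checker picks the first overload -> Response
parsed = call(format=int)  # the second overload narrows the return type to int
```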
```diff
--- a/mirascope/ops/_internal/instrumentation/llm/model.py
+++ b/mirascope/ops/_internal/instrumentation/llm/model.py
@@ -1067,7 +1046,7 @@ def _instrumented_model_resume(
     with start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
     ) as span_ctx:
         result = _ORIGINAL_MODEL_RESUME(
@@ -1147,7 +1126,7 @@ async def _instrumented_model_resume_async(
     with start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=True,
     ) as span_ctx:
@@ -1232,7 +1211,7 @@ def _instrumented_model_context_resume(
     with start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=True,
     ) as span_ctx:
@@ -1320,7 +1299,7 @@ async def _instrumented_model_context_resume_async(
     with start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=True,
     ) as span_ctx:
@@ -1402,7 +1381,7 @@ def _instrumented_model_resume_stream(
     span_cm = start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=False,
     )
@@ -1505,7 +1484,7 @@ async def _instrumented_model_resume_stream_async(
     span_cm = start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=False,
     )
@@ -1618,7 +1597,7 @@ def _instrumented_model_context_resume_stream(
     span_cm = start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=False,
     )
@@ -1735,7 +1714,7 @@ async def _instrumented_model_context_resume_stream_async(
     span_cm = start_model_span(
         self,
         messages=messages,
-        tools=…,
+        tools=response.toolkit,
         format=cast(FormatParam, response.format),
         activate=False,
     )
```
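All eight resume paths change in lockstep: the span for a resumed call now sources its `tools` attribute from `response.toolkit`, so a resumed turn records the same tool definitions as the turn that produced the response. A sketch of the pattern with stand-in types (not mirascope's actual signatures):

```python
from contextlib import contextmanager
from dataclasses import dataclass


@dataclass
class PriorResponse:
    """Stand-in for a response object carrying state from the original call."""

    toolkit: list[str]    # tools the original call was made with
    format: str | None    # format spec the original call was made with


@contextmanager
def start_model_span(*, tools, format):
    # Stand-in: the real helper builds gen_ai.* span attributes from these.
    print(f"span attrs: tools={tools} format={format}")
    yield


def resume(response: PriorResponse, new_user_message: str):
    # Same attributes as the original turn, recovered from the response:
    with start_model_span(tools=response.toolkit, format=response.format):
        ...  # re-invoke the underlying model here


resume(PriorResponse(toolkit=["get_weather"], format=None), "and tomorrow?")
```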
```diff
--- a/mirascope/ops/_internal/instrumentation/llm/serialize.py
+++ b/mirascope/ops/_internal/instrumentation/llm/serialize.py
@@ -4,30 +4,33 @@ from __future__ import annotations
 
 import logging
 from collections.abc import Sequence
-from typing import …
+from typing import Any, Protocol
 
-from …
+from opentelemetry.util.types import AttributeValue
+
+from .....llm import (
+    AnyToolSchema,
+    AssistantMessage,
     Audio,
     Base64ImageSource,
     Document,
     Image,
+    Jsonable,
+    Message,
+    ProviderTool,
+    RootResponse,
+    SystemMessage,
     Text,
     Thought,
     ToolCall,
     ToolOutput,
+    Usage,
+    UserMessage,
 )
 from .....llm.content.document import Base64DocumentSource, TextDocumentSource
-from .....llm.messages import AssistantMessage, Message, SystemMessage, UserMessage
-from .....llm.responses.usage import Usage
 from ...utils import json_dumps
 from .cost import calculate_cost_async, calculate_cost_sync
 
-if TYPE_CHECKING:
-    from opentelemetry.util.types import AttributeValue
-
-    from .....llm.responses.root_response import RootResponse
-    from .....llm.types import Jsonable
-
 logger = logging.getLogger(__name__)
@@ -41,7 +44,7 @@ class SpanProtocol(Protocol):
 
 def _serialize_content_part(
     part: Text | ToolCall | Thought | Image | Audio | Document | ToolOutput[Jsonable],
-) -> dict[str, …]:
+) -> dict[str, Jsonable]:
     """Serialize a single content part to a dict matching the Mirascope dataclass structure."""
     if isinstance(part, Text):
         return {"type": "text", "text": part.text}
@@ -116,7 +119,7 @@ def _serialize_content_part(
     return {"type": "unknown"}  # pragma: no cover
 
 
-def _serialize_message(message: Message) -> dict[str, …]:
+def _serialize_message(message: Message) -> dict[str, Jsonable]:
     """Serialize a Message to a dict matching the Mirascope dataclass structure."""
     if isinstance(message, SystemMessage):
         return {
@@ -166,6 +169,27 @@ def serialize_mirascope_usage(usage: Usage | None) -> AttributeValue | None:
     )
 
 
+def _serialize_tool(tool: AnyToolSchema | ProviderTool) -> dict[str, Jsonable]:
+    if isinstance(tool, ProviderTool):
+        return {"name": tool.name, "type": "extension"}
+    result: dict[str, Jsonable] = {
+        "name": tool.name,
+        "description": tool.description,
+        "type": "function",
+        "parameters": tool.parameters.model_dump(by_alias=True, mode="json"),
+    }
+    if tool.strict is not None:
+        result["strict"] = tool.strict
+    return result
+
+
+def serialize_tools(tools: Sequence[AnyToolSchema | ProviderTool]) -> str | None:
+    """Serialize a sequence of Mirascope tools."""
+    if not tools:
+        return None
+    return json_dumps([_serialize_tool(t) for t in tools])
+
+
 def serialize_mirascope_cost(
     input_cost: float,
     output_cost: float,
```
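`serialize_tools` is now the single source of truth for the `gen_ai.tool.definitions` payload: provider-native tools (`ProviderTool`) serialize as `{"name": ..., "type": "extension"}`, function tools carry `"type": "function"` plus their JSON-schema parameters, and an optional `strict` flag. A self-contained approximation of the output shape; the stand-in dataclass replaces mirascope's pydantic-backed schema object:

```python
import json
from dataclasses import dataclass
from typing import Any


# Stand-in for a function tool schema; mirascope's real AnyToolSchema exposes a
# pydantic `parameters` model, mimicked here with a plain dict.
@dataclass
class FakeSchema:
    name: str
    description: str
    parameters: dict[str, Any]
    strict: bool | None = None


def serialize_tools(tools: list[FakeSchema]) -> str | None:
    if not tools:
        return None
    payload = []
    for t in tools:
        entry: dict[str, Any] = {
            "name": t.name,
            "description": t.description,
            "type": "function",
            "parameters": t.parameters,
        }
        if t.strict is not None:
            entry["strict"] = t.strict  # only emitted when explicitly set
        payload.append(entry)
    return json.dumps(payload)


print(serialize_tools([FakeSchema("get_weather", "Weather lookup", {"type": "object"})]))
# -> [{"name": "get_weather", "description": "Weather lookup",
#      "type": "function", "parameters": {"type": "object"}}]
```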
```diff
--- /dev/null
+++ b/mirascope/ops/_internal/instrumentation/providers/__init__.py
@@ -0,0 +1,29 @@
+"""Provider instrumentation modules."""
+
+from .anthropic import (
+    instrument_anthropic,
+    is_anthropic_instrumented,
+    uninstrument_anthropic,
+)
+from .google_genai import (
+    instrument_google_genai,
+    is_google_genai_instrumented,
+    uninstrument_google_genai,
+)
+from .openai import (
+    instrument_openai,
+    is_openai_instrumented,
+    uninstrument_openai,
+)
+
+__all__ = [
+    "instrument_anthropic",
+    "instrument_google_genai",
+    "instrument_openai",
+    "is_anthropic_instrumented",
+    "is_google_genai_instrumented",
+    "is_openai_instrumented",
+    "uninstrument_anthropic",
+    "uninstrument_google_genai",
+    "uninstrument_openai",
+]
```
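The new `providers` package exposes an instrument / is-instrumented / uninstrument triple per provider SDK. A hypothetical usage sketch: the import path below is the internal one shown in the diff, and whether these names are also re-exported from a public module (for example via `mirascope/ops/__init__.py`, which changed in this release) is not visible here:

```python
# Assumed import path, taken from the new package's location in this diff.
from mirascope.ops._internal.instrumentation.providers import (
    instrument_openai,
    is_openai_instrumented,
    uninstrument_openai,
)

instrument_openai()              # patch the OpenAI SDK so its calls emit spans
assert is_openai_instrumented()  # the predicate reports current patch state
try:
    ...  # make OpenAI SDK calls; they are traced while instrumented
finally:
    uninstrument_openai()        # restore the unpatched SDK
```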