judgeval 0.15.0__py3-none-any.whl → 0.16.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. judgeval/api/__init__.py +4 -18
  2. judgeval/api/api_types.py +18 -2
  3. judgeval/data/judgment_types.py +18 -2
  4. judgeval/logger.py +1 -1
  5. judgeval/tracer/__init__.py +10 -7
  6. judgeval/tracer/keys.py +7 -3
  7. judgeval/tracer/llm/__init__.py +2 -1227
  8. judgeval/tracer/llm/config.py +110 -0
  9. judgeval/tracer/llm/constants.py +10 -0
  10. judgeval/tracer/llm/llm_anthropic/__init__.py +3 -0
  11. judgeval/tracer/llm/llm_anthropic/wrapper.py +611 -0
  12. judgeval/tracer/llm/llm_google/__init__.py +0 -0
  13. judgeval/tracer/llm/llm_google/config.py +24 -0
  14. judgeval/tracer/llm/llm_google/wrapper.py +426 -0
  15. judgeval/tracer/llm/llm_groq/__init__.py +0 -0
  16. judgeval/tracer/llm/llm_groq/config.py +23 -0
  17. judgeval/tracer/llm/llm_groq/wrapper.py +477 -0
  18. judgeval/tracer/llm/llm_openai/__init__.py +3 -0
  19. judgeval/tracer/llm/llm_openai/wrapper.py +637 -0
  20. judgeval/tracer/llm/llm_together/__init__.py +0 -0
  21. judgeval/tracer/llm/llm_together/config.py +23 -0
  22. judgeval/tracer/llm/llm_together/wrapper.py +478 -0
  23. judgeval/tracer/llm/providers.py +5 -5
  24. judgeval/tracer/processors/__init__.py +1 -1
  25. judgeval/trainer/console.py +1 -1
  26. judgeval/utils/decorators/__init__.py +0 -0
  27. judgeval/utils/decorators/dont_throw.py +21 -0
  28. judgeval/utils/{decorators.py → decorators/use_once.py} +0 -11
  29. judgeval/utils/meta.py +1 -1
  30. judgeval/utils/version_check.py +1 -1
  31. judgeval/version.py +1 -1
  32. judgeval-0.16.1.dist-info/METADATA +266 -0
  33. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/RECORD +38 -24
  34. judgeval/tracer/llm/google/__init__.py +0 -21
  35. judgeval/tracer/llm/groq/__init__.py +0 -20
  36. judgeval/tracer/llm/together/__init__.py +0 -20
  37. judgeval-0.15.0.dist-info/METADATA +0 -158
  38. /judgeval/tracer/llm/{anthropic/__init__.py → llm_anthropic/config.py} +0 -0
  39. /judgeval/tracer/llm/{openai/__init__.py → llm_openai/config.py} +0 -0
  40. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/WHEEL +0 -0
  41. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/entry_points.txt +0 -0
  42. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/licenses/LICENSE.md +0 -0
@@ -0,0 +1,611 @@
1
+ from __future__ import annotations
2
+ import functools
3
+ import orjson
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Callable,
7
+ Optional,
8
+ Protocol,
9
+ Tuple,
10
+ Union,
11
+ Iterator,
12
+ AsyncIterator,
13
+ Sequence,
14
+ runtime_checkable,
15
+ )
16
+
17
+ from judgeval.tracer.llm.llm_anthropic.config import (
18
+ anthropic_Anthropic,
19
+ anthropic_AsyncAnthropic,
20
+ )
21
+ from judgeval.tracer.managers import sync_span_context, async_span_context
22
+ from judgeval.tracer.keys import AttributeKeys
23
+ from judgeval.tracer.utils import set_span_attribute
24
+ from judgeval.utils.serialize import safe_serialize
25
+
26
+ if TYPE_CHECKING:
27
+ from judgeval.tracer import Tracer
28
+ from opentelemetry.trace import Span
29
+
30
+ from anthropic import Anthropic, AsyncAnthropic
31
+
32
+ # Keep the original client type for runtime compatibility
33
+ AnthropicClientType = Union[Anthropic, AsyncAnthropic]
34
+
35
+
36
# Content block protocols
@runtime_checkable
class AnthropicContentBlock(Protocol):
    # Structural stand-in for one entry of a Message's `content` list.
    # NOTE: @runtime_checkable isinstance() checks verify only attribute
    # *presence*, never the attribute types.
    text: str
    type: str


# Usage protocols
@runtime_checkable
class AnthropicUsage(Protocol):
    # Token accounting object attached to messages and stream events.
    input_tokens: int
    output_tokens: int
    cache_read_input_tokens: Optional[int]
    cache_creation_input_tokens: Optional[int]


# Message protocols
@runtime_checkable
class AnthropicMessage(Protocol):
    # Shape of a complete (non-streaming) Anthropic messages.create response.
    content: Sequence[AnthropicContentBlock]
    usage: AnthropicUsage
    model: Optional[str]


# Stream event protocols
@runtime_checkable
class AnthropicStreamDelta(Protocol):
    # Incremental text payload carried by content_block_delta events.
    text: Optional[str]


@runtime_checkable
class AnthropicStreamEvent(Protocol):
    # Union shape of all streaming events; `delta`, `message`, and `usage`
    # are populated depending on `type` (e.g. message_start carries message,
    # content_block_delta carries delta).
    type: str
    delta: Optional[AnthropicStreamDelta]
    message: Optional[AnthropicMessage]
    usage: Optional[AnthropicUsage]


# Client protocols
@runtime_checkable
class AnthropicClient(Protocol):
    # Marker protocol for the sync client; no members are required.
    pass


@runtime_checkable
class AnthropicAsyncClient(Protocol):
    # Marker protocol for the async client; no members are required.
    pass


# Union types
AnthropicResponseType = AnthropicMessage
AnthropicStreamType = Union[
    Iterator[AnthropicStreamEvent], AsyncIterator[AnthropicStreamEvent]
]
90
+
91
+
92
+ def _extract_anthropic_content(chunk: AnthropicStreamEvent) -> str:
93
+ if hasattr(chunk, "delta") and chunk.delta and hasattr(chunk.delta, "text"):
94
+ return chunk.delta.text or ""
95
+
96
+ if isinstance(chunk, AnthropicStreamEvent) and chunk.type == "content_block_delta":
97
+ if chunk.delta and chunk.delta.text:
98
+ return chunk.delta.text
99
+ return ""
100
+
101
+
102
+ def _extract_anthropic_tokens(usage_data: AnthropicUsage) -> Tuple[int, int, int, int]:
103
+ prompt_tokens = usage_data.input_tokens or 0
104
+ completion_tokens = usage_data.output_tokens or 0
105
+ cache_read_input_tokens = usage_data.cache_read_input_tokens or 0
106
+ cache_creation_input_tokens = usage_data.cache_creation_input_tokens or 0
107
+
108
+ return (
109
+ prompt_tokens,
110
+ completion_tokens,
111
+ cache_read_input_tokens,
112
+ cache_creation_input_tokens,
113
+ )
114
+
115
+
116
+ def _extract_anthropic_chunk_usage(
117
+ chunk: AnthropicStreamEvent,
118
+ ) -> Optional[AnthropicUsage]:
119
+ if hasattr(chunk, "usage") and chunk.usage:
120
+ return chunk.usage
121
+
122
+ if isinstance(chunk, AnthropicStreamEvent):
123
+ if chunk.type == "message_start" and chunk.message:
124
+ return chunk.message.usage
125
+ elif chunk.type in ("message_delta", "message_stop"):
126
+ return chunk.usage
127
+ return None
128
+
129
+
130
def _format_anthropic_output(
    response: AnthropicMessage,
) -> Tuple[Optional[Union[str, list]], Optional[AnthropicUsage]]:
    """Extract (content, usage) from a non-streaming Anthropic response.

    Converts each recognized content block (text / tool_use / tool_result)
    into a plain JSON-serializable dict; unknown block types are reduced to
    whichever known attributes they expose. Returns (None, None) when the
    response does not match the AnthropicMessage protocol or on extraction
    errors — this helper is deliberately best-effort and never raises for
    malformed content.
    """
    message_content: Optional[Union[str, list]] = None
    usage_data: Optional[AnthropicUsage] = None

    try:
        # runtime_checkable isinstance only verifies attribute presence,
        # so this accepts any object shaped like a Message.
        if isinstance(response, AnthropicMessage):
            usage_data = response.usage
            if response.content:
                content_blocks = []
                for block in response.content:
                    # Blocks lacking text/type attributes are skipped entirely.
                    if isinstance(block, AnthropicContentBlock):
                        block_type = getattr(block, "type", None)
                        if block_type == "text":
                            block_data = {
                                "type": "text",
                                "text": getattr(block, "text", ""),
                            }
                            # Add citations if present
                            if hasattr(block, "citations"):
                                block_data["citations"] = getattr(
                                    block, "citations", None
                                )
                        elif block_type == "tool_use":
                            block_data = {
                                "type": "tool_use",
                                "id": getattr(block, "id", None),
                                "name": getattr(block, "name", None),
                                "input": getattr(block, "input", None),
                            }
                        elif block_type == "tool_result":
                            block_data = {
                                "type": "tool_result",
                                "tool_use_id": getattr(block, "tool_use_id", None),
                                "content": getattr(block, "content", None),
                            }
                        else:
                            # Handle unknown block types: keep the type tag and
                            # copy over any recognized attributes that exist.
                            block_data = {"type": block_type}
                            for attr in [
                                "id",
                                "text",
                                "name",
                                "input",
                                "content",
                                "tool_use_id",
                                "citations",
                            ]:
                                if hasattr(block, attr):
                                    block_data[attr] = getattr(block, attr)

                        content_blocks.append(block_data)

                # Return structured data instead of string
                message_content = content_blocks if content_blocks else None
    except (AttributeError, IndexError, TypeError):
        # Swallow extraction errors: tracing must never break the API call.
        pass

    return message_content, usage_data
190
+
191
+
192
class TracedAnthropicGenerator:
    """Pass-through iterator over Anthropic stream events that records tracing.

    Accumulates streamed text into ``accumulated_content`` and writes token
    usage onto ``span`` as events arrive. The span is ended exactly once:
    either when the underlying stream is exhausted (completion recorded
    first) or when iteration raises.
    """

    def __init__(
        self,
        tracer: Tracer,
        generator: Iterator[AnthropicStreamEvent],
        client: AnthropicClientType,
        span: Span,
        model_name: str,
    ):
        self.tracer = tracer
        self.generator = generator
        self.client = client
        self.span = span
        self.model_name = model_name
        # Running concatenation of all text deltas seen so far.
        self.accumulated_content = ""

    def __iter__(self) -> Iterator[AnthropicStreamEvent]:
        return self

    def __next__(self) -> AnthropicStreamEvent:
        try:
            chunk = next(self.generator)
            content = _extract_anthropic_content(chunk)
            if content:
                self.accumulated_content += content

            # Usage may arrive on several event types; re-set the attributes
            # each time so the span ends up with the latest totals.
            usage_data = _extract_anthropic_chunk_usage(chunk)
            if usage_data:
                prompt_tokens, completion_tokens, cache_read, cache_creation = (
                    _extract_anthropic_tokens(usage_data)
                )
                set_span_attribute(
                    self.span, AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
                    completion_tokens,
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
                    cache_read,
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS,
                    cache_creation,
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.JUDGMENT_USAGE_METADATA,
                    safe_serialize(usage_data),
                )
            return chunk
        except StopIteration:
            # Stream finished normally: record the full completion, close the
            # span, and propagate StopIteration to the caller.
            set_span_attribute(
                self.span, AttributeKeys.GEN_AI_COMPLETION, self.accumulated_content
            )
            self.span.end()
            raise
        except Exception as e:
            if self.span:
                self.span.record_exception(e)
                self.span.end()
            raise
258
+
259
+
260
class TracedAnthropicAsyncGenerator:
    """Async counterpart of TracedAnthropicGenerator.

    Wraps an async iterator of Anthropic stream events, accumulating streamed
    text and recording token usage on ``span``. The span is ended exactly
    once: on StopAsyncIteration (after recording the completion) or on error.
    """

    def __init__(
        self,
        tracer: Tracer,
        async_generator: AsyncIterator[AnthropicStreamEvent],
        client: AnthropicClientType,
        span: Span,
        model_name: str,
    ):
        self.tracer = tracer
        self.async_generator = async_generator
        self.client = client
        self.span = span
        self.model_name = model_name
        # Running concatenation of all text deltas seen so far.
        self.accumulated_content = ""

    def __aiter__(self) -> AsyncIterator[AnthropicStreamEvent]:
        return self

    async def __anext__(self) -> AnthropicStreamEvent:
        try:
            chunk = await self.async_generator.__anext__()
            content = _extract_anthropic_content(chunk)
            if content:
                self.accumulated_content += content

            # Usage may arrive on several event types; re-set the attributes
            # each time so the span ends up with the latest totals.
            usage_data = _extract_anthropic_chunk_usage(chunk)
            if usage_data:
                prompt_tokens, completion_tokens, cache_read, cache_creation = (
                    _extract_anthropic_tokens(usage_data)
                )
                set_span_attribute(
                    self.span, AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
                    completion_tokens,
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
                    cache_read,
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS,
                    cache_creation,
                )
                set_span_attribute(
                    self.span,
                    AttributeKeys.JUDGMENT_USAGE_METADATA,
                    safe_serialize(usage_data),
                )
            return chunk
        except StopAsyncIteration:
            # Stream finished normally: record the full completion, close the
            # span, and propagate StopAsyncIteration to the caller.
            set_span_attribute(
                self.span, AttributeKeys.GEN_AI_COMPLETION, self.accumulated_content
            )
            self.span.end()
            raise
        except Exception as e:
            if self.span:
                self.span.record_exception(e)
                self.span.end()
            raise
326
+
327
+
328
class TracedAnthropicSyncContextManager:
    """Context-manager adapter for the SDK's ``messages.stream(...)``.

    Entering it enters the wrapped SDK context manager and yields the raw
    event stream wrapped in a TracedAnthropicGenerator bound to ``span``,
    so iteration is traced transparently.
    """

    def __init__(
        self,
        tracer: Tracer,
        context_manager,
        client: AnthropicClientType,
        span: Span,
        model_name: str,
    ):
        # Pure data holder; nothing is entered until __enter__ runs.
        self.model_name = model_name
        self.span = span
        self.client = client
        self.context_manager = context_manager
        self.tracer = tracer

    def __enter__(self):
        # Enter the SDK context manager, keep a handle to the raw stream,
        # and hand back a traced iterator over it.
        raw_stream = self.context_manager.__enter__()
        self.stream = raw_stream
        return TracedAnthropicGenerator(
            self.tracer, raw_stream, self.client, self.span, self.model_name
        )

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate teardown (and exception-suppression decisions) to the SDK.
        return self.context_manager.__exit__(exc_type, exc_val, exc_tb)
351
+
352
+
353
class TracedAnthropicAsyncContextManager:
    """Async context-manager adapter for the SDK's ``messages.stream(...)``.

    Entering it awaits the wrapped SDK context manager and yields the raw
    event stream wrapped in a TracedAnthropicAsyncGenerator bound to
    ``span``, so async iteration is traced transparently.
    """

    def __init__(
        self,
        tracer: Tracer,
        context_manager,
        client: AnthropicClientType,
        span: Span,
        model_name: str,
    ):
        # Pure data holder; nothing is entered until __aenter__ runs.
        self.model_name = model_name
        self.span = span
        self.client = client
        self.context_manager = context_manager
        self.tracer = tracer

    async def __aenter__(self):
        # Await the SDK's __aenter__, keep a handle to the raw stream, and
        # hand back a traced async iterator over it.
        raw_stream = await self.context_manager.__aenter__()
        self.stream = raw_stream
        return TracedAnthropicAsyncGenerator(
            self.tracer, raw_stream, self.client, self.span, self.model_name
        )

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Delegate teardown (and exception-suppression decisions) to the SDK.
        return await self.context_manager.__aexit__(exc_type, exc_val, exc_tb)
376
+
377
+
378
def wrap_anthropic_client(
    tracer: Tracer, client: AnthropicClientType
) -> AnthropicClientType:
    """Monkey-patch an (Async)Anthropic client so its calls are traced.

    Replaces ``client.messages.create`` and ``client.messages.stream`` with
    wrappers that open a span per call, record the prompt, model, completion,
    and token usage, and — for streaming — keep the span open until the
    stream is fully consumed. Returns the same client instance, mutated
    in place. Unrecognized client types are returned unmodified.
    """

    def wrapped(function: Callable, span_name: str):
        # Sync wrapper for messages.create.
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            if kwargs.get("stream", False):
                # Streaming: the span must outlive this call, so it is started
                # manually and ended by TracedAnthropicGenerator when the
                # stream finishes or errors.
                span = tracer.get_tracer().start_span(
                    span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
                )
                tracer.add_agent_attributes_to_span(span)
                set_span_attribute(
                    span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
                )
                model_name = kwargs.get("model", "")
                set_span_attribute(span, AttributeKeys.GEN_AI_REQUEST_MODEL, model_name)
                stream_response = function(*args, **kwargs)
                return TracedAnthropicGenerator(
                    tracer, stream_response, client, span, model_name
                )
            else:
                # Non-streaming: span lifetime is scoped to this call.
                with sync_span_context(
                    tracer, span_name, {AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
                ) as span:
                    tracer.add_agent_attributes_to_span(span)
                    set_span_attribute(
                        span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
                    )
                    model_name = kwargs.get("model", "")
                    set_span_attribute(
                        span, AttributeKeys.GEN_AI_REQUEST_MODEL, model_name
                    )

                    response = function(*args, **kwargs)

                    if isinstance(response, AnthropicMessage):
                        output, usage_data = _format_anthropic_output(response)
                        # Serialize structured data to JSON for span attribute
                        if isinstance(output, list):
                            output_str = orjson.dumps(
                                output, option=orjson.OPT_INDENT_2
                            ).decode()
                        else:
                            output_str = str(output) if output is not None else None
                        set_span_attribute(
                            span, AttributeKeys.GEN_AI_COMPLETION, output_str
                        )

                        if usage_data:
                            (
                                prompt_tokens,
                                completion_tokens,
                                cache_read,
                                cache_creation,
                            ) = _extract_anthropic_tokens(usage_data)
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS,
                                prompt_tokens,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
                                completion_tokens,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
                                cache_read,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS,
                                cache_creation,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.JUDGMENT_USAGE_METADATA,
                                safe_serialize(usage_data),
                            )
                        # Prefer the model name reported by the response;
                        # fall back to the one requested.
                        set_span_attribute(
                            span,
                            AttributeKeys.GEN_AI_RESPONSE_MODEL,
                            getattr(response, "model", model_name),
                        )
                    return response

        return wrapper

    def wrapped_async(function: Callable, span_name: str):
        # Async wrapper for messages.create; mirrors wrapped() above.
        @functools.wraps(function)
        async def wrapper(*args, **kwargs):
            if kwargs.get("stream", False):
                # Streaming: span is ended by TracedAnthropicAsyncGenerator.
                span = tracer.get_tracer().start_span(
                    span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
                )
                tracer.add_agent_attributes_to_span(span)
                set_span_attribute(
                    span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
                )
                model_name = kwargs.get("model", "")
                set_span_attribute(span, AttributeKeys.GEN_AI_REQUEST_MODEL, model_name)
                stream_response = await function(*args, **kwargs)
                return TracedAnthropicAsyncGenerator(
                    tracer, stream_response, client, span, model_name
                )
            else:
                # Non-streaming: span lifetime is scoped to this call.
                async with async_span_context(
                    tracer, span_name, {AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
                ) as span:
                    tracer.add_agent_attributes_to_span(span)
                    set_span_attribute(
                        span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
                    )
                    model_name = kwargs.get("model", "")
                    set_span_attribute(
                        span, AttributeKeys.GEN_AI_REQUEST_MODEL, model_name
                    )

                    response = await function(*args, **kwargs)

                    if isinstance(response, AnthropicMessage):
                        output, usage_data = _format_anthropic_output(response)
                        # Serialize structured data to JSON for span attribute
                        if isinstance(output, list):
                            output_str = orjson.dumps(
                                output, option=orjson.OPT_INDENT_2
                            ).decode()
                        else:
                            output_str = str(output) if output is not None else None
                        set_span_attribute(
                            span, AttributeKeys.GEN_AI_COMPLETION, output_str
                        )

                        if usage_data:
                            (
                                prompt_tokens,
                                completion_tokens,
                                cache_read,
                                cache_creation,
                            ) = _extract_anthropic_tokens(usage_data)
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS,
                                prompt_tokens,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
                                completion_tokens,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
                                cache_read,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS,
                                cache_creation,
                            )
                            set_span_attribute(
                                span,
                                AttributeKeys.JUDGMENT_USAGE_METADATA,
                                safe_serialize(usage_data),
                            )
                        # Prefer the model name reported by the response;
                        # fall back to the one requested.
                        set_span_attribute(
                            span,
                            AttributeKeys.GEN_AI_RESPONSE_MODEL,
                            getattr(response, "model", model_name),
                        )
                    return response

        return wrapper

    def wrapped_sync_context_manager(function, span_name: str):
        # Wrapper for the sync messages.stream(...) context manager. The span
        # is started here and ended by the traced generator produced inside
        # TracedAnthropicSyncContextManager.__enter__.
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            span = tracer.get_tracer().start_span(
                span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
            )
            tracer.add_agent_attributes_to_span(span)
            set_span_attribute(
                span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
            )
            model_name = kwargs.get("model", "")
            set_span_attribute(span, AttributeKeys.GEN_AI_REQUEST_MODEL, model_name)

            original_context_manager = function(*args, **kwargs)
            return TracedAnthropicSyncContextManager(
                tracer, original_context_manager, client, span, model_name
            )

        return wrapper

    def wrapped_async_context_manager(function, span_name: str):
        # Wrapper for the async messages.stream(...) context manager.
        # NOTE: the outer wrapper is intentionally sync — the SDK's stream()
        # returns the async context manager without awaiting.
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            span = tracer.get_tracer().start_span(
                span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
            )
            tracer.add_agent_attributes_to_span(span)
            set_span_attribute(
                span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
            )
            model_name = kwargs.get("model", "")
            set_span_attribute(span, AttributeKeys.GEN_AI_REQUEST_MODEL, model_name)

            original_context_manager = function(*args, **kwargs)
            return TracedAnthropicAsyncContextManager(
                tracer, original_context_manager, client, span, model_name
            )

        return wrapper

    # Dispatch on the concrete client type; the anthropic_* names are None
    # when the anthropic package is not installed, hence the truthiness guard.
    span_name = "ANTHROPIC_API_CALL"
    if anthropic_Anthropic and isinstance(client, anthropic_Anthropic):
        setattr(client.messages, "create", wrapped(client.messages.create, span_name))
        setattr(
            client.messages,
            "stream",
            wrapped_sync_context_manager(client.messages.stream, span_name),
        )
    elif anthropic_AsyncAnthropic and isinstance(client, anthropic_AsyncAnthropic):
        setattr(
            client.messages, "create", wrapped_async(client.messages.create, span_name)
        )
        setattr(
            client.messages,
            "stream",
            wrapped_async_context_manager(client.messages.stream, span_name),
        )

    return client
File without changes
@@ -0,0 +1,24 @@
1
from __future__ import annotations
from typing import TYPE_CHECKING

# Static type checkers always resolve the real google-genai types.
if TYPE_CHECKING:
    from google.genai import Client
    from google.genai.client import AsyncClient

# At runtime the google-genai dependency is optional: fall back to None
# placeholders so callers can feature-detect via HAS_GOOGLE_GENAI instead
# of catching ImportError themselves.
try:
    from google.genai import Client
    from google.genai.client import AsyncClient

    HAS_GOOGLE_GENAI = True
except ImportError:
    HAS_GOOGLE_GENAI = False
    Client = AsyncClient = None  # type: ignore[misc,assignment]

# Namespaced aliases exported for the tracer wrappers (avoids clashing with
# other providers' `Client` names at import sites).
google_genai_Client = Client
google_genai_AsyncClient = AsyncClient

__all__ = [
    "HAS_GOOGLE_GENAI",
    "google_genai_Client",
    "google_genai_AsyncClient",
]