judgeval 0.16.6__py3-none-any.whl → 0.16.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of judgeval might be problematic.

Files changed (43)
  1. judgeval/api/api_types.py +1 -2
  2. judgeval/data/judgment_types.py +1 -2
  3. judgeval/tracer/__init__.py +7 -52
  4. judgeval/tracer/llm/config.py +12 -44
  5. judgeval/tracer/llm/constants.py +0 -1
  6. judgeval/tracer/llm/llm_anthropic/config.py +3 -17
  7. judgeval/tracer/llm/llm_anthropic/messages.py +440 -0
  8. judgeval/tracer/llm/llm_anthropic/messages_stream.py +322 -0
  9. judgeval/tracer/llm/llm_anthropic/wrapper.py +40 -621
  10. judgeval/tracer/llm/llm_google/__init__.py +3 -0
  11. judgeval/tracer/llm/llm_google/config.py +3 -21
  12. judgeval/tracer/llm/llm_google/generate_content.py +125 -0
  13. judgeval/tracer/llm/llm_google/wrapper.py +19 -454
  14. judgeval/tracer/llm/llm_openai/beta_chat_completions.py +192 -0
  15. judgeval/tracer/llm/llm_openai/chat_completions.py +437 -0
  16. judgeval/tracer/llm/llm_openai/config.py +3 -29
  17. judgeval/tracer/llm/llm_openai/responses.py +444 -0
  18. judgeval/tracer/llm/llm_openai/wrapper.py +43 -641
  19. judgeval/tracer/llm/llm_together/__init__.py +3 -0
  20. judgeval/tracer/llm/llm_together/chat_completions.py +398 -0
  21. judgeval/tracer/llm/llm_together/config.py +3 -20
  22. judgeval/tracer/llm/llm_together/wrapper.py +34 -485
  23. judgeval/tracer/llm/providers.py +4 -48
  24. judgeval/utils/decorators/dont_throw.py +30 -14
  25. judgeval/utils/wrappers/README.md +3 -0
  26. judgeval/utils/wrappers/__init__.py +15 -0
  27. judgeval/utils/wrappers/immutable_wrap_async.py +74 -0
  28. judgeval/utils/wrappers/immutable_wrap_async_iterator.py +84 -0
  29. judgeval/utils/wrappers/immutable_wrap_sync.py +66 -0
  30. judgeval/utils/wrappers/immutable_wrap_sync_iterator.py +84 -0
  31. judgeval/utils/wrappers/mutable_wrap_async.py +67 -0
  32. judgeval/utils/wrappers/mutable_wrap_sync.py +67 -0
  33. judgeval/utils/wrappers/utils.py +35 -0
  34. judgeval/version.py +1 -1
  35. {judgeval-0.16.6.dist-info → judgeval-0.16.8.dist-info}/METADATA +1 -1
  36. {judgeval-0.16.6.dist-info → judgeval-0.16.8.dist-info}/RECORD +40 -27
  37. judgeval/tracer/llm/llm_groq/config.py +0 -23
  38. judgeval/tracer/llm/llm_groq/wrapper.py +0 -498
  39. judgeval/tracer/local_eval_queue.py +0 -199
  40. /judgeval/{tracer/llm/llm_groq/__init__.py → utils/wrappers/py.typed} +0 -0
  41. {judgeval-0.16.6.dist-info → judgeval-0.16.8.dist-info}/WHEEL +0 -0
  42. {judgeval-0.16.6.dist-info → judgeval-0.16.8.dist-info}/entry_points.txt +0 -0
  43. {judgeval-0.16.6.dist-info → judgeval-0.16.8.dist-info}/licenses/LICENSE.md +0 -0
judgeval/tracer/llm/llm_together/wrapper.py
@@ -1,503 +1,52 @@
  from __future__ import annotations
- import functools
- from typing import (
-     TYPE_CHECKING,
-     Any,
-     Callable,
-     Optional,
-     Protocol,
-     Tuple,
-     Union,
-     Iterator,
-     AsyncIterator,
-     Sequence,
-     runtime_checkable,
- )
+ from typing import TYPE_CHECKING, Union
+ import typing

- from judgeval.tracer.llm.llm_together.config import (
-     together_Together,
-     together_AsyncTogether,
+ from judgeval.tracer.llm.llm_together.chat_completions import (
+     wrap_chat_completions_create_sync,
+     wrap_chat_completions_create_async,
  )
- from judgeval.tracer.managers import sync_span_context, async_span_context
- from judgeval.logger import judgeval_logger
- from judgeval.tracer.keys import AttributeKeys
- from judgeval.tracer.utils import set_span_attribute
- from judgeval.utils.serialize import safe_serialize
+

  if TYPE_CHECKING:
      from judgeval.tracer import Tracer
-     from opentelemetry.trace import Span
-
- # Keep the original client type for runtime compatibility
- TogetherClientType = Union[together_Together, together_AsyncTogether]
-
-
- # Usage protocols
- @runtime_checkable
- class TogetherUsage(Protocol):
-     prompt_tokens: Optional[int]
-     completion_tokens: Optional[int]
-     total_tokens: Optional[int]
-
-
- # Message protocols
- @runtime_checkable
- class TogetherMessage(Protocol):
-     content: Optional[str]
-     role: str
-
-
- @runtime_checkable
- class TogetherChoice(Protocol):
-     index: int
-     message: TogetherMessage
-     finish_reason: Optional[str]
-
-
- @runtime_checkable
- class TogetherChatCompletion(Protocol):
-     id: str
-     object: str
-     created: int
-     model: str
-     choices: Sequence[TogetherChoice]
-     usage: Optional[TogetherUsage]
-
-
- # Stream protocols
- @runtime_checkable
- class TogetherStreamDelta(Protocol):
-     content: Optional[str]
-
-
- @runtime_checkable
- class TogetherStreamChoice(Protocol):
-     index: int
-     delta: TogetherStreamDelta
-
-
- @runtime_checkable
- class TogetherStreamChunk(Protocol):
-     choices: Sequence[TogetherStreamChoice]
-     usage: Optional[TogetherUsage]
-
-
- # Client protocols
- @runtime_checkable
- class TogetherClient(Protocol):
-     pass
-
-
- @runtime_checkable
- class TogetherAsyncClient(Protocol):
-     pass
-
+     from together import Together, AsyncTogether  # type: ignore[import-untyped]

- # Union types
- TogetherResponseType = TogetherChatCompletion
- TogetherStreamType = Union[
-     Iterator[TogetherStreamChunk], AsyncIterator[TogetherStreamChunk]
- ]
+     TClient = Union[Together, AsyncTogether]


- def _extract_together_content(chunk: TogetherStreamChunk) -> str:
-     if chunk.choices and len(chunk.choices) > 0:
-         delta_content = chunk.choices[0].delta.content
-         if delta_content:
-             return delta_content
-     return ""
-
-
- def _extract_together_tokens(usage_data: TogetherUsage) -> Tuple[int, int, int, int]:
-     prompt_tokens = usage_data.prompt_tokens or 0
-     completion_tokens = usage_data.completion_tokens or 0
-     cache_read_input_tokens = 0  # Together doesn't support cache tokens
-     cache_creation_input_tokens = 0  # Together doesn't support cache tokens
-     return (
-         prompt_tokens,
-         completion_tokens,
-         cache_read_input_tokens,
-         cache_creation_input_tokens,
-     )
-
-
- def _format_together_output(
-     response: TogetherChatCompletion,
- ) -> Tuple[Optional[Union[str, list[dict[str, Any]]]], Optional[TogetherUsage]]:
-     message_content: Optional[Union[str, list[dict[str, Any]]]] = None
-     usage_data: Optional[TogetherUsage] = None
-
-     try:
-         if isinstance(response, TogetherChatCompletion):
-             usage_data = response.usage
-             if response.choices and len(response.choices) > 0:
-                 content = response.choices[0].message.content
-                 if content:
-                     # Return structured data for consistency with other providers
-                     message_content = [{"type": "text", "text": str(content)}]
-     except (AttributeError, IndexError, TypeError):
-         pass
-
-     return message_content, usage_data
-
-
- class TracedTogetherGenerator:
-     def __init__(
-         self,
-         tracer: Tracer,
-         generator: Iterator[TogetherStreamChunk],
-         client: TogetherClientType,
-         span: Span,
-         model_name: str,
-     ):
-         self.tracer = tracer
-         self.generator = generator
-         self.client = client
-         self.span = span
-         self.model_name = model_name
-         self.accumulated_content = ""
-
-     def __iter__(self) -> Iterator[TogetherStreamChunk]:
-         return self
-
-     def __next__(self) -> TogetherStreamChunk:
-         try:
-             chunk = next(self.generator)
-             content = _extract_together_content(chunk)
-             if content:
-                 self.accumulated_content += content
-             if chunk.usage:
-                 (
-                     prompt_tokens,
-                     completion_tokens,
-                     cache_read,
-                     cache_creation,
-                 ) = _extract_together_tokens(chunk.usage)
-                 set_span_attribute(
-                     self.span, AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
-                 )
-                 set_span_attribute(
-                     self.span,
-                     AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
-                     completion_tokens,
-                 )
-                 set_span_attribute(
-                     self.span,
-                     AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
-                     cache_read,
-                 )
-                 set_span_attribute(
-                     self.span,
-                     AttributeKeys.JUDGMENT_USAGE_METADATA,
-                     safe_serialize(chunk.usage),
-                 )
-             return chunk
-         except StopIteration:
-             set_span_attribute(
-                 self.span, AttributeKeys.GEN_AI_COMPLETION, self.accumulated_content
-             )
-             self.span.end()
-             raise
-         except Exception as e:
-             if self.span:
-                 self.span.record_exception(e)
-                 self.span.end()
-             raise
-
-
- class TracedTogetherAsyncGenerator:
-     def __init__(
-         self,
-         tracer: Tracer,
-         async_generator: AsyncIterator[TogetherStreamChunk],
-         client: TogetherClientType,
-         span: Span,
-         model_name: str,
-     ):
-         self.tracer = tracer
-         self.async_generator = async_generator
-         self.client = client
-         self.span = span
-         self.model_name = model_name
-         self.accumulated_content = ""
-
-     def __aiter__(self) -> AsyncIterator[TogetherStreamChunk]:
-         return self
-
-     async def __anext__(self) -> TogetherStreamChunk:
-         try:
-             chunk = await self.async_generator.__anext__()
-             content = _extract_together_content(chunk)
-             if content:
-                 self.accumulated_content += content
-             if chunk.usage:
-                 (
-                     prompt_tokens,
-                     completion_tokens,
-                     cache_read,
-                     cache_creation,
-                 ) = _extract_together_tokens(chunk.usage)
-                 set_span_attribute(
-                     self.span, AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
-                 )
-                 set_span_attribute(
-                     self.span,
-                     AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
-                     completion_tokens,
-                 )
-                 set_span_attribute(
-                     self.span,
-                     AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
-                     cache_read,
-                 )
-                 set_span_attribute(
-                     self.span,
-                     AttributeKeys.JUDGMENT_USAGE_METADATA,
-                     safe_serialize(chunk.usage),
-                 )
-             return chunk
-         except StopAsyncIteration:
-             set_span_attribute(
-                 self.span, AttributeKeys.GEN_AI_COMPLETION, self.accumulated_content
-             )
-             self.span.end()
-             raise
-         except Exception as e:
-             if self.span:
-                 self.span.record_exception(e)
-                 self.span.end()
-             raise
-
-
- def wrap_together_client(
-     tracer: Tracer, client: TogetherClientType
- ) -> TogetherClientType:
-     def wrapped(function: Callable, span_name: str):
-         @functools.wraps(function)
-         def wrapper(*args, **kwargs):
-             if kwargs.get("stream", False):
-                 span = tracer.get_tracer().start_span(
-                     span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
-                 )
-                 tracer.add_agent_attributes_to_span(span)
-                 set_span_attribute(
-                     span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
-                 )
-                 model_name = kwargs.get("model", "")
-                 # Add together_ai/ prefix for server-side cost calculation
-                 prefixed_model_name = f"together_ai/{model_name}" if model_name else ""
-                 set_span_attribute(
-                     span, AttributeKeys.GEN_AI_REQUEST_MODEL, prefixed_model_name
-                 )
-                 stream_response = function(*args, **kwargs)
-                 return TracedTogetherGenerator(
-                     tracer, stream_response, client, span, model_name
-                 )
-             else:
-                 with sync_span_context(
-                     tracer, span_name, {AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
-                 ) as span:
-                     try:
-                         tracer.add_agent_attributes_to_span(span)
-                         set_span_attribute(
-                             span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
-                         )
-                         model_name = kwargs.get("model", "")
-                         # Add together_ai/ prefix for server-side cost calculation
-                         prefixed_model_name = (
-                             f"together_ai/{model_name}" if model_name else ""
-                         )
-                         set_span_attribute(
-                             span,
-                             AttributeKeys.GEN_AI_REQUEST_MODEL,
-                             prefixed_model_name,
-                         )
-                     except Exception as e:
-                         judgeval_logger.error(
-                             f"[together wrapped] Error adding span metadata: {e}"
-                         )
-
-                     response = function(*args, **kwargs)
+ def wrap_together_client_sync(tracer: Tracer, client: Together) -> Together:
+     wrap_chat_completions_create_sync(tracer, client)
+     return client

-                     try:
-                         if isinstance(response, TogetherChatCompletion):
-                             output, usage_data = _format_together_output(response)
-                             # Serialize structured data to JSON for span attribute
-                             if output:
-                                 if isinstance(output, list):
-                                     output_str = safe_serialize(output)
-                                 else:
-                                     output_str = str(output)
-                                 set_span_attribute(
-                                     span, AttributeKeys.GEN_AI_COMPLETION, output_str
-                                 )
-                             if usage_data:
-                                 (
-                                     prompt_tokens,
-                                     completion_tokens,
-                                     cache_read,
-                                     cache_creation,
-                                 ) = _extract_together_tokens(usage_data)
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS,
-                                     prompt_tokens,
-                                 )
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                     completion_tokens,
-                                 )
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
-                                     cache_read,
-                                 )
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.JUDGMENT_USAGE_METADATA,
-                                     safe_serialize(usage_data),
-                                 )
-                             # Add together_ai/ prefix to response model for server-side cost calculation
-                             response_model = getattr(response, "model", model_name)
-                             prefixed_response_model = (
-                                 f"together_ai/{response_model}"
-                                 if response_model
-                                 else ""
-                             )
-                             set_span_attribute(
-                                 span,
-                                 AttributeKeys.GEN_AI_RESPONSE_MODEL,
-                                 prefixed_response_model,
-                             )
-                     except Exception as e:
-                         judgeval_logger.error(
-                             f"[together wrapped] Error adding span metadata: {e}"
-                         )
-                     finally:
-                         return response

-         return wrapper
+ def wrap_together_client_async(tracer: Tracer, client: AsyncTogether) -> AsyncTogether:
+     wrap_chat_completions_create_async(tracer, client)
+     return client

-     def wrapped_async(function: Callable, span_name: str):
-         @functools.wraps(function)
-         async def wrapper(*args, **kwargs):
-             if kwargs.get("stream", False):
-                 span = tracer.get_tracer().start_span(
-                     span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
-                 )
-                 tracer.add_agent_attributes_to_span(span)
-                 set_span_attribute(
-                     span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
-                 )
-                 model_name = kwargs.get("model", "")
-                 # Add together_ai/ prefix for server-side cost calculation
-                 prefixed_model_name = f"together_ai/{model_name}" if model_name else ""
-                 set_span_attribute(
-                     span, AttributeKeys.GEN_AI_REQUEST_MODEL, prefixed_model_name
-                 )
-                 stream_response = await function(*args, **kwargs)
-                 return TracedTogetherAsyncGenerator(
-                     tracer, stream_response, client, span, model_name
-                 )
-             else:
-                 async with async_span_context(
-                     tracer, span_name, {AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
-                 ) as span:
-                     try:
-                         tracer.add_agent_attributes_to_span(span)
-                         set_span_attribute(
-                             span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
-                         )
-                         model_name = kwargs.get("model", "")
-                         # Add together_ai/ prefix for server-side cost calculation
-                         prefixed_model_name = (
-                             f"together_ai/{model_name}" if model_name else ""
-                         )
-                         set_span_attribute(
-                             span,
-                             AttributeKeys.GEN_AI_REQUEST_MODEL,
-                             prefixed_model_name,
-                         )
-                     except Exception as e:
-                         judgeval_logger.error(
-                             f"[together wrapped_async] Error adding span metadata: {e}"
-                         )

-                     response = await function(*args, **kwargs)
+ @typing.overload
+ def wrap_together_client(tracer: Tracer, client: Together) -> Together: ...
+ @typing.overload
+ def wrap_together_client(tracer: Tracer, client: AsyncTogether) -> AsyncTogether: ...  # type: ignore[overload-cannot-match]

-                     try:
-                         if isinstance(response, TogetherChatCompletion):
-                             output, usage_data = _format_together_output(response)
-                             # Serialize structured data to JSON for span attribute
-                             if output:
-                                 if isinstance(output, list):
-                                     output_str = safe_serialize(output)
-                                 else:
-                                     output_str = str(output)
-                                 set_span_attribute(
-                                     span, AttributeKeys.GEN_AI_COMPLETION, output_str
-                                 )
-                             if usage_data:
-                                 (
-                                     prompt_tokens,
-                                     completion_tokens,
-                                     cache_read,
-                                     cache_creation,
-                                 ) = _extract_together_tokens(usage_data)
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS,
-                                     prompt_tokens,
-                                 )
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                     completion_tokens,
-                                 )
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
-                                     cache_read,
-                                 )
-                                 set_span_attribute(
-                                     span,
-                                     AttributeKeys.JUDGMENT_USAGE_METADATA,
-                                     safe_serialize(usage_data),
-                                 )
-                             # Add together_ai/ prefix to response model for server-side cost calculation
-                             response_model = getattr(response, "model", model_name)
-                             prefixed_response_model = (
-                                 f"together_ai/{response_model}"
-                                 if response_model
-                                 else ""
-                             )
-                             set_span_attribute(
-                                 span,
-                                 AttributeKeys.GEN_AI_RESPONSE_MODEL,
-                                 prefixed_response_model,
-                             )
-                     except Exception as e:
-                         judgeval_logger.error(
-                             f"[together wrapped_async] Error adding span metadata: {e}"
-                         )
-                     finally:
-                         return response

-         return wrapper
+ def wrap_together_client(tracer: Tracer, client: TClient) -> TClient:
+     from judgeval.tracer.llm.llm_together.config import HAS_TOGETHER
+     from judgeval.logger import judgeval_logger

-     span_name = "TOGETHER_API_CALL"
-     if together_Together and isinstance(client, together_Together):
-         setattr(
-             client.chat.completions,
-             "create",
-             wrapped(client.chat.completions.create, span_name),
-         )
-     elif together_AsyncTogether and isinstance(client, together_AsyncTogether):
-         setattr(
-             client.chat.completions,
-             "create",
-             wrapped_async(client.chat.completions.create, span_name),
+     if not HAS_TOGETHER:
+         judgeval_logger.error(
+             "Cannot wrap Together client: 'together' library not installed. "
+             "Install it with: pip install together"
          )
+         return client

-     return client
+     from together import Together, AsyncTogether  # type: ignore[import-untyped]
+
+     if isinstance(client, AsyncTogether):
+         return wrap_together_client_async(tracer, client)
+     elif isinstance(client, Together):
+         return wrap_together_client_sync(tracer, client)
+     else:
+         raise TypeError(f"Invalid client type: {type(client)}")
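The net effect of this hunk: the Protocol definitions, streaming generator classes, and inline span bookkeeping move out of wrapper.py into the new chat_completions.py module (listed above), leaving only type-safe dispatch. A minimal usage sketch of the surviving entry point — the Tracer constructor arguments and the model id below are assumptions for illustration; only wrap_together_client and its overloads appear in this diff:

# Hypothetical usage; Tracer() arguments are assumed, not shown in this diff.
from together import Together

from judgeval.tracer import Tracer
from judgeval.tracer.llm.llm_together.wrapper import wrap_together_client

tracer = Tracer(project_name="demo")  # assumed constructor signature
client = wrap_together_client(tracer, Together())  # returns the same client, now traced

# Calls go through the wrapped chat.completions.create; an unsupported
# client type raises TypeError instead of being silently returned.
response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",  # illustrative model id
    messages=[{"role": "user", "content": "Hello"}],
)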
judgeval/tracer/llm/providers.py
@@ -1,35 +1,10 @@
  from __future__ import annotations
  from typing import Any, TypeAlias

- from judgeval.tracer.llm.llm_openai.config import (
-     HAS_OPENAI,
-     openai_OpenAI,
-     openai_AsyncOpenAI,
-     openai_ChatCompletion,
-     openai_Response,
-     openai_ParsedChatCompletion,
- )
- from judgeval.tracer.llm.llm_together.config import (
-     HAS_TOGETHER,
-     together_Together,
-     together_AsyncTogether,
- )
- from judgeval.tracer.llm.llm_anthropic.config import (
-     HAS_ANTHROPIC,
-     anthropic_Anthropic,
-     anthropic_AsyncAnthropic,
- )
- from judgeval.tracer.llm.llm_google.config import (
-     HAS_GOOGLE_GENAI,
-     google_genai_Client,
-     google_genai_AsyncClient,
- )
- from judgeval.tracer.llm.llm_groq.config import (
-     HAS_GROQ,
-     groq_Groq,
-     groq_AsyncGroq,
- )
-
+ from judgeval.tracer.llm.llm_openai.config import HAS_OPENAI
+ from judgeval.tracer.llm.llm_together.config import HAS_TOGETHER
+ from judgeval.tracer.llm.llm_anthropic.config import HAS_ANTHROPIC
+ from judgeval.tracer.llm.llm_google.config import HAS_GOOGLE_GENAI

  # TODO: if we support dependency groups we can have this better type, but during runtime, we do
  # not know which clients an end user might have installed.
@@ -37,27 +12,8 @@ ApiClient: TypeAlias = Any

  __all__ = [
      "ApiClient",
-     # OpenAI
      "HAS_OPENAI",
-     "openai_OpenAI",
-     "openai_AsyncOpenAI",
-     "openai_ChatCompletion",
-     "openai_Response",
-     "openai_ParsedChatCompletion",
-     # Together
      "HAS_TOGETHER",
-     "together_Together",
-     "together_AsyncTogether",
-     # Anthropic
      "HAS_ANTHROPIC",
-     "anthropic_Anthropic",
-     "anthropic_AsyncAnthropic",
-     # Google GenAI
      "HAS_GOOGLE_GENAI",
-     "google_genai_Client",
-     "google_genai_AsyncClient",
-     # Groq
-     "HAS_GROQ",
-     "groq_Groq",
-     "groq_AsyncGroq",
  ]
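providers.py now re-exports only the HAS_* availability flags; the aliased client classes (openai_OpenAI, together_Together, and so on) are gone, and the Groq entries are dropped along with the llm_groq package. Call sites are expected to branch on the flag and import the real SDK lazily, which is exactly what the new wrap_together_client above does. A sketch of that pattern — make_traced_together_client is a hypothetical helper, not part of this diff:

from judgeval.tracer.llm.providers import HAS_TOGETHER

def make_traced_together_client(tracer):  # hypothetical helper for illustration
    if not HAS_TOGETHER:
        raise RuntimeError("'together' is not installed; install it with: pip install together")
    # Import the SDK only once we know it is present.
    from together import Together
    from judgeval.tracer.llm.llm_together.wrapper import wrap_together_client
    return wrap_together_client(tracer, Together())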
judgeval/utils/decorators/dont_throw.py
@@ -1,21 +1,37 @@
+ from functools import wraps
+ from typing import Any, Callable, ParamSpec, TypeVar, overload
+
  from judgeval.logger import judgeval_logger

+ T = TypeVar("T")
+ D = TypeVar("D")
+ P = ParamSpec("P")

- from functools import wraps
- from typing import Callable, TypeVar

- T = TypeVar("T")
+ @overload
+ def dont_throw(func: Callable[P, T], /) -> Callable[P, T | None]: ...
+
+
+ @overload
+ def dont_throw(
+     func: None = None, /, *, default: D
+ ) -> Callable[[Callable[P, T]], Callable[P, T | D]]: ...
+

+ def dont_throw(func: Callable[P, T] | None = None, /, *, default: Any = None):
+     def decorator(f: Callable[P, T]) -> Callable[P, T | Any]:
+         @wraps(f)
+         def wrapper(*args: P.args, **kwargs: P.kwargs) -> T | Any:
+             try:
+                 return f(*args, **kwargs)
+             except Exception as e:
+                 judgeval_logger.debug(
+                     f"[Caught] An exception was raised in {f.__name__}", exc_info=e
+                 )
+                 return default

- def dont_throw(func: Callable[..., T]) -> Callable[..., T | None]:
-     @wraps(func)
-     def wrapper(*args, **kwargs):
-         try:
-             return func(*args, **kwargs)
-         except Exception as e:
-             judgeval_logger.warning(
-                 f"An exception was raised in {func.__name__}", exc_info=e
-             )
-             pass
+         return wrapper

-     return wrapper
+     if func is None:
+         return decorator
+     return decorator(func)
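The rewritten decorator supports both a bare and a parameterized form, per the overloads above: used bare, it returns None when an exception is swallowed, while dont_throw(default=...) substitutes a caller-chosen value; failures are now logged at debug rather than warning level. A quick illustration (the example functions are hypothetical):

from judgeval.utils.decorators.dont_throw import dont_throw

@dont_throw  # bare form: a raised exception is logged and None is returned
def parse_port(raw: str) -> int:
    return int(raw)

@dont_throw(default=-1)  # parameterized form: the given default is returned instead
def parse_retries(raw: str) -> int:
    return int(raw)

assert parse_port("???") is None
assert parse_retries("???") == -1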
judgeval/utils/wrappers/README.md
@@ -0,0 +1,3 @@
+ # Wrapper Utilities
+
+ Ensure 100% test coverage for all files in this folder
judgeval/utils/wrappers/__init__.py
@@ -0,0 +1,15 @@
+ from .immutable_wrap_sync import immutable_wrap_sync
+ from .immutable_wrap_async import immutable_wrap_async
+ from .immutable_wrap_sync_iterator import immutable_wrap_sync_iterator
+ from .immutable_wrap_async_iterator import immutable_wrap_async_iterator
+ from .mutable_wrap_sync import mutable_wrap_sync
+ from .mutable_wrap_async import mutable_wrap_async
+
+ __all__ = [
+     "immutable_wrap_sync",
+     "immutable_wrap_async",
+     "immutable_wrap_sync_iterator",
+     "immutable_wrap_async_iterator",
+     "mutable_wrap_sync",
+     "mutable_wrap_async",
+ ]
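The six wrapper utilities are implemented in the sibling files listed in the changed-files table above; none of their bodies appear in this section, so their real signatures are unknown here. Going only by the naming, an "immutable" wrapper presumably lets hooks observe a call without touching its arguments or return value, whereas a "mutable" one may replace the result. A speculative sketch of that distinction — the name and signature below are illustrative, not the package's actual API:

from typing import Callable, ParamSpec, TypeVar

P = ParamSpec("P")
R = TypeVar("R")

# Speculative: hooks observe the call; arguments and result pass through untouched.
def immutable_wrap_sync_sketch(
    func: Callable[P, R],
    before: Callable[..., None],
    after: Callable[[R], None],
) -> Callable[P, R]:
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        before(*args, **kwargs)
        result = func(*args, **kwargs)
        after(result)
        return result

    return wrapper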