judgeval 0.15.0__py3-none-any.whl → 0.16.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. judgeval/api/__init__.py +4 -18
  2. judgeval/api/api_types.py +18 -2
  3. judgeval/data/judgment_types.py +18 -2
  4. judgeval/logger.py +1 -1
  5. judgeval/tracer/__init__.py +10 -7
  6. judgeval/tracer/keys.py +7 -3
  7. judgeval/tracer/llm/__init__.py +2 -1227
  8. judgeval/tracer/llm/config.py +110 -0
  9. judgeval/tracer/llm/constants.py +10 -0
  10. judgeval/tracer/llm/llm_anthropic/__init__.py +3 -0
  11. judgeval/tracer/llm/llm_anthropic/wrapper.py +611 -0
  12. judgeval/tracer/llm/llm_google/__init__.py +0 -0
  13. judgeval/tracer/llm/llm_google/config.py +24 -0
  14. judgeval/tracer/llm/llm_google/wrapper.py +426 -0
  15. judgeval/tracer/llm/llm_groq/__init__.py +0 -0
  16. judgeval/tracer/llm/llm_groq/config.py +23 -0
  17. judgeval/tracer/llm/llm_groq/wrapper.py +477 -0
  18. judgeval/tracer/llm/llm_openai/__init__.py +3 -0
  19. judgeval/tracer/llm/llm_openai/wrapper.py +637 -0
  20. judgeval/tracer/llm/llm_together/__init__.py +0 -0
  21. judgeval/tracer/llm/llm_together/config.py +23 -0
  22. judgeval/tracer/llm/llm_together/wrapper.py +478 -0
  23. judgeval/tracer/llm/providers.py +5 -5
  24. judgeval/tracer/processors/__init__.py +1 -1
  25. judgeval/trainer/console.py +1 -1
  26. judgeval/utils/decorators/__init__.py +0 -0
  27. judgeval/utils/decorators/dont_throw.py +21 -0
  28. judgeval/utils/{decorators.py → decorators/use_once.py} +0 -11
  29. judgeval/utils/meta.py +1 -1
  30. judgeval/utils/version_check.py +1 -1
  31. judgeval/version.py +1 -1
  32. judgeval-0.16.1.dist-info/METADATA +266 -0
  33. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/RECORD +38 -24
  34. judgeval/tracer/llm/google/__init__.py +0 -21
  35. judgeval/tracer/llm/groq/__init__.py +0 -20
  36. judgeval/tracer/llm/together/__init__.py +0 -20
  37. judgeval-0.15.0.dist-info/METADATA +0 -158
  38. /judgeval/tracer/llm/{anthropic/__init__.py → llm_anthropic/config.py} +0 -0
  39. /judgeval/tracer/llm/{openai/__init__.py → llm_openai/config.py} +0 -0
  40. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/WHEEL +0 -0
  41. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/entry_points.txt +0 -0
  42. {judgeval-0.15.0.dist-info → judgeval-0.16.1.dist-info}/licenses/LICENSE.md +0 -0
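
Most of this release is a restructuring of the LLM tracing layer: the monolithic judgeval/tracer/llm/__init__.py (down 1,227 lines) is split into per-provider packages (llm_anthropic, llm_google, llm_groq, llm_openai, llm_together), each pairing a config.py (optional-import flags and client types such as HAS_TOGETHER and together_Together, judging by the imports in providers.py below) with a wrapper.py that does the instrumentation. judgeval/utils/decorators.py is likewise split into a package with one module per decorator. The hunks below show the new Together wrapper in full, followed by the import-path updates in the modules that consume these packages.
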
judgeval/tracer/llm/llm_together/wrapper.py ADDED
@@ -0,0 +1,478 @@
+ from __future__ import annotations
+ import functools
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Optional,
+     Protocol,
+     Tuple,
+     Union,
+     Iterator,
+     AsyncIterator,
+     Sequence,
+     runtime_checkable,
+ )
+
+ from judgeval.tracer.llm.llm_together.config import (
+     together_Together,
+     together_AsyncTogether,
+ )
+ from judgeval.tracer.managers import sync_span_context, async_span_context
+ from judgeval.tracer.keys import AttributeKeys
+ from judgeval.tracer.utils import set_span_attribute
+ from judgeval.utils.serialize import safe_serialize
+
+ if TYPE_CHECKING:
+     from judgeval.tracer import Tracer
+     from opentelemetry.trace import Span
+
+ # Keep the original client type for runtime compatibility
+ TogetherClientType = Union[together_Together, together_AsyncTogether]
+
+
+ # Usage protocols
+ @runtime_checkable
+ class TogetherUsage(Protocol):
+     prompt_tokens: Optional[int]
+     completion_tokens: Optional[int]
+     total_tokens: Optional[int]
+
+
+ # Message protocols
+ @runtime_checkable
+ class TogetherMessage(Protocol):
+     content: Optional[str]
+     role: str
+
+
+ @runtime_checkable
+ class TogetherChoice(Protocol):
+     index: int
+     message: TogetherMessage
+     finish_reason: Optional[str]
+
+
+ @runtime_checkable
+ class TogetherChatCompletion(Protocol):
+     id: str
+     object: str
+     created: int
+     model: str
+     choices: Sequence[TogetherChoice]
+     usage: Optional[TogetherUsage]
+
+
+ # Stream protocols
+ @runtime_checkable
+ class TogetherStreamDelta(Protocol):
+     content: Optional[str]
+
+
+ @runtime_checkable
+ class TogetherStreamChoice(Protocol):
+     index: int
+     delta: TogetherStreamDelta
+
+
+ @runtime_checkable
+ class TogetherStreamChunk(Protocol):
+     choices: Sequence[TogetherStreamChoice]
+     usage: Optional[TogetherUsage]
+
+
+ # Client protocols
+ @runtime_checkable
+ class TogetherClient(Protocol):
+     pass
+
+
+ @runtime_checkable
+ class TogetherAsyncClient(Protocol):
+     pass
+
+
+ # Union types
+ TogetherResponseType = TogetherChatCompletion
+ TogetherStreamType = Union[
+     Iterator[TogetherStreamChunk], AsyncIterator[TogetherStreamChunk]
+ ]
+
+
+ def _extract_together_content(chunk: TogetherStreamChunk) -> str:
+     if chunk.choices and len(chunk.choices) > 0:
+         delta_content = chunk.choices[0].delta.content
+         if delta_content:
+             return delta_content
+     return ""
+
+
+ def _extract_together_tokens(usage_data: TogetherUsage) -> Tuple[int, int, int, int]:
+     prompt_tokens = usage_data.prompt_tokens or 0
+     completion_tokens = usage_data.completion_tokens or 0
+     cache_read_input_tokens = 0  # Together doesn't support cache tokens
+     cache_creation_input_tokens = 0  # Together doesn't support cache tokens
+     return (
+         prompt_tokens,
+         completion_tokens,
+         cache_read_input_tokens,
+         cache_creation_input_tokens,
+     )
+
+
+ def _format_together_output(
+     response: TogetherChatCompletion,
+ ) -> Tuple[Optional[Union[str, list[dict[str, Any]]]], Optional[TogetherUsage]]:
+     message_content: Optional[Union[str, list[dict[str, Any]]]] = None
+     usage_data: Optional[TogetherUsage] = None
+
+     try:
+         if isinstance(response, TogetherChatCompletion):
+             usage_data = response.usage
+             if response.choices and len(response.choices) > 0:
+                 content = response.choices[0].message.content
+                 if content:
+                     # Return structured data for consistency with other providers
+                     message_content = [{"type": "text", "text": str(content)}]
+     except (AttributeError, IndexError, TypeError):
+         pass
+
+     return message_content, usage_data
+
+
+ class TracedTogetherGenerator:
+     def __init__(
+         self,
+         tracer: Tracer,
+         generator: Iterator[TogetherStreamChunk],
+         client: TogetherClientType,
+         span: Span,
+         model_name: str,
+     ):
+         self.tracer = tracer
+         self.generator = generator
+         self.client = client
+         self.span = span
+         self.model_name = model_name
+         self.accumulated_content = ""
+
+     def __iter__(self) -> Iterator[TogetherStreamChunk]:
+         return self
+
+     def __next__(self) -> TogetherStreamChunk:
+         try:
+             chunk = next(self.generator)
+             content = _extract_together_content(chunk)
+             if content:
+                 self.accumulated_content += content
+             if chunk.usage:
+                 (
+                     prompt_tokens,
+                     completion_tokens,
+                     cache_read,
+                     cache_creation,
+                 ) = _extract_together_tokens(chunk.usage)
+                 set_span_attribute(
+                     self.span, AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
+                 )
+                 set_span_attribute(
+                     self.span,
+                     AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
+                     completion_tokens,
+                 )
+                 set_span_attribute(
+                     self.span,
+                     AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
+                     cache_read,
+                 )
+                 set_span_attribute(
+                     self.span,
+                     AttributeKeys.JUDGMENT_USAGE_METADATA,
+                     safe_serialize(chunk.usage),
+                 )
+             return chunk
+         except StopIteration:
+             set_span_attribute(
+                 self.span, AttributeKeys.GEN_AI_COMPLETION, self.accumulated_content
+             )
+             self.span.end()
+             raise
+         except Exception as e:
+             if self.span:
+                 self.span.record_exception(e)
+                 self.span.end()
+             raise
+
+
+ class TracedTogetherAsyncGenerator:
+     def __init__(
+         self,
+         tracer: Tracer,
+         async_generator: AsyncIterator[TogetherStreamChunk],
+         client: TogetherClientType,
+         span: Span,
+         model_name: str,
+     ):
+         self.tracer = tracer
+         self.async_generator = async_generator
+         self.client = client
+         self.span = span
+         self.model_name = model_name
+         self.accumulated_content = ""
+
+     def __aiter__(self) -> AsyncIterator[TogetherStreamChunk]:
+         return self
+
+     async def __anext__(self) -> TogetherStreamChunk:
+         try:
+             chunk = await self.async_generator.__anext__()
+             content = _extract_together_content(chunk)
+             if content:
+                 self.accumulated_content += content
+             if chunk.usage:
+                 (
+                     prompt_tokens,
+                     completion_tokens,
+                     cache_read,
+                     cache_creation,
+                 ) = _extract_together_tokens(chunk.usage)
+                 set_span_attribute(
+                     self.span, AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS, prompt_tokens
+                 )
+                 set_span_attribute(
+                     self.span,
+                     AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
+                     completion_tokens,
+                 )
+                 set_span_attribute(
+                     self.span,
+                     AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
+                     cache_read,
+                 )
+                 set_span_attribute(
+                     self.span,
+                     AttributeKeys.JUDGMENT_USAGE_METADATA,
+                     safe_serialize(chunk.usage),
+                 )
+             return chunk
+         except StopAsyncIteration:
+             set_span_attribute(
+                 self.span, AttributeKeys.GEN_AI_COMPLETION, self.accumulated_content
+             )
+             self.span.end()
+             raise
+         except Exception as e:
+             if self.span:
+                 self.span.record_exception(e)
+                 self.span.end()
+             raise
+
+
+ def wrap_together_client(
+     tracer: Tracer, client: TogetherClientType
+ ) -> TogetherClientType:
+     def wrapped(function: Callable, span_name: str):
+         @functools.wraps(function)
+         def wrapper(*args, **kwargs):
+             if kwargs.get("stream", False):
+                 span = tracer.get_tracer().start_span(
+                     span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
+                 )
+                 tracer.add_agent_attributes_to_span(span)
+                 set_span_attribute(
+                     span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
+                 )
+                 model_name = kwargs.get("model", "")
+                 # Add together_ai/ prefix for server-side cost calculation
+                 prefixed_model_name = f"together_ai/{model_name}" if model_name else ""
+                 set_span_attribute(
+                     span, AttributeKeys.GEN_AI_REQUEST_MODEL, prefixed_model_name
+                 )
+                 stream_response = function(*args, **kwargs)
+                 return TracedTogetherGenerator(
+                     tracer, stream_response, client, span, model_name
+                 )
+             else:
+                 with sync_span_context(
+                     tracer, span_name, {AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
+                 ) as span:
+                     tracer.add_agent_attributes_to_span(span)
+                     set_span_attribute(
+                         span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
+                     )
+                     model_name = kwargs.get("model", "")
+                     # Add together_ai/ prefix for server-side cost calculation
+                     prefixed_model_name = (
+                         f"together_ai/{model_name}" if model_name else ""
+                     )
+                     set_span_attribute(
+                         span, AttributeKeys.GEN_AI_REQUEST_MODEL, prefixed_model_name
+                     )
+                     response = function(*args, **kwargs)
+
+                     if isinstance(response, TogetherChatCompletion):
+                         output, usage_data = _format_together_output(response)
+                         # Serialize structured data to JSON for span attribute
+                         if output:
+                             if isinstance(output, list):
+                                 import orjson
+
+                                 output_str = orjson.dumps(
+                                     output, option=orjson.OPT_INDENT_2
+                                 ).decode()
+                             else:
+                                 output_str = str(output)
+                             set_span_attribute(
+                                 span, AttributeKeys.GEN_AI_COMPLETION, output_str
+                             )
+                         if usage_data:
+                             (
+                                 prompt_tokens,
+                                 completion_tokens,
+                                 cache_read,
+                                 cache_creation,
+                             ) = _extract_together_tokens(usage_data)
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS,
+                                 prompt_tokens,
+                             )
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
+                                 completion_tokens,
+                             )
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
+                                 cache_read,
+                             )
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.JUDGMENT_USAGE_METADATA,
+                                 safe_serialize(usage_data),
+                             )
+                         # Add together_ai/ prefix to response model for server-side cost calculation
+                         response_model = getattr(response, "model", model_name)
+                         prefixed_response_model = (
+                             f"together_ai/{response_model}" if response_model else ""
+                         )
+                         set_span_attribute(
+                             span,
+                             AttributeKeys.GEN_AI_RESPONSE_MODEL,
+                             prefixed_response_model,
+                         )
+                     return response
+
+         return wrapper
+
+     def wrapped_async(function: Callable, span_name: str):
+         @functools.wraps(function)
+         async def wrapper(*args, **kwargs):
+             if kwargs.get("stream", False):
+                 span = tracer.get_tracer().start_span(
+                     span_name, attributes={AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
+                 )
+                 tracer.add_agent_attributes_to_span(span)
+                 set_span_attribute(
+                     span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
+                 )
+                 model_name = kwargs.get("model", "")
+                 # Add together_ai/ prefix for server-side cost calculation
+                 prefixed_model_name = f"together_ai/{model_name}" if model_name else ""
+                 set_span_attribute(
+                     span, AttributeKeys.GEN_AI_REQUEST_MODEL, prefixed_model_name
+                 )
+                 stream_response = await function(*args, **kwargs)
+                 return TracedTogetherAsyncGenerator(
+                     tracer, stream_response, client, span, model_name
+                 )
+             else:
+                 async with async_span_context(
+                     tracer, span_name, {AttributeKeys.JUDGMENT_SPAN_KIND: "llm"}
+                 ) as span:
+                     tracer.add_agent_attributes_to_span(span)
+                     set_span_attribute(
+                         span, AttributeKeys.GEN_AI_PROMPT, safe_serialize(kwargs)
+                     )
+                     model_name = kwargs.get("model", "")
+                     # Add together_ai/ prefix for server-side cost calculation
+                     prefixed_model_name = (
+                         f"together_ai/{model_name}" if model_name else ""
+                     )
+                     set_span_attribute(
+                         span, AttributeKeys.GEN_AI_REQUEST_MODEL, prefixed_model_name
+                     )
+                     response = await function(*args, **kwargs)
+
+                     if isinstance(response, TogetherChatCompletion):
+                         output, usage_data = _format_together_output(response)
+                         # Serialize structured data to JSON for span attribute
+                         if output:
+                             if isinstance(output, list):
+                                 import orjson
+
+                                 output_str = orjson.dumps(
+                                     output, option=orjson.OPT_INDENT_2
+                                 ).decode()
+                             else:
+                                 output_str = str(output)
+                             set_span_attribute(
+                                 span, AttributeKeys.GEN_AI_COMPLETION, output_str
+                             )
+                         if usage_data:
+                             (
+                                 prompt_tokens,
+                                 completion_tokens,
+                                 cache_read,
+                                 cache_creation,
+                             ) = _extract_together_tokens(usage_data)
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.GEN_AI_USAGE_INPUT_TOKENS,
+                                 prompt_tokens,
+                             )
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.GEN_AI_USAGE_OUTPUT_TOKENS,
+                                 completion_tokens,
+                             )
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS,
+                                 cache_read,
+                             )
+                             set_span_attribute(
+                                 span,
+                                 AttributeKeys.JUDGMENT_USAGE_METADATA,
+                                 safe_serialize(usage_data),
+                             )
+                         # Add together_ai/ prefix to response model for server-side cost calculation
+                         response_model = getattr(response, "model", model_name)
+                         prefixed_response_model = (
+                             f"together_ai/{response_model}" if response_model else ""
+                         )
+                         set_span_attribute(
+                             span,
+                             AttributeKeys.GEN_AI_RESPONSE_MODEL,
+                             prefixed_response_model,
+                         )
+                     return response
+
+         return wrapper
+
+     span_name = "TOGETHER_API_CALL"
+     if together_Together and isinstance(client, together_Together):
+         setattr(
+             client.chat.completions,
+             "create",
+             wrapped(client.chat.completions.create, span_name),
+         )
+     elif together_AsyncTogether and isinstance(client, together_AsyncTogether):
+         setattr(
+             client.chat.completions,
+             "create",
+             wrapped_async(client.chat.completions.create, span_name),
+         )
+
+     return client

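For orientation, a minimal usage sketch of the new wrapper (not part of the diff; the Tracer constructor arguments and model name are illustrative):

from together import Together

from judgeval.tracer import Tracer
from judgeval.tracer.llm.llm_together.wrapper import wrap_together_client

tracer = Tracer(project_name="my_project")  # illustrative arguments
client = wrap_together_client(tracer, Together())

# Non-streaming: runs inside sync_span_context; prompt, completion,
# token usage, and the together_ai/-prefixed model names are recorded
# on the span.
response = client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",  # illustrative
    messages=[{"role": "user", "content": "Hello"}],
)

# Streaming: create() instead returns a TracedTogetherGenerator, which
# accumulates chunk content and ends the span once the stream is
# exhausted (StopIteration).
for chunk in client.chat.completions.create(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",  # illustrative
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    pass
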
judgeval/tracer/llm/providers.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations
  from typing import Any, TypeAlias

- from judgeval.tracer.llm.openai import (
+ from judgeval.tracer.llm.llm_openai.config import (
      HAS_OPENAI,
      openai_OpenAI,
      openai_AsyncOpenAI,
@@ -9,22 +9,22 @@ from judgeval.tracer.llm.openai import (
      openai_Response,
      openai_ParsedChatCompletion,
  )
- from judgeval.tracer.llm.together import (
+ from judgeval.tracer.llm.llm_together.config import (
      HAS_TOGETHER,
      together_Together,
      together_AsyncTogether,
  )
- from judgeval.tracer.llm.anthropic import (
+ from judgeval.tracer.llm.llm_anthropic.config import (
      HAS_ANTHROPIC,
      anthropic_Anthropic,
      anthropic_AsyncAnthropic,
  )
- from judgeval.tracer.llm.google import (
+ from judgeval.tracer.llm.llm_google.config import (
      HAS_GOOGLE_GENAI,
      google_genai_Client,
      google_genai_AsyncClient,
  )
- from judgeval.tracer.llm.groq import (
+ from judgeval.tracer.llm.llm_groq.config import (
      HAS_GROQ,
      groq_Groq,
      groq_AsyncGroq,

judgeval/tracer/processors/__init__.py CHANGED
@@ -10,7 +10,7 @@ from opentelemetry.sdk.trace.export import (
  from judgeval.tracer.exporters import JudgmentSpanExporter
  from judgeval.tracer.keys import AttributeKeys, InternalAttributeKeys, ResourceKeys
  from judgeval.utils.url import url_for
- from judgeval.utils.decorators import dont_throw
+ from judgeval.utils.decorators.dont_throw import dont_throw
  from judgeval.version import get_version

  if TYPE_CHECKING:

judgeval/trainer/console.py CHANGED
@@ -2,7 +2,7 @@ from contextlib import contextmanager
  from typing import Optional
  import sys
  import os
- from judgeval.utils.decorators import use_once
+ from judgeval.utils.decorators.use_once import use_once


  @use_once

judgeval/utils/decorators/__init__.py ADDED
File without changes

judgeval/utils/decorators/dont_throw.py ADDED
@@ -0,0 +1,21 @@
+ from judgeval.logger import judgeval_logger
+
+
+ from functools import wraps
+ from typing import Callable, TypeVar
+
+ T = TypeVar("T")
+
+
+ def dont_throw(func: Callable[..., T]) -> Callable[..., T | None]:
+     @wraps(func)
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except Exception as e:
+             judgeval_logger.debug(
+                 f"An exception was raised in {func.__name__}", exc_info=e
+             )
+             pass
+
+     return wrapper

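Note that, compared with the copy removed from decorators.py below, the relocated dont_throw now logs the swallowed exception at debug level instead of discarding it silently. An illustrative example (not from the diff):

from judgeval.utils.decorators.dont_throw import dont_throw

@dont_throw
def flaky() -> int:
    raise RuntimeError("boom")

result = flaky()  # logs "An exception was raised in flaky"; nothing raises
assert result is None  # the wrapper falls through and returns None
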
judgeval/utils/{decorators.py → decorators/use_once.py} RENAMED
@@ -11,14 +11,3 @@ def use_once(func: Callable[..., T]) -> Callable[..., T]:
          return func(*args, **kwargs)

      return wrapper
-
-
- def dont_throw(func: Callable[..., T]) -> Callable[..., T | None]:
-     @wraps(func)
-     def wrapper(*args, **kwargs):
-         try:
-             return func(*args, **kwargs)
-         except Exception:
-             pass
-
-     return wrapper

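Only the tail of use_once is visible in this hunk; judging by the name, the @use_once call sites above, and the visible return statements, it guards a function so that only the first call runs. A hypothetical sketch of that pattern (the real body is not shown in the diff):

from functools import wraps
from typing import Callable, TypeVar

T = TypeVar("T")

def use_once(func: Callable[..., T]) -> Callable[..., T]:
    called = False  # closure flag; set on the first invocation

    @wraps(func)
    def wrapper(*args, **kwargs):
        nonlocal called
        if not called:
            called = True
            return func(*args, **kwargs)

    return wrapper
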
judgeval/utils/meta.py CHANGED
@@ -11,7 +11,7 @@ class SingletonMeta(type):

      _instances: Dict[type, object] = {}

-     def __call__(cls, *args, **kwargs) -> object:
+     def __call__(cls, *args, **kwargs):
          if cls not in SingletonMeta._instances:
              SingletonMeta._instances[cls] = super(SingletonMeta, cls).__call__(
                  *args, **kwargs

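Dropping the explicit -> object return annotation presumably lets type checkers infer the concrete instance type at call sites rather than widening it to object. Illustrative usage of the metaclass (the Settings class is hypothetical):

from judgeval.utils.meta import SingletonMeta

class Settings(metaclass=SingletonMeta):  # hypothetical example class
    def __init__(self, value: int = 0):
        self.value = value

assert Settings(1) is Settings(2)  # later calls return the cached instance
assert Settings(2).value == 1  # __init__ only ran on the first call
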
judgeval/utils/version_check.py CHANGED
@@ -2,7 +2,7 @@ import importlib.metadata
  import httpx
  import threading
  from judgeval.logger import judgeval_logger
- from judgeval.utils.decorators import use_once
+ from judgeval.utils.decorators.use_once import use_once


  @use_once

judgeval/version.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.15.0"
+ __version__ = "0.16.1"


  def get_version() -> str: