pydantic-evals 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff compares two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of pydantic-evals was flagged for review.

pydantic_evals/evaluators/__init__.py:

@@ -7,7 +7,6 @@ from .common import (
     LLMJudge,
     MaxDuration,
     OutputConfig,
-    Python,
 )
 from .context import EvaluatorContext
 from .evaluator import EvaluationReason, EvaluationResult, Evaluator, EvaluatorFailure, EvaluatorOutput, EvaluatorSpec
@@ -22,7 +21,6 @@ __all__ = (
     'LLMJudge',
     'HasMatchingSpan',
     'OutputConfig',
-    'Python',
     # context
     'EvaluatorContext',
     # evaluator
@@ -34,3 +32,11 @@ __all__ = (
     'EvaluationReason',
     'EvaluationResult',
 )
+
+
+def __getattr__(name: str):
+    if name == 'Python':
+        raise ImportError(
+            'The `Python` evaluator has been removed for security reasons. See https://github.com/pydantic/pydantic-ai/pull/2808 for more details and a workaround.'
+        )
+    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
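
The added `__getattr__` uses the module-level attribute hook from PEP 562 (available since Python 3.7, well within the package's `>=3.10` floor): any lookup of the removed `Python` name on the package now fails with a targeted ImportError, while other missing names still raise the usual AttributeError. A caller-side sketch of the resulting behavior after upgrading (illustrative only, not part of the package):

    # Importing the removed evaluator now raises ImportError, because
    # `from ... import Python` invokes the module-level __getattr__ above.
    try:
        from pydantic_evals.evaluators import Python
    except ImportError as exc:
        print(exc)  # points at https://github.com/pydantic/pydantic-ai/pull/2808

    # Unaffected exports still import normally.
    from pydantic_evals.evaluators import LLMJudge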
pydantic_evals/evaluators/common.py:

@@ -21,7 +21,6 @@ __all__ = (
     'MaxDuration',
     'LLMJudge',
     'HasMatchingSpan',
-    'Python',
     'OutputConfig',
 )
 
@@ -268,22 +267,6 @@ class HasMatchingSpan(Evaluator[object, object, object]):
         return ctx.span_tree.any(self.query)
 
 
-# TODO: Consider moving this to docs rather than providing it with the library, given the security implications
-@dataclass(repr=False)
-class Python(Evaluator[object, object, object]):
-    """The output of this evaluator is the result of evaluating the provided Python expression.
-
-    ***WARNING***: this evaluator runs arbitrary Python code, so you should ***NEVER*** use it with untrusted inputs.
-    """
-
-    expression: str
-    evaluation_name: str | None = field(default=None)
-
-    def evaluate(self, ctx: EvaluatorContext[object, object, object]) -> EvaluatorOutput:
-        # Evaluate the condition, exposing access to the evaluator context as `ctx`.
-        return eval(self.expression, {'ctx': ctx})
-
-
 DEFAULT_EVALUATORS: tuple[type[Evaluator[object, object, object]], ...] = (
     Equals,
     EqualsExpected,
@@ -292,5 +275,12 @@ DEFAULT_EVALUATORS: tuple[type[Evaluator[object, object, object]], ...] = (
     MaxDuration,
     LLMJudge,
     HasMatchingSpan,
-    # Python,  # not included by default for security reasons
 )
+
+
+def __getattr__(name: str):
+    if name == 'Python':
+        raise ImportError(
+            'The `Python` evaluator has been removed for security reasons. See https://github.com/pydantic/pydantic-ai/pull/2808 for more details and a workaround.'
+        )
+    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
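
The error message points to the PR for a workaround: because the deleted class was self-contained, users who accept the risk can maintain their own copy. A minimal sketch adapted from the deleted source above, assuming `Evaluator`, `EvaluatorContext`, and `EvaluatorOutput` remain importable from the package root as the `__init__.py` diff indicates (the name `LocalPython` is ours, not part of the library):

    # Adapted from the evaluator removed above. It still executes arbitrary
    # Python via eval(), so NEVER construct it from untrusted expressions.
    from dataclasses import dataclass, field

    from pydantic_evals.evaluators import Evaluator, EvaluatorContext, EvaluatorOutput


    @dataclass(repr=False)
    class LocalPython(Evaluator[object, object, object]):
        """The result of evaluating the provided Python expression against `ctx`."""

        expression: str
        evaluation_name: str | None = field(default=None)

        def evaluate(self, ctx: EvaluatorContext[object, object, object]) -> EvaluatorOutput:
            # Expose the evaluator context to the expression as `ctx`.
            return eval(self.expression, {'ctx': ctx})

As with the original class, which was already commented out of DEFAULT_EVALUATORS, this copy only runs where you pass it explicitly.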
pydantic_evals-1.0.1.dist-info/METADATA:

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pydantic-evals
-Version: 1.0.0
+Version: 1.0.1
 Summary: Framework for evaluating stochastic code execution, especially code making use of LLMs
 Project-URL: Homepage, https://ai.pydantic.dev/evals
 Project-URL: Source, https://github.com/pydantic/pydantic-ai
@@ -29,9 +29,8 @@ Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: anyio>=0
-Requires-Dist: eval-type-backport>=0; python_version < '3.11'
 Requires-Dist: logfire-api>=3.14.1
-Requires-Dist: pydantic-ai-slim==1.0.0
+Requires-Dist: pydantic-ai-slim==1.0.1
 Requires-Dist: pydantic>=2.10
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=13.9.4
pydantic_evals-1.0.1.dist-info/RECORD:

@@ -3,9 +3,9 @@ pydantic_evals/_utils.py,sha256=1muGTc2zqjwxqngz6quRSLoZM88onjp0Xgt-a9n2aPQ,4111
 pydantic_evals/dataset.py,sha256=8rcw_hJb9H01M22NInn-2Pi27xtZgfADUboMCW-nrj4,48468
 pydantic_evals/generation.py,sha256=Yd1rfbsDjjBBHDk-1KDu48hlITjM2-74rTnPBD_sqbA,3494
 pydantic_evals/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pydantic_evals/evaluators/__init__.py,sha256=b5Q7-AC6csZVK8e9t8LwKfJL9Ld80Yro8FS62V8sni4,726
+pydantic_evals/evaluators/__init__.py,sha256=E_JT6o96Ef-oS_IZ1Hyy95NRLwz7EOHewp-o13IdXEM,1032
 pydantic_evals/evaluators/_run_evaluator.py,sha256=uGmH67gCTeF9BSprCiBC4DtKEpKLrKYaXgsAQiCbCLY,3630
-pydantic_evals/evaluators/common.py,sha256=ZBrNTfPJoOpT4WNXTRGS0UcKhnuhfYJxjNzum-zHFk8,12064
+pydantic_evals/evaluators/common.py,sha256=Cc9RMsSf5P2gcq3IDwmZxgfo1xnu7HEehiAS2Hgibz4,11609
 pydantic_evals/evaluators/context.py,sha256=mTxcm0Hvkev9htpqwoJMCJIqEYBtY5g86SXcjoqQxHY,3884
 pydantic_evals/evaluators/evaluator.py,sha256=ylfKRytoM9KzbZkSsFkEEnsg4XhK4usuyy1Rb1emoPo,11474
 pydantic_evals/evaluators/llm_as_a_judge.py,sha256=i20c506j9f5J2VMzPeUky677lfGq27xaZ7xcYIFltiA,9599
@@ -17,7 +17,7 @@ pydantic_evals/otel/_errors.py,sha256=aW1414eTofpA7R_DUgOeT-gj7YA6OXmm8Y4oYeFukD
 pydantic_evals/otel/span_tree.py,sha256=RzX4VGpEqc2QUhkyxMTXtBRo5yHHO1c0hI7QJJuiXPU,23043
 pydantic_evals/reporting/__init__.py,sha256=4S8q_KfOflQlJYTISWM1Vp6_wPDHOMjbh9mSc3dU4-8,51562
 pydantic_evals/reporting/render_numbers.py,sha256=8SKlK3etbD7HnSWWHCE993ceCNLZCepVQ-SsqUIhyxk,6916
-pydantic_evals-1.0.0.dist-info/METADATA,sha256=-2N8OIJ4SehYchj-R8G5EGCkVmV_E9qtuIsxxvI7GB0,7906
-pydantic_evals-1.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-pydantic_evals-1.0.0.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
-pydantic_evals-1.0.0.dist-info/RECORD,,
+pydantic_evals-1.0.1.dist-info/METADATA,sha256=sXCoSsXg3p6Ww1Lccq8pbidUSkkzxr2D36MeL4Ir4dc,7844
+pydantic_evals-1.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+pydantic_evals-1.0.1.dist-info/licenses/LICENSE,sha256=vA6Jc482lEyBBuGUfD1pYx-cM7jxvLYOxPidZ30t_PQ,1100
+pydantic_evals-1.0.1.dist-info/RECORD,,