deepeval 3.7.4__py3-none-any.whl → 3.7.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/dataset/golden.py +54 -2
- deepeval/evaluate/evaluate.py +16 -8
- deepeval/evaluate/execute.py +70 -26
- deepeval/evaluate/utils.py +26 -22
- deepeval/integrations/pydantic_ai/agent.py +19 -2
- deepeval/integrations/pydantic_ai/instrumentator.py +62 -23
- deepeval/metrics/__init__.py +14 -12
- deepeval/metrics/answer_relevancy/answer_relevancy.py +74 -29
- deepeval/metrics/answer_relevancy/template.py +188 -92
- deepeval/metrics/base_metric.py +2 -5
- deepeval/metrics/contextual_precision/contextual_precision.py +53 -15
- deepeval/metrics/contextual_precision/template.py +115 -66
- deepeval/metrics/contextual_recall/contextual_recall.py +50 -13
- deepeval/metrics/contextual_recall/template.py +106 -55
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +47 -15
- deepeval/metrics/contextual_relevancy/template.py +87 -58
- deepeval/metrics/dag/templates.py +2 -2
- deepeval/metrics/faithfulness/faithfulness.py +70 -27
- deepeval/metrics/faithfulness/schema.py +1 -1
- deepeval/metrics/faithfulness/template.py +200 -115
- deepeval/metrics/g_eval/utils.py +2 -2
- deepeval/metrics/indicator.py +4 -4
- deepeval/metrics/multimodal_metrics/__init__.py +0 -18
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +24 -17
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +26 -21
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +24 -17
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +24 -17
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +19 -19
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +63 -78
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/utils.py +20 -20
- deepeval/metrics/multimodal_metrics/text_to_image/text_to_image.py +71 -50
- deepeval/metrics/ragas.py +3 -3
- deepeval/metrics/tool_correctness/tool_correctness.py +2 -2
- deepeval/metrics/turn_contextual_precision/schema.py +21 -0
- deepeval/metrics/turn_contextual_precision/template.py +187 -0
- deepeval/metrics/turn_contextual_precision/turn_contextual_precision.py +550 -0
- deepeval/metrics/turn_contextual_recall/schema.py +21 -0
- deepeval/metrics/turn_contextual_recall/template.py +178 -0
- deepeval/metrics/turn_contextual_recall/turn_contextual_recall.py +520 -0
- deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_contextual_relevancy}/schema.py +7 -1
- deepeval/metrics/turn_contextual_relevancy/template.py +161 -0
- deepeval/metrics/turn_contextual_relevancy/turn_contextual_relevancy.py +535 -0
- deepeval/metrics/{multimodal_metrics/multimodal_faithfulness → turn_faithfulness}/schema.py +11 -3
- deepeval/metrics/turn_faithfulness/template.py +218 -0
- deepeval/metrics/turn_faithfulness/turn_faithfulness.py +596 -0
- deepeval/metrics/utils.py +39 -58
- deepeval/models/__init__.py +0 -12
- deepeval/models/base_model.py +16 -38
- deepeval/models/embedding_models/__init__.py +7 -0
- deepeval/models/embedding_models/azure_embedding_model.py +52 -28
- deepeval/models/embedding_models/local_embedding_model.py +18 -14
- deepeval/models/embedding_models/ollama_embedding_model.py +38 -16
- deepeval/models/embedding_models/openai_embedding_model.py +40 -21
- deepeval/models/llms/amazon_bedrock_model.py +1 -2
- deepeval/models/llms/anthropic_model.py +44 -23
- deepeval/models/llms/azure_model.py +121 -36
- deepeval/models/llms/deepseek_model.py +18 -13
- deepeval/models/llms/gemini_model.py +129 -43
- deepeval/models/llms/grok_model.py +18 -13
- deepeval/models/llms/kimi_model.py +18 -13
- deepeval/models/llms/litellm_model.py +42 -22
- deepeval/models/llms/local_model.py +12 -7
- deepeval/models/llms/ollama_model.py +114 -12
- deepeval/models/llms/openai_model.py +137 -41
- deepeval/models/llms/portkey_model.py +24 -7
- deepeval/models/llms/utils.py +5 -3
- deepeval/models/retry_policy.py +17 -14
- deepeval/models/utils.py +46 -1
- deepeval/optimizer/__init__.py +5 -0
- deepeval/optimizer/algorithms/__init__.py +6 -0
- deepeval/optimizer/algorithms/base.py +29 -0
- deepeval/optimizer/algorithms/configs.py +18 -0
- deepeval/optimizer/algorithms/copro/__init__.py +5 -0
- deepeval/{optimization/copro/loop.py → optimizer/algorithms/copro/copro.py} +112 -113
- deepeval/optimizer/algorithms/gepa/__init__.py +5 -0
- deepeval/{optimization/gepa/loop.py → optimizer/algorithms/gepa/gepa.py} +175 -115
- deepeval/optimizer/algorithms/miprov2/__init__.py +17 -0
- deepeval/optimizer/algorithms/miprov2/bootstrapper.py +435 -0
- deepeval/optimizer/algorithms/miprov2/miprov2.py +752 -0
- deepeval/optimizer/algorithms/miprov2/proposer.py +301 -0
- deepeval/optimizer/algorithms/simba/__init__.py +5 -0
- deepeval/{optimization/simba/loop.py → optimizer/algorithms/simba/simba.py} +128 -112
- deepeval/{optimization → optimizer}/configs.py +5 -8
- deepeval/{optimization/policies/selection.py → optimizer/policies.py} +63 -2
- deepeval/optimizer/prompt_optimizer.py +263 -0
- deepeval/optimizer/rewriter/__init__.py +5 -0
- deepeval/optimizer/rewriter/rewriter.py +124 -0
- deepeval/optimizer/rewriter/utils.py +214 -0
- deepeval/optimizer/scorer/__init__.py +5 -0
- deepeval/optimizer/scorer/base.py +86 -0
- deepeval/optimizer/scorer/scorer.py +316 -0
- deepeval/optimizer/scorer/utils.py +30 -0
- deepeval/optimizer/types.py +148 -0
- deepeval/{optimization → optimizer}/utils.py +47 -165
- deepeval/prompt/prompt.py +5 -9
- deepeval/test_case/__init__.py +1 -3
- deepeval/test_case/api.py +12 -10
- deepeval/test_case/conversational_test_case.py +19 -1
- deepeval/test_case/llm_test_case.py +152 -1
- deepeval/test_case/utils.py +4 -8
- deepeval/test_run/api.py +15 -14
- deepeval/test_run/test_run.py +3 -3
- deepeval/tracing/patchers.py +9 -4
- deepeval/tracing/tracing.py +2 -2
- deepeval/utils.py +65 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/METADATA +1 -4
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/RECORD +116 -125
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +0 -343
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/schema.py +0 -19
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +0 -122
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +0 -301
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +0 -132
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +0 -285
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/schema.py +0 -15
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +0 -112
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +0 -282
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +0 -102
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +0 -356
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +0 -175
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/__init__.py +0 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +0 -290
- deepeval/models/mlllms/__init__.py +0 -4
- deepeval/models/mlllms/azure_model.py +0 -343
- deepeval/models/mlllms/gemini_model.py +0 -313
- deepeval/models/mlllms/ollama_model.py +0 -175
- deepeval/models/mlllms/openai_model.py +0 -309
- deepeval/optimization/__init__.py +0 -13
- deepeval/optimization/adapters/__init__.py +0 -2
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +0 -588
- deepeval/optimization/aggregates.py +0 -14
- deepeval/optimization/copro/configs.py +0 -31
- deepeval/optimization/gepa/__init__.py +0 -7
- deepeval/optimization/gepa/configs.py +0 -115
- deepeval/optimization/miprov2/configs.py +0 -134
- deepeval/optimization/miprov2/loop.py +0 -785
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +0 -458
- deepeval/optimization/policies/__init__.py +0 -16
- deepeval/optimization/policies/tie_breaker.py +0 -67
- deepeval/optimization/prompt_optimizer.py +0 -462
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +0 -33
- deepeval/optimization/types.py +0 -361
- deepeval/test_case/mllm_test_case.py +0 -170
- /deepeval/metrics/{multimodal_metrics/multimodal_answer_relevancy → turn_contextual_precision}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_precision → turn_contextual_recall}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_recall → turn_contextual_relevancy}/__init__.py +0 -0
- /deepeval/metrics/{multimodal_metrics/multimodal_contextual_relevancy → turn_faithfulness}/__init__.py +0 -0
- /deepeval/{optimization → optimizer/algorithms}/simba/types.py +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/WHEEL +0 -0
- {deepeval-3.7.4.dist-info → deepeval-3.7.5.dist-info}/entry_points.txt +0 -0
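The deepeval/{optimization → optimizer} renames above mean the 3.7.4 deepeval.optimization package no longer exists in 3.7.5, so imports of the old path break. A hedged migration sketch follows; the PromptOptimizer export name is an assumption based on the prompt_optimizer.py file names and is not confirmed by this diff:

# Hypothetical import update implied by the package rename (names assumed, not verified):
# 3.7.4
# from deepeval.optimization.prompt_optimizer import PromptOptimizer
# 3.7.5
from deepeval.optimizer import PromptOptimizer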
deepeval/optimization/gepa/configs.py (removed)

@@ -1,115 +0,0 @@
-from __future__ import annotations
-import time
-from typing import Optional
-from pydantic import (
-    BaseModel,
-    confloat,
-    conint,
-    Field,
-    field_validator,
-    PositiveInt,
-)
-
-from deepeval.optimization.policies.tie_breaker import (
-    TieBreaker as TieBreakerPolicy,
-)
-
-
-class GEPAConfig(BaseModel):
-    """
-    Core configuration for the GEPA optimization loop.
-
-    This controls:
-    - The iteration budget and acceptance threshold (iterations, min_delta).
-    - How D_train is split into a Pareto validation subset (D_pareto)
-      versus a feedback subset (D_feedback) (pareto_size).
-    - How minibatches are drawn from D_feedback, either with a fixed size
-      or dynamically from a ratio and min/max bounds (minibatch_* fields).
-    - How ties on aggregate scores are treated (tie_tolerance, tie_breaker).
-    - Randomness and rewrite instruction length (random_seed,
-      rewrite_instruction_max_chars).
-
-    See individual field descriptions for precise behavior.
-    """
-
-    iterations: PositiveInt = Field(
-        default=5,
-        description="Total number of GEPA loop iterations (mutation attempts). "
-        "This acts as the optimization budget B in the GEPA paper.",
-    )
-    minibatch_size: Optional[conint(ge=1)] = Field(
-        default=None,
-        description="Fixed minibatch size drawn from D_feedback. When set, this "
-        "overrides dynamic sizing based on `minibatch_ratio`, "
-        "`minibatch_min_size`, and `minibatch_max_size`.",
-    )
-    minibatch_min_size: conint(ge=1) = Field(
-        default=4,
-        description="Hard lower bound on the minibatch size used for D_feedback "
-        "when dynamic sizing is in effect.",
-    )
-    minibatch_max_size: PositiveInt = Field(
-        default=32,
-        description="Hard upper bound on the minibatch size used for D_feedback "
-        "when dynamic sizing is in effect.",
-    )
-    minibatch_ratio: confloat(gt=0.0, le=1.0) = Field(
-        default=0.05,
-        description=(
-            "Target fraction of |D_feedback| used to compute a dynamic "
-            "minibatch size when `minibatch_size` is None. The effective "
-            "size is round(len(D_feedback) * minibatch_ratio) bounded "
-            "between `minibatch_min_size` and `minibatch_max_size` and not "
-            "exceeding len(D_feedback). D_feedback is the subset of the "
-            "provided goldens that is not allocated to D_pareto by "
-            "`split_goldens(...)`."
-        ),
-    )
-    pareto_size: conint(ge=1) = Field(
-        default=3,
-        description="Size of the Pareto validation subset D_pareto. The splitter "
-        "will bind this between [0, len(goldens)], and the runner requires "
-        "at least 2 total goldens to run GEPA.",
-    )
-    random_seed: conint(ge=0) = Field(
-        default=0,
-        description="Non-negative RNG seed for reproducibility. "
-        "If you explicitly pass None, it is replaced with a seed "
-        "derived from time.time_ns() via the field validator.",
-    )
-    min_delta: confloat(ge=0.0) = Field(
-        default=0.0,
-        description="Minimum improvement required for a child configuration to be "
-        "accepted, e.g. σ_child >= σ_parent + min_delta. A small jitter "
-        "is applied internally to avoid floating-point edge cases.",
-    )
-    # Two candidates are considered tied if their aggregate scores are within tie_tolerance.
-    tie_tolerance: confloat(ge=0.0) = Field(
-        1e-9,
-        description="Two candidates are considered tied on aggregate score if "
-        "their values differ by at most this tolerance.",
-    )
-    tie_breaker: TieBreakerPolicy = Field(
-        TieBreakerPolicy.PREFER_CHILD,
-        description="Policy used to break ties when multiple prompt configurations "
-        "share the best aggregate score. See `GEPAConfig.TieBreaker` "
-        "for the available options. ",
-    )
-    rewrite_instruction_max_chars: PositiveInt = Field(
-        default=4096,
-        description=(
-            "Maximum number of characters from prompt, feedback, and related text "
-            "included in rewrite instructions."
-        ),
-    )
-
-    @field_validator("random_seed", mode="before")
-    @classmethod
-    def _coerce_random_seed(cls, seed):
-        if seed is None:
-            return time.time_ns()
-        else:
-            return seed
-
-
-GEPAConfig.TieBreaker = TieBreakerPolicy
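The minibatch_ratio description in the removed GEPAConfig spells out a concrete sizing rule. Below is a minimal standalone sketch of that rule; the helper name and signature are made up for illustration and are not what the removed module actually contained:

from typing import Optional

# Illustrative sketch of the dynamic minibatch sizing described by
# GEPAConfig.minibatch_ratio; effective_minibatch_size is a hypothetical name.
def effective_minibatch_size(
    n_feedback: int,              # len(D_feedback)
    ratio: float = 0.05,          # minibatch_ratio
    min_size: int = 4,            # minibatch_min_size
    max_size: int = 32,           # minibatch_max_size
    fixed: Optional[int] = None,  # minibatch_size; overrides dynamic sizing
) -> int:
    if fixed is not None:
        return fixed
    size = round(n_feedback * ratio)
    size = max(min_size, min(size, max_size))
    return min(size, n_feedback)  # never exceed len(D_feedback)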
deepeval/optimization/miprov2/configs.py (removed)

@@ -1,134 +0,0 @@
-from __future__ import annotations
-import time
-from typing import Optional
-
-from pydantic import (
-    BaseModel,
-    Field,
-    PositiveInt,
-    conint,
-    confloat,
-    field_validator,
-)
-
-
-class MIPROConfig(BaseModel):
-    """
-    Configuration for 0-shot MIPRO style prompt optimization.
-
-    This is adapted to the DeepEval setting where we optimize a single Prompt
-    (instruction) against a list of Goldens, using mini-batch evaluation and a
-    simple surrogate over prompt candidates.
-
-    Fields
-    ------
-    iterations:
-        Total number of optimization trials. Each iteration selects
-        a parent candidate, proposes a child via the PromptRewriter,
-        evaluates it on a mini-batch, and updates the surrogate stats.
-
-    minibatch_size:
-        Fixed minibatch size drawn from the full set of goldens. When set,
-        this overrides dynamic sizing based on `minibatch_ratio`,
-        `minibatch_min_size`, and `minibatch_max_size`.
-
-    minibatch_min_size:
-        Hard lower bound on minibatch size when dynamic sizing is in effect.
-
-    minibatch_max_size:
-        Hard upper bound on minibatch size when dynamic sizing is in effect.
-
-    minibatch_ratio:
-        Target fraction of len(goldens) used to compute a dynamic minibatch
-        size. The final size is bounded between `minibatch_min_size` and
-        `minibatch_max_size`.
-
-    random_seed:
-        RNG seed for reproducibility. If set to None, a seed is derived from
-        time.time_ns() by the validator.
-
-    exploration_probability:
-        Epsilon greedy exploration rate for candidate selection. With this
-        probability the runner picks a random candidate; otherwise it picks
-        the candidate with the highest mean minibatch score.
-
-    full_eval_every:
-        If set, every `full_eval_every` trials the runner fully evaluates the
-        current best candidate (by mean minibatch score) on the full set of
-        goldens, storing scores per-instance. If None, only a final full
-        evaluation is done at the end.
-
-    rewrite_instruction_max_chars:
-        Maximum number of characters pulled into rewrite instructions
-        (prompt text + feedback) when using PromptRewriter.
-
-    min_delta:
-        Minimum improvement on minibatch mean required for a child
-        configuration to be accepted over its parent.
-    """
-
-    iterations: PositiveInt = Field(
-        default=5,
-        description="Total number of MIPRO trials or prompt proposals.",
-    )
-    minibatch_size: Optional[conint(ge=1)] = Field(
-        default=None,
-        description=(
-            "Fixed minibatch size for goldens; when set, overrides dynamic sizing."
-        ),
-    )
-    minibatch_min_size: conint(ge=1) = Field(
-        default=4,
-        description="Hard lower bound on minibatch size.",
-    )
-    minibatch_max_size: PositiveInt = Field(
-        default=32,
-        description="Hard upper bound on minibatch size.",
-    )
-    minibatch_ratio: confloat(gt=0.0, le=1.0) = Field(
-        default=0.05,
-        description=(
-            "Target fraction of len(goldens) used to compute a dynamic minibatch "
-            "size; bounded between minibatch_min_size and minibatch_max_size."
-        ),
-    )
-    random_seed: conint(ge=0) = 0
-    min_delta: confloat(ge=0.0) = Field(
-        default=0.0,
-        description=(
-            "Minimum improvement in minibatch score required for a child "
-            "prompt to be accepted over its parent."
-        ),
-    )
-
-    exploration_probability: confloat(ge=0.0, le=1.0) = Field(
-        default=0.2,
-        description=(
-            "Probability of sampling a random candidate instead of "
-            "the best-by-mean minibatch score."
-        ),
-    )
-
-    full_eval_every: Optional[PositiveInt] = Field(
-        default=5,
-        description=(
-            "If set, the runner fully evaluates the current best candidate on the "
-            "full goldens every N trials. If None, only a single full evaluation "
-            "is performed at the end."
-        ),
-    )
-
-    rewrite_instruction_max_chars: PositiveInt = Field(
-        default=4096,
-        description=(
-            "Maximum number of characters from prompt, feedback, and related "
-            "text included in rewrite instructions."
-        ),
-    )
-
-    @field_validator("random_seed", mode="before")
-    @classmethod
-    def _coerce_random_seed(cls, seed):
-        if seed is None:
-            return time.time_ns()
-        return seed