edsl 0.1.50__py3-none-any.whl → 0.1.52__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- edsl/__init__.py +45 -34
- edsl/__version__.py +1 -1
- edsl/base/base_exception.py +2 -2
- edsl/buckets/bucket_collection.py +1 -1
- edsl/buckets/exceptions.py +32 -0
- edsl/buckets/token_bucket_api.py +26 -10
- edsl/caching/cache.py +5 -2
- edsl/caching/remote_cache_sync.py +5 -5
- edsl/caching/sql_dict.py +12 -11
- edsl/config/__init__.py +1 -1
- edsl/config/config_class.py +4 -2
- edsl/conversation/Conversation.py +9 -5
- edsl/conversation/car_buying.py +1 -3
- edsl/conversation/mug_negotiation.py +2 -6
- edsl/coop/__init__.py +11 -8
- edsl/coop/coop.py +15 -13
- edsl/coop/coop_functions.py +1 -1
- edsl/coop/ep_key_handling.py +1 -1
- edsl/coop/price_fetcher.py +2 -2
- edsl/coop/utils.py +2 -2
- edsl/dataset/dataset.py +144 -63
- edsl/dataset/dataset_operations_mixin.py +14 -6
- edsl/dataset/dataset_tree.py +3 -3
- edsl/dataset/display/table_renderers.py +6 -3
- edsl/dataset/file_exports.py +4 -4
- edsl/dataset/r/ggplot.py +3 -3
- edsl/inference_services/available_model_fetcher.py +2 -2
- edsl/inference_services/data_structures.py +5 -5
- edsl/inference_services/inference_service_abc.py +1 -1
- edsl/inference_services/inference_services_collection.py +1 -1
- edsl/inference_services/service_availability.py +3 -3
- edsl/inference_services/services/azure_ai.py +3 -3
- edsl/inference_services/services/google_service.py +1 -1
- edsl/inference_services/services/test_service.py +1 -1
- edsl/instructions/change_instruction.py +5 -4
- edsl/instructions/instruction.py +1 -0
- edsl/instructions/instruction_collection.py +5 -4
- edsl/instructions/instruction_handler.py +10 -8
- edsl/interviews/answering_function.py +20 -21
- edsl/interviews/exception_tracking.py +3 -2
- edsl/interviews/interview.py +1 -1
- edsl/interviews/interview_status_dictionary.py +1 -1
- edsl/interviews/interview_task_manager.py +7 -4
- edsl/interviews/request_token_estimator.py +3 -2
- edsl/interviews/statistics.py +2 -2
- edsl/invigilators/invigilators.py +34 -6
- edsl/jobs/__init__.py +39 -2
- edsl/jobs/async_interview_runner.py +1 -1
- edsl/jobs/check_survey_scenario_compatibility.py +5 -5
- edsl/jobs/data_structures.py +2 -2
- edsl/jobs/html_table_job_logger.py +494 -257
- edsl/jobs/jobs.py +2 -2
- edsl/jobs/jobs_checks.py +5 -5
- edsl/jobs/jobs_component_constructor.py +2 -2
- edsl/jobs/jobs_pricing_estimation.py +1 -1
- edsl/jobs/jobs_runner_asyncio.py +2 -2
- edsl/jobs/jobs_status_enums.py +1 -0
- edsl/jobs/remote_inference.py +47 -13
- edsl/jobs/results_exceptions_handler.py +2 -2
- edsl/language_models/language_model.py +151 -145
- edsl/notebooks/__init__.py +24 -1
- edsl/notebooks/exceptions.py +82 -0
- edsl/notebooks/notebook.py +7 -3
- edsl/notebooks/notebook_to_latex.py +1 -1
- edsl/prompts/__init__.py +23 -2
- edsl/prompts/prompt.py +1 -1
- edsl/questions/__init__.py +4 -4
- edsl/questions/answer_validator_mixin.py +0 -5
- edsl/questions/compose_questions.py +2 -2
- edsl/questions/descriptors.py +1 -1
- edsl/questions/question_base.py +32 -3
- edsl/questions/question_base_prompts_mixin.py +4 -4
- edsl/questions/question_budget.py +503 -102
- edsl/questions/question_check_box.py +658 -156
- edsl/questions/question_dict.py +176 -2
- edsl/questions/question_extract.py +401 -61
- edsl/questions/question_free_text.py +77 -9
- edsl/questions/question_functional.py +118 -9
- edsl/questions/{derived/question_likert_five.py → question_likert_five.py} +2 -2
- edsl/questions/{derived/question_linear_scale.py → question_linear_scale.py} +3 -4
- edsl/questions/question_list.py +246 -26
- edsl/questions/question_matrix.py +586 -73
- edsl/questions/question_multiple_choice.py +213 -47
- edsl/questions/question_numerical.py +360 -29
- edsl/questions/question_rank.py +401 -124
- edsl/questions/question_registry.py +3 -3
- edsl/questions/{derived/question_top_k.py → question_top_k.py} +3 -3
- edsl/questions/{derived/question_yes_no.py → question_yes_no.py} +3 -4
- edsl/questions/register_questions_meta.py +2 -1
- edsl/questions/response_validator_abc.py +6 -2
- edsl/questions/response_validator_factory.py +10 -12
- edsl/results/report.py +1 -1
- edsl/results/result.py +7 -4
- edsl/results/results.py +500 -271
- edsl/results/results_selector.py +2 -2
- edsl/scenarios/construct_download_link.py +3 -3
- edsl/scenarios/scenario.py +1 -2
- edsl/scenarios/scenario_list.py +41 -23
- edsl/surveys/survey_css.py +3 -3
- edsl/surveys/survey_simulator.py +2 -1
- edsl/tasks/__init__.py +22 -2
- edsl/tasks/exceptions.py +72 -0
- edsl/tasks/task_history.py +48 -11
- edsl/templates/error_reporting/base.html +37 -4
- edsl/templates/error_reporting/exceptions_table.html +105 -33
- edsl/templates/error_reporting/interview_details.html +130 -126
- edsl/templates/error_reporting/overview.html +21 -25
- edsl/templates/error_reporting/report.css +215 -46
- edsl/templates/error_reporting/report.js +122 -20
- edsl/tokens/__init__.py +27 -1
- edsl/tokens/exceptions.py +37 -0
- edsl/tokens/interview_token_usage.py +3 -2
- edsl/tokens/token_usage.py +4 -3
- {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/METADATA +1 -1
- {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/RECORD +118 -116
- edsl/questions/derived/__init__.py +0 -0
- {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/LICENSE +0 -0
- {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/WHEEL +0 -0
- {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/entry_points.txt +0 -0
edsl/questions/question_functional.py CHANGED

@@ -1,12 +1,95 @@
 from __future__ import annotations
-from typing import Optional, Callable
+from typing import Optional, Callable, Any
 import inspect

+from pydantic import BaseModel
+
 from .question_base import QuestionBase
+from .response_validator_abc import ResponseValidatorABC
+from .exceptions import QuestionErrors, QuestionAnswerValidationError, QuestionNotImplementedError

 from ..utilities.restricted_python import create_restricted_function


+class FunctionalResponse(BaseModel):
+    """
+    Pydantic model for functional question responses.
+
+    Since functional questions are evaluated directly by Python code rather than an LLM,
+    this model primarily serves as a structured way to represent the output.
+
+    Attributes:
+        answer: The result of the function evaluation
+        comment: Optional comment about the result
+        generated_tokens: Optional token usage data
+
+    Examples:
+        >>> # Valid response with a numeric answer
+        >>> response = FunctionalResponse(answer=42)
+        >>> response.answer
+        42
+
+        >>> # Valid response with a string answer and a comment
+        >>> response = FunctionalResponse(answer="Hello world", comment="Function executed successfully")
+        >>> response.answer
+        'Hello world'
+        >>> response.comment
+        'Function executed successfully'
+    """
+    answer: Any
+    comment: Optional[str] = None
+    generated_tokens: Optional[Any] = None
+
+
+class FunctionalResponseValidator(ResponseValidatorABC):
+    """
+    Validator for functional question responses.
+
+    Since functional questions are evaluated directly and not by an LLM,
+    this validator is minimal and mainly serves for consistency with other question types.
+    """
+    required_params = []
+    valid_examples = [
+        (
+            {"answer": 42},
+            {},
+        ),
+        (
+            {"answer": "Hello world", "comment": "Function executed successfully"},
+            {},
+        ),
+    ]
+    invalid_examples = []
+
+    def fix(self, response, verbose=False):
+        """
+        Attempt to fix an invalid response.
+
+        Since functional questions are evaluated directly, this method is mainly
+        for consistency with other question types.
+
+        Args:
+            response: The response to fix
+            verbose: Whether to print verbose output
+
+        Returns:
+            The fixed response or the original response if it cannot be fixed
+        """
+        if verbose:
+            print(f"Fixing functional response: {response}")
+
+        # Handle case where response is a raw value without the proper structure
+        if not isinstance(response, dict):
+            try:
+                return {"answer": response}
+            except Exception as e:
+                if verbose:
+                    print(f"Failed to fix response: {e}")
+                return {"answer": None, "comment": "Failed to execute function"}
+
+        return response
+
+
 class QuestionFunctional(QuestionBase):
     """A special type of question that is *not* answered by an LLM.

@@ -40,7 +123,7 @@ class QuestionFunctional(QuestionBase):
     function_name = ""

     _response_model = None
-    response_validator_class =
+    response_validator_class = FunctionalResponseValidator

     def __init__(
         self,
@@ -73,6 +156,12 @@ class QuestionFunctional(QuestionBase):
         self.question_text = question_text
         self.instructions = self.default_instructions

+    def create_response_model(self):
+        """
+        Returns the Pydantic model for validating responses to this question.
+        """
+        return FunctionalResponse
+
     def activate(self):
         self.activated = True

@@ -85,12 +174,14 @@ class QuestionFunctional(QuestionBase):
     def answer_question_directly(self, scenario, agent_traits=None):
         """Return the answer to the question, ensuring the function is activated."""
         if not self.activated:
-            raise
+            raise QuestionErrors("Function not activated. Please activate it first.")
         try:
-
+            result = {"answer": self.func(scenario, agent_traits), "comment": None}
+            # Validate the result using the Pydantic model
+            return self.create_response_model()(**result).model_dump()
         except Exception as e:
             print("Function execution error:", e)
-            raise
+            raise QuestionErrors("Error during function execution.")

     def _translate_answer_code_to_answer(self, answer, scenario):
         """Required by Question, but not used by QuestionFunctional."""
@@ -98,13 +189,31 @@ class QuestionFunctional(QuestionBase):

     def _simulate_answer(self, human_readable=True) -> dict[str, str]:
         """Required by Question, but not used by QuestionFunctional."""
-        from .exceptions import QuestionNotImplementedError
         raise QuestionNotImplementedError("_simulate_answer not implemented for QuestionFunctional")

     def _validate_answer(self, answer: dict[str, str]):
-        """
-
-
+        """Validate the answer using the Pydantic model."""
+        try:
+            return self.create_response_model()(**answer).model_dump()
+        except Exception as e:
+            from pydantic import ValidationError
+            # Create a ValidationError with a helpful message
+            validation_error = ValidationError.from_exception_data(
+                title='FunctionalResponse',
+                line_errors=[{
+                    'type': 'value_error',
+                    'loc': ('answer',),
+                    'msg': f'Function response validation failed: {str(e)}',
+                    'input': answer,
+                    'ctx': {'error': str(e)}
+                }]
+            )
+            raise QuestionAnswerValidationError(
+                message=f"Invalid function response: {str(e)}",
+                data=answer,
+                model=self.create_response_model(),
+                pydantic_error=validation_error
+            )

     @property
     def question_html_content(self) -> str:
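The practical effect of these changes is that QuestionFunctional now routes its directly computed answer through a Pydantic response model instead of returning a bare dict. A minimal standalone sketch of that flow, assuming pydantic v2 (the answer_directly helper below is illustrative and not part of the package):

from typing import Any, Optional
from pydantic import BaseModel


class FunctionalResponse(BaseModel):
    # Same shape as the response model added in the diff above.
    answer: Any
    comment: Optional[str] = None
    generated_tokens: Optional[Any] = None


def answer_directly(func, scenario, agent_traits=None) -> dict:
    # Evaluate the question's Python function, then validate the result
    # through the response model before handing it back as a plain dict.
    result = {"answer": func(scenario, agent_traits), "comment": None}
    return FunctionalResponse(**result).model_dump()


if __name__ == "__main__":
    print(answer_directly(lambda scenario, traits: sum(scenario["numbers"]),
                          {"numbers": [1, 2, 3]}))
    # -> {'answer': 6, 'comment': None, 'generated_tokens': None}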
edsl/questions/{derived/question_likert_five.py → question_likert_five.py} RENAMED

@@ -1,7 +1,7 @@
 from __future__ import annotations
 from typing import Optional
-from
-from
+from .question_multiple_choice import QuestionMultipleChoice
+from .decorators import inject_exception


 class QuestionLikertFive(QuestionMultipleChoice):
edsl/questions/{derived/question_linear_scale.py → question_linear_scale.py} RENAMED

@@ -1,10 +1,9 @@
 from __future__ import annotations
 from typing import Optional

-from
-from
-
-from ..decorators import inject_exception
+from .descriptors import QuestionOptionsDescriptor, OptionLabelDescriptor
+from .question_multiple_choice import QuestionMultipleChoice
+from .decorators import inject_exception


 class QuestionLinearScale(QuestionMultipleChoice):
edsl/questions/question_list.py CHANGED

@@ -2,9 +2,8 @@ from __future__ import annotations
 import json
 from typing import Any, Optional, Union, ForwardRef

-from pydantic import Field
+from pydantic import Field, model_validator, ValidationError
 from json_repair import repair_json
-from .exceptions import QuestionAnswerValidationError
 from .question_base import QuestionBase
 from .descriptors import IntegerOrNoneDescriptor
 from .decorators import inject_exception
@@ -60,11 +59,44 @@ def create_model(min_list_items: Optional[int], max_list_items: Optional[int], p
     from pydantic import BaseModel

     if permissive or (max_list_items is None and min_list_items is None):
-
         class ListResponse(BaseModel):
+            """
+            Pydantic model for validating list responses with no constraints.
+
+            Examples:
+                >>> # Valid response with any number of items
+                >>> response = ListResponse(answer=["one", "two", "three"])
+                >>> response.answer
+                ['one', 'two', 'three']
+
+                >>> # Empty list is valid in permissive mode
+                >>> response = ListResponse(answer=[])
+                >>> response.answer
+                []
+
+                >>> # Missing answer field raises error
+                >>> try:
+                ...     ListResponse(you="will never be able to do this!")
+                ... except Exception as e:
+                ...     "Field required" in str(e)
+                True
+            """
             answer: list[Any]
             comment: Optional[str] = None
             generated_tokens: Optional[str] = None
+
+            @classmethod
+            def model_validate(cls, obj, *args, **kwargs):
+                try:
+                    return super().model_validate(obj, *args, **kwargs)
+                except ValidationError as e:
+                    from .exceptions import QuestionAnswerValidationError
+                    raise QuestionAnswerValidationError(
+                        message=f"Invalid list response: {e}",
+                        data=obj,
+                        model=cls,
+                        pydantic_error=e
+                    )

     else:
         # Determine field constraints
@@ -78,58 +110,246 @@ def create_model(min_list_items: Optional[int], max_list_items: Optional[int], p

         class ListResponse(BaseModel):
             """
-
-
-
+            Pydantic model for validating list responses with size constraints.
+
+            Examples:
+                >>> # Create a model with min=2, max=4 items
+                >>> ConstrainedList = create_model(min_list_items=2, max_list_items=4, permissive=False)
+
+                >>> # Valid response within constraints
+                >>> response = ConstrainedList(answer=["Apple", "Cherry", "Banana"])
+                >>> len(response.answer)
+                3
+
+                >>> # Too few items raises error
+                >>> try:
+                ...     ConstrainedList(answer=["Apple"])
+                ... except QuestionAnswerValidationError as e:
+                ...     "must have at least 2 items" in str(e)
+                True
+
+                >>> # Too many items raises error
+                >>> try:
+                ...     ConstrainedList(answer=["A", "B", "C", "D", "E"])
+                ... except QuestionAnswerValidationError as e:
+                ...     "cannot have more than 4 items" in str(e)
+                True
+
+                >>> # Optional comment is allowed
+                >>> response = ConstrainedList(
+                ...     answer=["Apple", "Cherry"],
+                ...     comment="These are my favorites"
+                ... )
+                >>> response.comment
+                'These are my favorites'
+
+                >>> # Generated tokens are optional
+                >>> response = ConstrainedList(
+                ...     answer=["Apple", "Cherry"],
+                ...     generated_tokens="Apple, Cherry"
+                ... )
+                >>> response.generated_tokens
+                'Apple, Cherry'
             """

             answer: list[Any] = Field(**field_kwargs)
             comment: Optional[str] = None
             generated_tokens: Optional[str] = None

+            @model_validator(mode='after')
+            def validate_list_constraints(self):
+                """
+                Validate that the list meets size constraints.
+
+                Returns:
+                    The validated model instance.
+
+                Raises:
+                    QuestionAnswerValidationError: If list size constraints are violated.
+                """
+                if max_list_items is not None and len(self.answer) > max_list_items:
+                    from .exceptions import QuestionAnswerValidationError
+                    validation_error = ValidationError.from_exception_data(
+                        title='ListResponse',
+                        line_errors=[{
+                            'type': 'value_error',
+                            'loc': ('answer',),
+                            'msg': f'List cannot have more than {max_list_items} items',
+                            'input': self.answer,
+                            'ctx': {'error': 'Too many items'}
+                        }]
+                    )
+                    raise QuestionAnswerValidationError(
+                        message=f"List cannot have more than {max_list_items} items",
+                        data=self.model_dump(),
+                        model=self.__class__,
+                        pydantic_error=validation_error
+                    )
+
+                if min_list_items is not None and len(self.answer) < min_list_items:
+                    from .exceptions import QuestionAnswerValidationError
+                    validation_error = ValidationError.from_exception_data(
+                        title='ListResponse',
+                        line_errors=[{
+                            'type': 'value_error',
+                            'loc': ('answer',),
+                            'msg': f'List must have at least {min_list_items} items',
+                            'input': self.answer,
+                            'ctx': {'error': 'Too few items'}
+                        }]
+                    )
+                    raise QuestionAnswerValidationError(
+                        message=f"List must have at least {min_list_items} items",
+                        data=self.model_dump(),
+                        model=self.__class__,
+                        pydantic_error=validation_error
+                    )
+                return self
+
+            @classmethod
+            def model_validate(cls, obj, *args, **kwargs):
+                try:
+                    return super().model_validate(obj, *args, **kwargs)
+                except ValidationError as e:
+                    from .exceptions import QuestionAnswerValidationError
+                    raise QuestionAnswerValidationError(
+                        message=f"Invalid list response: {e}",
+                        data=obj,
+                        model=cls,
+                        pydantic_error=e
+                    )
+
     return ListResponse


 class ListResponseValidator(ResponseValidatorABC):
     required_params = ["min_list_items", "max_list_items", "permissive"]
     valid_examples = [({"answer": ["hello", "world"]}, {"max_list_items": 5})]
-
     invalid_examples = [
         (
             {"answer": ["hello", "world", "this", "is", "a", "test"]},
             {"max_list_items": 5},
-            "
+            "List cannot have more than 5 items",
         ),
         (
             {"answer": ["hello"]},
             {"min_list_items": 2},
-            "
+            "List must have at least 2 items",
         ),
     ]
+
+    def validate(
+        self,
+        raw_edsl_answer_dict: dict,
+        fix=False,
+        verbose=False,
+        replacement_dict: dict = None,
+    ) -> dict:
+        """Override validate to handle missing answer key properly."""
+        # Check for missing answer key
+        if "answer" not in raw_edsl_answer_dict:
+            from .exceptions import QuestionAnswerValidationError
+            from pydantic import ValidationError
+
+            # Create a synthetic validation error
+            validation_error = ValidationError.from_exception_data(
+                title='ListResponse',
+                line_errors=[{
+                    'type': 'missing',
+                    'loc': ('answer',),
+                    'msg': 'Field required',
+                    'input': raw_edsl_answer_dict,
+                }]
+            )
+
+            raise QuestionAnswerValidationError(
+                message="Missing required 'answer' field in response",
+                data=raw_edsl_answer_dict,
+                model=self.response_model,
+                pydantic_error=validation_error
+            )
+
+        # Check if answer is not a list
+        if "answer" in raw_edsl_answer_dict and not isinstance(raw_edsl_answer_dict["answer"], list):
+            from .exceptions import QuestionAnswerValidationError
+            from pydantic import ValidationError
+
+            # Create a synthetic validation error
+            validation_error = ValidationError.from_exception_data(
+                title='ListResponse',
+                line_errors=[{
+                    'type': 'list_type',
+                    'loc': ('answer',),
+                    'msg': 'Input should be a valid list',
+                    'input': raw_edsl_answer_dict["answer"],
+                }]
+            )
+
+            raise QuestionAnswerValidationError(
+                message=f"Answer must be a list (got {type(raw_edsl_answer_dict['answer']).__name__})",
+                data=raw_edsl_answer_dict,
+                model=self.response_model,
+                pydantic_error=validation_error
+            )
+
+        # Continue with parent validation
+        return super().validate(raw_edsl_answer_dict, fix, verbose, replacement_dict)

     def _check_constraints(self, response) -> None:
-
-
-            and len(response.answer) > self.max_list_items
-        ):
-            raise QuestionAnswerValidationError("Too many items.")
-
-        if (
-            self.min_list_items is not None
-            and len(response.answer) < self.min_list_items
-        ):
-            raise QuestionAnswerValidationError("Too few items.")
+        # This method can now be removed since validation is handled in the Pydantic model
+        pass

     def fix(self, response, verbose=False):
+        """
+        Fix common issues in list responses by splitting strings into lists.
+
+        Examples:
+            >>> from edsl import QuestionList
+            >>> q = QuestionList.example(min_list_items=2, max_list_items=4)
+            >>> validator = q.response_validator
+
+            >>> # Fix a string that should be a list
+            >>> bad_response = {"answer": "apple,banana,cherry"}
+            >>> try:
+            ...     validator.validate(bad_response)
+            ... except Exception:
+            ...     fixed = validator.fix(bad_response)
+            ...     validated = validator.validate(fixed)
+            ...     validated  # Show full response
+            {'answer': ['apple', 'banana', 'cherry'], 'comment': None, 'generated_tokens': None}
+
+            >>> # Fix using generated_tokens when answer is invalid
+            >>> bad_response = {
+            ...     "answer": None,
+            ...     "generated_tokens": "pizza, pasta, salad"
+            ... }
+            >>> try:
+            ...     validator.validate(bad_response)
+            ... except Exception:
+            ...     fixed = validator.fix(bad_response)
+            ...     validated = validator.validate(fixed)
+            ...     validated
+            {'answer': ['pizza', ' pasta', ' salad'], 'comment': None, 'generated_tokens': None}
+
+            >>> # Preserve comments during fixing
+            >>> bad_response = {
+            ...     "answer": "red,blue,green",
+            ...     "comment": "These are colors"
+            ... }
+            >>> fixed = validator.fix(bad_response)
+            >>> fixed == {
+            ...     "answer": ["red", "blue", "green"],
+            ...     "comment": "These are colors"
+            ... }
+            True
+        """
         if verbose:
             print(f"Fixing list response: {response}")
         answer = str(response.get("answer") or response.get("generated_tokens", ""))
-
-
-
-
-            else {}
-        )
+        result = {"answer": answer.split(",")}
+        if "comment" in response:
+            result["comment"] = response["comment"]
+        return result

     def _post_process(self, edsl_answer_dict):
         edsl_answer_dict["answer"] = [