edsl 0.1.33__py3-none-any.whl → 0.1.33.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +3 -9
- edsl/__init__.py +3 -8
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +8 -40
- edsl/agents/AgentList.py +0 -43
- edsl/agents/Invigilator.py +219 -135
- edsl/agents/InvigilatorBase.py +59 -148
- edsl/agents/{PromptConstructor.py → PromptConstructionMixin.py} +89 -138
- edsl/agents/__init__.py +0 -1
- edsl/config.py +56 -47
- edsl/coop/coop.py +7 -50
- edsl/data/Cache.py +1 -35
- edsl/data_transfer_models.py +38 -73
- edsl/enums.py +0 -4
- edsl/exceptions/language_models.py +1 -25
- edsl/exceptions/questions.py +5 -62
- edsl/exceptions/results.py +0 -4
- edsl/inference_services/AnthropicService.py +11 -13
- edsl/inference_services/AwsBedrock.py +17 -19
- edsl/inference_services/AzureAI.py +20 -37
- edsl/inference_services/GoogleService.py +12 -16
- edsl/inference_services/GroqService.py +0 -2
- edsl/inference_services/InferenceServiceABC.py +3 -58
- edsl/inference_services/OpenAIService.py +54 -48
- edsl/inference_services/models_available_cache.py +6 -0
- edsl/inference_services/registry.py +0 -6
- edsl/jobs/Answers.py +12 -10
- edsl/jobs/Jobs.py +21 -36
- edsl/jobs/buckets/BucketCollection.py +15 -24
- edsl/jobs/buckets/TokenBucket.py +14 -93
- edsl/jobs/interviews/Interview.py +78 -366
- edsl/jobs/interviews/InterviewExceptionEntry.py +19 -85
- edsl/jobs/interviews/InterviewTaskBuildingMixin.py +286 -0
- edsl/jobs/interviews/{InterviewExceptionCollection.py → interview_exception_tracking.py} +68 -14
- edsl/jobs/interviews/retry_management.py +37 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +175 -146
- edsl/jobs/runners/JobsRunnerStatusMixin.py +333 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +23 -30
- edsl/jobs/tasks/TaskHistory.py +213 -148
- edsl/language_models/LanguageModel.py +156 -261
- edsl/language_models/ModelList.py +2 -2
- edsl/language_models/RegisterLanguageModelsMeta.py +29 -14
- edsl/language_models/registry.py +6 -23
- edsl/language_models/repair.py +19 -0
- edsl/prompts/Prompt.py +2 -52
- edsl/questions/AnswerValidatorMixin.py +26 -23
- edsl/questions/QuestionBase.py +249 -329
- edsl/questions/QuestionBudget.py +41 -99
- edsl/questions/QuestionCheckBox.py +35 -227
- edsl/questions/QuestionExtract.py +27 -98
- edsl/questions/QuestionFreeText.py +29 -52
- edsl/questions/QuestionFunctional.py +0 -7
- edsl/questions/QuestionList.py +22 -141
- edsl/questions/QuestionMultipleChoice.py +65 -159
- edsl/questions/QuestionNumerical.py +46 -88
- edsl/questions/QuestionRank.py +24 -182
- edsl/questions/RegisterQuestionsMeta.py +12 -31
- edsl/questions/__init__.py +4 -3
- edsl/questions/derived/QuestionLikertFive.py +5 -10
- edsl/questions/derived/QuestionLinearScale.py +2 -15
- edsl/questions/derived/QuestionTopK.py +1 -10
- edsl/questions/derived/QuestionYesNo.py +3 -24
- edsl/questions/descriptors.py +7 -43
- edsl/questions/question_registry.py +2 -6
- edsl/results/Dataset.py +0 -20
- edsl/results/DatasetExportMixin.py +48 -46
- edsl/results/Result.py +5 -32
- edsl/results/Results.py +46 -135
- edsl/results/ResultsDBMixin.py +3 -3
- edsl/scenarios/FileStore.py +10 -71
- edsl/scenarios/Scenario.py +25 -96
- edsl/scenarios/ScenarioImageMixin.py +2 -2
- edsl/scenarios/ScenarioList.py +39 -361
- edsl/scenarios/ScenarioListExportMixin.py +0 -9
- edsl/scenarios/ScenarioListPdfMixin.py +4 -150
- edsl/study/SnapShot.py +1 -8
- edsl/study/Study.py +0 -32
- edsl/surveys/Rule.py +1 -10
- edsl/surveys/RuleCollection.py +5 -21
- edsl/surveys/Survey.py +310 -636
- edsl/surveys/SurveyExportMixin.py +9 -71
- edsl/surveys/SurveyFlowVisualizationMixin.py +1 -2
- edsl/surveys/SurveyQualtricsImport.py +4 -75
- edsl/utilities/gcp_bucket/simple_example.py +9 -0
- edsl/utilities/utilities.py +1 -9
- {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/METADATA +2 -5
- edsl-0.1.33.dev1.dist-info/RECORD +209 -0
- edsl/TemplateLoader.py +0 -24
- edsl/auto/AutoStudy.py +0 -117
- edsl/auto/StageBase.py +0 -230
- edsl/auto/StageGenerateSurvey.py +0 -178
- edsl/auto/StageLabelQuestions.py +0 -125
- edsl/auto/StagePersona.py +0 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
- edsl/auto/StagePersonaDimensionValues.py +0 -74
- edsl/auto/StagePersonaDimensions.py +0 -69
- edsl/auto/StageQuestions.py +0 -73
- edsl/auto/SurveyCreatorPipeline.py +0 -21
- edsl/auto/utilities.py +0 -224
- edsl/coop/PriceFetcher.py +0 -58
- edsl/inference_services/MistralAIService.py +0 -120
- edsl/inference_services/TestService.py +0 -80
- edsl/inference_services/TogetherAIService.py +0 -170
- edsl/jobs/FailedQuestion.py +0 -78
- edsl/jobs/runners/JobsRunnerStatus.py +0 -331
- edsl/language_models/fake_openai_call.py +0 -15
- edsl/language_models/fake_openai_service.py +0 -61
- edsl/language_models/utilities.py +0 -61
- edsl/questions/QuestionBaseGenMixin.py +0 -133
- edsl/questions/QuestionBasePromptsMixin.py +0 -266
- edsl/questions/Quick.py +0 -41
- edsl/questions/ResponseValidatorABC.py +0 -170
- edsl/questions/decorators.py +0 -21
- edsl/questions/prompt_templates/question_budget.jinja +0 -13
- edsl/questions/prompt_templates/question_checkbox.jinja +0 -32
- edsl/questions/prompt_templates/question_extract.jinja +0 -11
- edsl/questions/prompt_templates/question_free_text.jinja +0 -3
- edsl/questions/prompt_templates/question_linear_scale.jinja +0 -11
- edsl/questions/prompt_templates/question_list.jinja +0 -17
- edsl/questions/prompt_templates/question_multiple_choice.jinja +0 -33
- edsl/questions/prompt_templates/question_numerical.jinja +0 -37
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +0 -7
- edsl/questions/templates/budget/question_presentation.jinja +0 -7
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +0 -10
- edsl/questions/templates/checkbox/question_presentation.jinja +0 -22
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +0 -7
- edsl/questions/templates/extract/question_presentation.jinja +0 -1
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +0 -1
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +0 -10
- edsl/questions/templates/likert_five/question_presentation.jinja +0 -12
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +0 -5
- edsl/questions/templates/linear_scale/question_presentation.jinja +0 -5
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +0 -4
- edsl/questions/templates/list/question_presentation.jinja +0 -5
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +0 -9
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +0 -12
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +0 -8
- edsl/questions/templates/numerical/question_presentation.jinja +0 -7
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +0 -11
- edsl/questions/templates/rank/question_presentation.jinja +0 -15
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +0 -8
- edsl/questions/templates/top_k/question_presentation.jinja +0 -22
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +0 -6
- edsl/questions/templates/yes_no/question_presentation.jinja +0 -12
- edsl/results/DatasetTree.py +0 -145
- edsl/results/Selector.py +0 -118
- edsl/results/tree_explore.py +0 -115
- edsl/surveys/instructions/ChangeInstruction.py +0 -47
- edsl/surveys/instructions/Instruction.py +0 -34
- edsl/surveys/instructions/InstructionCollection.py +0 -77
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +0 -24
- edsl/templates/error_reporting/exceptions_by_model.html +0 -35
- edsl/templates/error_reporting/exceptions_by_question_name.html +0 -17
- edsl/templates/error_reporting/exceptions_by_type.html +0 -17
- edsl/templates/error_reporting/interview_details.html +0 -116
- edsl/templates/error_reporting/interviews.html +0 -10
- edsl/templates/error_reporting/overview.html +0 -5
- edsl/templates/error_reporting/performance_plot.html +0 -2
- edsl/templates/error_reporting/report.css +0 -74
- edsl/templates/error_reporting/report.html +0 -118
- edsl/templates/error_reporting/report.js +0 -25
- edsl-0.1.33.dist-info/RECORD +0 -295
- {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/LICENSE +0 -0
- {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/WHEEL +0 -0
edsl/questions/QuestionNumerical.py
CHANGED
@@ -1,98 +1,26 @@
 from __future__ import annotations
-
-# from decimal import Decimal
+import textwrap
 from random import uniform
-from typing import Any, Optional, Union
-
-from pydantic import BaseModel, Field, field_validator
+from typing import Any, Optional, Union

 from edsl.exceptions import QuestionAnswerValidationError
 from edsl.questions.QuestionBase import QuestionBase
 from edsl.questions.descriptors import NumericalOrNoneDescriptor
-from edsl.questions.decorators import inject_exception
-from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
-from edsl.exceptions.questions import QuestionAnswerValidationError
-
-
-def create_numeric_response(
-    min_value: Optional[float] = None,
-    max_value: Optional[float] = None,
-    permissive=False,
-):
-    field_kwargs = {}
-    if not permissive:
-        field_kwargs = {}
-        if min_value is not None:
-            field_kwargs["ge"] = min_value
-        if max_value is not None:
-            field_kwargs["le"] = max_value
-
-    class ConstrainedNumericResponse(BaseModel):
-        answer: Union[int, float] = Field(**field_kwargs)
-        comment: Optional[str] = Field(None)
-        generated_tokens: Optional[Any] = Field(None)
-
-    return ConstrainedNumericResponse
-
-
-class NumericalResponseValidator(ResponseValidatorABC):
-    required_params = ["min_value", "max_value", "permissive"]
-
-    valid_examples = [
-        ({"answer": 1}, {"min_value": 0, "max_value": 10}),
-        ({"answer": 1}, {"min_value": None, "max_value": None}),
-    ]
-
-    invalid_examples = [
-        ({"answer": 10}, {"min_value": 0, "max_value": 5}, "Answer is out of range"),
-        ({"answer": "ten"}, {"min_value": 0, "max_value": 5}, "Answer is not a number"),
-        ({}, {"min_value": 0, "max_value": 5}, "Answer key is missing"),
-    ]
-
-    def fix(self, response, verbose=False):
-        response_text = str(response).lower()
-        import re
-
-        if verbose:
-            print(f"Ivalid generated tokens was was: {response_text}")
-        pattern = r"\b\d+(?:\.\d+)?\b"
-        match = re.search(pattern, response_text.replace(",", ""))
-        solution = match.group(0) if match else response.get("answer")
-        if verbose:
-            print("Proposed solution is: ", solution)
-        if "comment" in response:
-            return {"answer": solution, "comment": response["comment"]}
-        else:
-            return {"answer": solution}
-
-    def _check_constraints(self, pydantic_edsl_answer: BaseModel):
-        pass


 class QuestionNumerical(QuestionBase):
-    """This question prompts the agent to answer with a numerical value.
-
-    >>> QuestionNumerical.self_check()
-
-    """
+    """This question prompts the agent to answer with a numerical value."""

     question_type = "numerical"
     min_value: Optional[float] = NumericalOrNoneDescriptor()
     max_value: Optional[float] = NumericalOrNoneDescriptor()

-    _response_model = None
-    response_validator_class = NumericalResponseValidator
-
     def __init__(
         self,
         question_name: str,
         question_text: str,
         min_value: Optional[Union[int, float]] = None,
         max_value: Optional[Union[int, float]] = None,
-        include_comment: bool = True,
-        question_presentation: Optional[str] = None,
-        answering_instructions: Optional[str] = None,
-        permissive: bool = False,
     ):
         """Initialize the question.

@@ -106,17 +34,30 @@ class QuestionNumerical(QuestionBase):
         self.min_value = min_value
         self.max_value = max_value

-        self.include_comment = include_comment
-        self.question_presentation = question_presentation
-        self.answering_instructions = answering_instructions
-        self.permissive = permissive
-
-    def create_response_model(self):
-        return create_numeric_response(self.min_value, self.max_value, self.permissive)
-
     ################
     # Answer methods
     ################
+    def _validate_answer(
+        self, answer: dict[str, Any]
+    ) -> dict[str, Union[str, float, int]]:
+        """Validate the answer."""
+        self._validate_answer_template_basic(answer)
+        self._validate_answer_key_value_numeric(answer, "answer")
+        self._validate_answer_numerical(answer)
+        return answer
+
+    def _translate_answer_code_to_answer(self, answer, scenario: "Scenario" = None):
+        """There is no answer code."""
+        return answer
+
+    def _simulate_answer(self, human_readable: bool = True):
+        """Simulate a valid answer for debugging purposes."""
+        from edsl.utilities.utilities import random_string
+
+        return {
+            "answer": uniform(self.min_value, self.max_value),
+            "comment": random_string(),
+        }

     @property
     def question_html_content(self) -> str:
@@ -135,19 +76,36 @@ class QuestionNumerical(QuestionBase):
     # Helpful methods
     ################
     @classmethod
-
-    def example(cls, include_comment=False) -> QuestionNumerical:
+    def example(cls) -> QuestionNumerical:
         """Return an example question."""
         return cls(
             question_name="age",
-            question_text="
+            question_text="How old are you in years?",
             min_value=0,
             max_value=86.7,
-            include_comment=include_comment,
         )


-
+def main():
+    """Show example usage."""
+    from edsl.questions.QuestionNumerical import QuestionNumerical
+
+    q = QuestionNumerical.example()
+    q.question_text
+    q.min_value
+    q.max_value
+    # validate an answer
+    q._validate_answer({"answer": 1, "comment": "I like custard"})
+    # translate answer code
+    q._translate_answer_code_to_answer(1)
+    # simulate answer
+    q._simulate_answer()
+    q._simulate_answer(human_readable=False)
+    q._validate_answer(q._simulate_answer(human_readable=False))
+    # serialization (inherits from Question)
+    q.to_dict()
+    assert q.from_dict(q.to_dict()) == q
+
     import doctest

     doctest.testmod(optionflags=doctest.ELLIPSIS)
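The substantive change in this file is how numeric answers are validated: the 0.1.33 side builds a range-constrained Pydantic model per question (create_numeric_response plus a NumericalResponseValidator with a regex-based fix()), while the dev1 side falls back to the older _validate_answer / _simulate_answer helpers. The following is a minimal standalone sketch of the constrained-model pattern on the removed side, not the edsl source; it assumes pydantic v2 and uses illustrative names.

# Sketch only: build a per-question response model whose answer field is
# range-constrained unless the question is permissive (pydantic v2 assumed).
from typing import Optional

from pydantic import BaseModel, Field, ValidationError


def make_numeric_response(min_value=None, max_value=None, permissive=False):
    field_kwargs = {}
    if not permissive:
        if min_value is not None:
            field_kwargs["ge"] = min_value  # answer >= min_value
        if max_value is not None:
            field_kwargs["le"] = max_value  # answer <= max_value

    class ConstrainedNumericResponse(BaseModel):
        answer: float = Field(**field_kwargs)
        comment: Optional[str] = None

    return ConstrainedNumericResponse


Model = make_numeric_response(min_value=0, max_value=86.7)
print(Model(answer=30, comment="ok").answer)  # 30.0 passes the range check
try:
    Model(answer=200)  # out of range
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # 'less_than_equal'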
edsl/questions/QuestionRank.py
CHANGED
@@ -1,6 +1,5 @@
 from __future__ import annotations
 import random
-import textwrap
 from jinja2 import Template
 from typing import Any, Optional, Union
 from edsl.questions.QuestionBase import QuestionBase
@@ -11,129 +10,6 @@ from edsl.questions.descriptors import (
     NumSelectionsDescriptor,
 )

-from edsl.prompts import Prompt
-
-from pydantic import field_validator
-from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
-from edsl.questions.ResponseValidatorABC import BaseResponse
-from edsl.exceptions import QuestionAnswerValidationError
-
-from pydantic import BaseModel, Field, create_model
-from typing import Optional, Any, List, Annotated, Literal
-
-
-def create_response_model(
-    choices: list,
-    num_selections: Optional[int] = None,
-    permissive: bool = False,
-):
-    """
-    :param choices: A list of allowed values for the answer field.
-    :param include_comment: Whether to include a comment field in the model.
-    :return: A new Pydantic model class.
-    """
-    # Convert the choices list to a tuple for use with Literal
-    choice_tuple = tuple(choices)
-
-    field_params = {}
-    if num_selections is not None and not permissive:
-        field_params["min_items"] = num_selections
-        field_params["max_items"] = num_selections
-
-    class RankResponse(BaseModel):
-        answer: Annotated[
-            List[Literal[choice_tuple]],
-            Field(..., **field_params),
-        ] = Field(..., description="List of selected choices")
-        comment: Optional[str] = Field(None, description="Optional comment field")
-        generated_tokens: Optional[Any] = Field(None)
-
-        class Config:
-            @staticmethod
-            def json_schema_extra(schema: dict, model: BaseModel) -> None:
-                # Add the list of choices to the schema for better documentation
-                for prop in schema.get("properties", {}).values():
-                    if prop.get("title") == "answer":
-                        prop["items"] = {"enum": choices}
-
-    return RankResponse
-
-
-class RankResponseValidator(ResponseValidatorABC):
-    required_params = ["num_selections", "permissive", "use_code", "question_options"]
-    valid_examples = []
-    invalid_examples = []
-
-    def fix(self, response, verbose=False):
-        if verbose:
-            print("Invalid response of QuestionRank was: ", False)
-        response_text = response.get("generated_tokens")
-        if response_text is None or response_text == "":  # nothing to be done
-            return response
-        # Maybe it's a comma separated list?
-        response_text = str(response.get("answer"))
-        proposed_list = (
-            response_text.replace("[", "").replace("]", "").replace("'", "").split(",")
-        )
-        proposed_list = [item.strip() for item in proposed_list]
-
-        if verbose:
-            print("Using code? ", self.use_code)
-        if self.use_code:
-            try:
-                proposed_list = [int(i) for i in proposed_list]
-            except ValueError:
-                # print("Could not convert to int")
-                pass
-
-        if verbose:
-            print("Proposed solution is: ", proposed_list)
-
-        # print(f"Ivalid generated tokens was was: {response_text}")
-        if "comment" in response:
-            proposed_data = {
-                "answer": proposed_list,
-                "comment": response["comment"],
-                "generated_tokens": response.get("generated_tokens", None),
-            }
-        else:
-            proposed_data = {
-                "answer": proposed_list,
-                "generated_tokens": response.get("generated_tokens", None),
-            }
-
-        try:
-            self.response_model(**proposed_data)
-            return proposed_data
-        except Exception as e:
-            if verbose:
-                print(f"Proposed solution {proposed_data} is invalid. Error: {e}")
-            # return response
-        if verbose:
-            print("Now seeing if responses show up in the answer")
-        matches = []
-        for index, option in enumerate(self.question_options):
-            if self.use_code:
-                if str(index) in response_text:
-                    if index not in matches:
-                        matches.append(index)
-            else:
-                if option in response_text:
-                    if option not in matches:
-                        matches.append(option)
-        proposed_data = {
-            "answer": matches,
-            "comment": response.get("comment", None),
-            "generated_tokens": response.get("generated_tokens", None),
-        }
-        try:
-            self.response_model(**proposed_data)
-            return proposed_data
-        except Exception as e:
-            if verbose:
-                print(f"Proposed solution {proposed_data} is invalid. Error: {e}")
-            return response
-

 class QuestionRank(QuestionBase):
     """This question prompts the agent to rank options from a list."""
@@ -142,20 +18,12 @@ class QuestionRank(QuestionBase):
     question_options: list[str] = QuestionOptionsDescriptor()
     num_selections = NumSelectionsDescriptor()

-    _response_model = None
-    response_validator_class = RankResponseValidator
-
     def __init__(
         self,
         question_name: str,
         question_text: str,
         question_options: list[str],
         num_selections: Optional[int] = None,
-        question_presentation: Optional[str] = None,
-        answering_instructions: Optional[str] = None,
-        permissive: bool = False,
-        use_code: bool = True,
-        include_comment: bool = True,
     ):
         """Initialize the question.

@@ -169,33 +37,16 @@ class QuestionRank(QuestionBase):
         self.question_text = question_text
         self.question_options = question_options
         self.num_selections = num_selections or len(question_options)
-        self.question_presentation = question_presentation
-        self.answering_instructions = answering_instructions
-        self.permissive = permissive
-        self.use_code = use_code
-        self.include_comment = include_comment
-
-    def create_response_model(self):
-        choices = (
-            self.question_options
-            if not self.use_code
-            else range(len(self.question_options))
-        )
-        return create_response_model(
-            choices=choices,
-            num_selections=self.num_selections,
-            permissive=self.permissive,
-        )

     ################
     # Answer methods
     ################
-
-
-
-
-
-
+    def _validate_answer(self, answer: Any) -> dict[str, list[int]]:
+        """Validate the answer."""
+        self._validate_answer_template_basic(answer)
+        self._validate_answer_key_value(answer, "answer", list)
+        self._validate_answer_rank(answer)
+        return answer

     def _translate_answer_code_to_answer(
         self, answer_codes, scenario: Scenario = None
@@ -209,27 +60,24 @@ class QuestionRank(QuestionBase):
         ]
         translated_codes = []
         for answer_code in answer_codes:
-
-                translated_codes.append(translated_options[int(answer_code)])
-            else:
-                translated_codes.append(answer_code)
+            translated_codes.append(translated_options[int(answer_code)])
         return translated_codes

-
-
-
+    def _simulate_answer(self, human_readable=True) -> dict[str, Union[int, str]]:
+        """Simulate a valid answer for debugging purposes."""
+        from edsl.utilities.utilities import random_string

-
-
-
-
-
-
-
-
-
-
-
+        if human_readable:
+            selected = random.sample(self.question_options, self.num_selections)
+        else:
+            selected = random.sample(
+                range(len(self.question_options)), self.num_selections
+            )
+        answer = {
+            "answer": selected,
+            "comment": random_string(),
+        }
+        return answer

     @property
     def question_html_content(self) -> str:
@@ -281,15 +129,13 @@ class QuestionRank(QuestionBase):
     # Helpful methods
     ################
     @classmethod
-    def example(cls
+    def example(cls) -> QuestionRank:
         """Return an example question."""
         return cls(
             question_name="rank_foods",
             question_text="Rank your favorite foods.",
             question_options=["Pizza", "Pasta", "Salad", "Soup"],
             num_selections=2,
-            use_code=use_code,
-            include_comment=include_comment,
         )


@@ -297,7 +143,7 @@ def main():
     """Show example usage."""
     from edsl.questions.QuestionRank import QuestionRank

-    q = QuestionRank.example(
+    q = QuestionRank.example()
     q.question_text
     q.question_name
     q.question_options
@@ -306,7 +152,7 @@ def main():
     answer = {"answer": [0, 1], "comment": "I like pizza and pasta."}
     q._validate_answer(answer)
     # translate an answer code to an answer
-
+    q._translate_answer_code_to_answer([0, 1])
     # simulate answer
     q._simulate_answer()
     q._simulate_answer(human_readable=False)
@@ -315,10 +161,6 @@ def main():
     q.to_dict()
     assert q.from_dict(q.to_dict()) == q

-    q = QuestionRank.example(use_code=False)
-    answer = {"answer": ["Pizza", "Pasta"], "comment": "I like pizza and pasta."}
-    q._validate_answer(answer)
-
     import doctest

     doctest.testmod(optionflags=doctest.ELLIPSIS)
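The same contrast appears here: the 0.1.33 side's create_response_model constrains the answer list to the question's own options (or their integer codes) via typing.Literal and a fixed length. Below is a minimal sketch of that idea, not the edsl source; it uses pydantic v2 field names (min_length/max_length rather than the min_items/max_items seen above) and illustrative function names.

# Sketch only: constrain a rank answer to a fixed set of choices and a fixed length.
from typing import List, Literal, Optional

from pydantic import BaseModel, Field, ValidationError


def make_rank_response(choices: list, num_selections: int):
    choice_tuple = tuple(choices)  # Literal needs the allowed values spelled out

    class RankResponse(BaseModel):
        answer: List[Literal[choice_tuple]] = Field(
            ..., min_length=num_selections, max_length=num_selections
        )
        comment: Optional[str] = None

    return RankResponse


Model = make_rank_response(["Pizza", "Pasta", "Salad", "Soup"], num_selections=2)
print(Model(answer=["Pizza", "Salad"]).answer)  # valid: two known options
try:
    Model(answer=["Pizza", "Sushi"])  # 'Sushi' is not an allowed choice
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # 'literal_error'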
edsl/questions/RegisterQuestionsMeta.py
CHANGED
@@ -4,8 +4,6 @@ from abc import ABCMeta
 from edsl.enums import QuestionType
 from edsl.exceptions.questions import QuestionMissingTypeError, QuestionBadTypeError

-import inspect
-

 class RegisterQuestionsMeta(ABCMeta):
     """Metaclass to register output elements in a registry i.e., those that have a parent."""
@@ -15,39 +13,22 @@ class RegisterQuestionsMeta(ABCMeta):
     def __init__(cls, name, bases, dct):
         """Initialize the class and adds it to the registry if it's not the base class."""
         super(RegisterQuestionsMeta, cls).__init__(name, bases, dct)
-        if (
-            name != "QuestionBase"
-            and name != "QuestionFunctional"
-            and name != "QuestionAddTwoNumbers"
-        ):
+        if name != "QuestionBase":
             ## Enforce that all questions have a question_type class attribute
             ## and it comes from our enum of valid question types.
-
-
-
-
-            ]
-            for attr in required_attributes:
-                if not hasattr(cls, attr):
-                    raise QuestionMissingTypeError(
-                        f"Question must have a {attr} class attribute"
-                    )
+            if not hasattr(cls, "question_type"):
+                raise QuestionMissingTypeError(
+                    "Question must have a question_type class attribute"
+                )

-
-
-
-
-
-
-
-            ]
-            for param in required_params:
-                if param not in init_params:
-                    raise QuestionBadTypeError(
-                        f"Question type {name} must have a question_presentation parameter in its __init__ method"
-                    )
+            if not QuestionType.is_value_valid(cls.question_type):
+                acceptable_values = [item.value for item in QuestionType]
+                raise QuestionBadTypeError(
+                    f"""question_type must be one of {QuestionType} values, which are
+                    currently {acceptable_values}"""
+                    ""
+                )

-        if name != "QuestionBase":
             RegisterQuestionsMeta._registry[name] = cls

     @classmethod
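Both sides of this file rely on the same mechanism: a metaclass that runs when each question subclass is defined, enforces a valid question_type, and records the class in a registry keyed by class name; the dev1 side simply performs fewer checks. A self-contained sketch of the pattern follows (simplified, not the edsl source; the error types and the skip condition are assumptions for illustration).

# Sketch only: register every concrete subclass at class-creation time and
# require it to declare a question_type class attribute.
from abc import ABCMeta


class RegisterQuestionsMeta(ABCMeta):
    _registry = {}

    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)
        if name != "QuestionBase":  # skip the abstract base itself
            if not hasattr(cls, "question_type"):
                raise TypeError(f"{name} must define a question_type class attribute")
            RegisterQuestionsMeta._registry[name] = cls


class QuestionBase(metaclass=RegisterQuestionsMeta):
    pass


class QuestionNumerical(QuestionBase):
    question_type = "numerical"


print(RegisterQuestionsMeta._registry)  # {'QuestionNumerical': <class '...QuestionNumerical'>}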
edsl/questions/__init__.py
CHANGED
@@ -6,21 +6,22 @@ from edsl.questions.RegisterQuestionsMeta import RegisterQuestionsMeta
 from edsl.questions.QuestionBase import QuestionBase

 # Core Questions
+from edsl.questions.QuestionBudget import QuestionBudget
 from edsl.questions.QuestionCheckBox import QuestionCheckBox
 from edsl.questions.QuestionExtract import QuestionExtract
 from edsl.questions.QuestionFreeText import QuestionFreeText
+
 from edsl.questions.QuestionFunctional import QuestionFunctional
 from edsl.questions.QuestionList import QuestionList
 from edsl.questions.QuestionMultipleChoice import QuestionMultipleChoice
 from edsl.questions.QuestionNumerical import QuestionNumerical
-from edsl.questions.QuestionBudget import QuestionBudget
 from edsl.questions.QuestionRank import QuestionRank

-# #
+# # Questions derived from core questions
 from edsl.questions.derived.QuestionLikertFive import QuestionLikertFive
 from edsl.questions.derived.QuestionLinearScale import QuestionLinearScale
-from edsl.questions.derived.QuestionYesNo import QuestionYesNo
 from edsl.questions.derived.QuestionTopK import QuestionTopK
+from edsl.questions.derived.QuestionYesNo import QuestionYesNo

 # # Compose Questions
 # from edsl.questions.compose_questions import compose_questions
edsl/questions/derived/QuestionLikertFive.py
CHANGED
@@ -2,8 +2,6 @@ from __future__ import annotations
 from typing import Optional
 from edsl.questions.QuestionMultipleChoice import QuestionMultipleChoice

-from edsl.questions.decorators import inject_exception
-

 class QuestionLikertFive(QuestionMultipleChoice):
     """This question prompts the agent to respond to a statement on a 5-point Likert scale."""
@@ -16,34 +14,31 @@ class QuestionLikertFive(QuestionMultipleChoice):
         "Agree",
         "Strongly agree",
     ]
+    # default_instructions = QuestionMultipleChoice.default_instructions

     def __init__(
         self,
         question_name: str,
         question_text: str,
         question_options: Optional[list[str]] = likert_options,
-        answering_instructions: Optional[str] = None,
-        question_presentation: Optional[str] = None,
-        include_comment: bool = True,
     ):
         """Initialize the question.

         :param question_name: The name of the question.
         :param question_text: The text of the question.
         :param question_options: The options the respondent should select from (list of strings). If not provided, the default Likert options are used (['Strongly disagree', 'Disagree', 'Neutral', 'Agree', 'Strongly agree']). To view them, run `QuestionLikertFive.likert_options`.
+        :param instructions: Instructions for the question. If not provided, the default instructions are used. To view them, run `QuestionLikertFive.default_instructions`.
         """
         super().__init__(
             question_name=question_name,
             question_text=question_text,
             question_options=question_options,
-            use_code=False,
-            include_comment=include_comment,
-            answering_instructions=answering_instructions,
-            question_presentation=question_presentation,
         )

+    ################
+    # Helpful
+    ################
     @classmethod
-    @inject_exception
     def example(cls) -> QuestionLikertFive:
         """Return an example question."""
         return cls(
edsl/questions/derived/QuestionLinearScale.py
CHANGED
@@ -4,8 +4,6 @@ from typing import Optional
 from edsl.questions.descriptors import QuestionOptionsDescriptor, OptionLabelDescriptor
 from edsl.questions.QuestionMultipleChoice import QuestionMultipleChoice

-from edsl.questions.decorators import inject_exception
-

 class QuestionLinearScale(QuestionMultipleChoice):
     """This question prompts the agent to respond to a statement on a linear scale."""
@@ -20,9 +18,6 @@ class QuestionLinearScale(QuestionMultipleChoice):
         question_text: str,
         question_options: list[int],
         option_labels: Optional[dict[int, str]] = None,
-        answering_instructions: Optional[str] = None,
-        question_presentation: Optional[str] = None,
-        include_comment: Optional[bool] = True,
     ):
         """Instantiate a new QuestionLinearScale.

@@ -36,29 +31,21 @@ class QuestionLinearScale(QuestionMultipleChoice):
             question_name=question_name,
             question_text=question_text,
             question_options=question_options,
-            use_code=False,  # question linear scale will have it's own code
-            include_comment=include_comment,
         )
         self.question_options = question_options
-        self.option_labels = (
-            {int(k): v for k, v in option_labels.items()} if option_labels else {}
-        )
-        self.answering_instructions = answering_instructions
-        self.question_presentation = question_presentation
+        self.option_labels = option_labels

     ################
     # Helpful
     ################
     @classmethod
-
-    def example(cls, include_comment: bool = True) -> QuestionLinearScale:
+    def example(cls) -> QuestionLinearScale:
         """Return an example of a linear scale question."""
         return cls(
             question_text="How much do you like ice cream?",
             question_options=[1, 2, 3, 4, 5],
             question_name="ice_cream",
             option_labels={1: "I hate it", 5: "I love it"},
-            include_comment=include_comment,
         )
