edsl 0.1.33__py3-none-any.whl → 0.1.33.dev1__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in a supported public registry, and is provided for informational purposes only.
Files changed (180)
  1. edsl/Base.py +3 -9
  2. edsl/__init__.py +3 -8
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +8 -40
  5. edsl/agents/AgentList.py +0 -43
  6. edsl/agents/Invigilator.py +219 -135
  7. edsl/agents/InvigilatorBase.py +59 -148
  8. edsl/agents/{PromptConstructor.py → PromptConstructionMixin.py} +89 -138
  9. edsl/agents/__init__.py +0 -1
  10. edsl/config.py +56 -47
  11. edsl/coop/coop.py +7 -50
  12. edsl/data/Cache.py +1 -35
  13. edsl/data_transfer_models.py +38 -73
  14. edsl/enums.py +0 -4
  15. edsl/exceptions/language_models.py +1 -25
  16. edsl/exceptions/questions.py +5 -62
  17. edsl/exceptions/results.py +0 -4
  18. edsl/inference_services/AnthropicService.py +11 -13
  19. edsl/inference_services/AwsBedrock.py +17 -19
  20. edsl/inference_services/AzureAI.py +20 -37
  21. edsl/inference_services/GoogleService.py +12 -16
  22. edsl/inference_services/GroqService.py +0 -2
  23. edsl/inference_services/InferenceServiceABC.py +3 -58
  24. edsl/inference_services/OpenAIService.py +54 -48
  25. edsl/inference_services/models_available_cache.py +6 -0
  26. edsl/inference_services/registry.py +0 -6
  27. edsl/jobs/Answers.py +12 -10
  28. edsl/jobs/Jobs.py +21 -36
  29. edsl/jobs/buckets/BucketCollection.py +15 -24
  30. edsl/jobs/buckets/TokenBucket.py +14 -93
  31. edsl/jobs/interviews/Interview.py +78 -366
  32. edsl/jobs/interviews/InterviewExceptionEntry.py +19 -85
  33. edsl/jobs/interviews/InterviewTaskBuildingMixin.py +286 -0
  34. edsl/jobs/interviews/{InterviewExceptionCollection.py → interview_exception_tracking.py} +68 -14
  35. edsl/jobs/interviews/retry_management.py +37 -0
  36. edsl/jobs/runners/JobsRunnerAsyncio.py +175 -146
  37. edsl/jobs/runners/JobsRunnerStatusMixin.py +333 -0
  38. edsl/jobs/tasks/QuestionTaskCreator.py +23 -30
  39. edsl/jobs/tasks/TaskHistory.py +213 -148
  40. edsl/language_models/LanguageModel.py +156 -261
  41. edsl/language_models/ModelList.py +2 -2
  42. edsl/language_models/RegisterLanguageModelsMeta.py +29 -14
  43. edsl/language_models/registry.py +6 -23
  44. edsl/language_models/repair.py +19 -0
  45. edsl/prompts/Prompt.py +2 -52
  46. edsl/questions/AnswerValidatorMixin.py +26 -23
  47. edsl/questions/QuestionBase.py +249 -329
  48. edsl/questions/QuestionBudget.py +41 -99
  49. edsl/questions/QuestionCheckBox.py +35 -227
  50. edsl/questions/QuestionExtract.py +27 -98
  51. edsl/questions/QuestionFreeText.py +29 -52
  52. edsl/questions/QuestionFunctional.py +0 -7
  53. edsl/questions/QuestionList.py +22 -141
  54. edsl/questions/QuestionMultipleChoice.py +65 -159
  55. edsl/questions/QuestionNumerical.py +46 -88
  56. edsl/questions/QuestionRank.py +24 -182
  57. edsl/questions/RegisterQuestionsMeta.py +12 -31
  58. edsl/questions/__init__.py +4 -3
  59. edsl/questions/derived/QuestionLikertFive.py +5 -10
  60. edsl/questions/derived/QuestionLinearScale.py +2 -15
  61. edsl/questions/derived/QuestionTopK.py +1 -10
  62. edsl/questions/derived/QuestionYesNo.py +3 -24
  63. edsl/questions/descriptors.py +7 -43
  64. edsl/questions/question_registry.py +2 -6
  65. edsl/results/Dataset.py +0 -20
  66. edsl/results/DatasetExportMixin.py +48 -46
  67. edsl/results/Result.py +5 -32
  68. edsl/results/Results.py +46 -135
  69. edsl/results/ResultsDBMixin.py +3 -3
  70. edsl/scenarios/FileStore.py +10 -71
  71. edsl/scenarios/Scenario.py +25 -96
  72. edsl/scenarios/ScenarioImageMixin.py +2 -2
  73. edsl/scenarios/ScenarioList.py +39 -361
  74. edsl/scenarios/ScenarioListExportMixin.py +0 -9
  75. edsl/scenarios/ScenarioListPdfMixin.py +4 -150
  76. edsl/study/SnapShot.py +1 -8
  77. edsl/study/Study.py +0 -32
  78. edsl/surveys/Rule.py +1 -10
  79. edsl/surveys/RuleCollection.py +5 -21
  80. edsl/surveys/Survey.py +310 -636
  81. edsl/surveys/SurveyExportMixin.py +9 -71
  82. edsl/surveys/SurveyFlowVisualizationMixin.py +1 -2
  83. edsl/surveys/SurveyQualtricsImport.py +4 -75
  84. edsl/utilities/gcp_bucket/simple_example.py +9 -0
  85. edsl/utilities/utilities.py +1 -9
  86. {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/METADATA +2 -5
  87. edsl-0.1.33.dev1.dist-info/RECORD +209 -0
  88. edsl/TemplateLoader.py +0 -24
  89. edsl/auto/AutoStudy.py +0 -117
  90. edsl/auto/StageBase.py +0 -230
  91. edsl/auto/StageGenerateSurvey.py +0 -178
  92. edsl/auto/StageLabelQuestions.py +0 -125
  93. edsl/auto/StagePersona.py +0 -61
  94. edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
  95. edsl/auto/StagePersonaDimensionValues.py +0 -74
  96. edsl/auto/StagePersonaDimensions.py +0 -69
  97. edsl/auto/StageQuestions.py +0 -73
  98. edsl/auto/SurveyCreatorPipeline.py +0 -21
  99. edsl/auto/utilities.py +0 -224
  100. edsl/coop/PriceFetcher.py +0 -58
  101. edsl/inference_services/MistralAIService.py +0 -120
  102. edsl/inference_services/TestService.py +0 -80
  103. edsl/inference_services/TogetherAIService.py +0 -170
  104. edsl/jobs/FailedQuestion.py +0 -78
  105. edsl/jobs/runners/JobsRunnerStatus.py +0 -331
  106. edsl/language_models/fake_openai_call.py +0 -15
  107. edsl/language_models/fake_openai_service.py +0 -61
  108. edsl/language_models/utilities.py +0 -61
  109. edsl/questions/QuestionBaseGenMixin.py +0 -133
  110. edsl/questions/QuestionBasePromptsMixin.py +0 -266
  111. edsl/questions/Quick.py +0 -41
  112. edsl/questions/ResponseValidatorABC.py +0 -170
  113. edsl/questions/decorators.py +0 -21
  114. edsl/questions/prompt_templates/question_budget.jinja +0 -13
  115. edsl/questions/prompt_templates/question_checkbox.jinja +0 -32
  116. edsl/questions/prompt_templates/question_extract.jinja +0 -11
  117. edsl/questions/prompt_templates/question_free_text.jinja +0 -3
  118. edsl/questions/prompt_templates/question_linear_scale.jinja +0 -11
  119. edsl/questions/prompt_templates/question_list.jinja +0 -17
  120. edsl/questions/prompt_templates/question_multiple_choice.jinja +0 -33
  121. edsl/questions/prompt_templates/question_numerical.jinja +0 -37
  122. edsl/questions/templates/__init__.py +0 -0
  123. edsl/questions/templates/budget/__init__.py +0 -0
  124. edsl/questions/templates/budget/answering_instructions.jinja +0 -7
  125. edsl/questions/templates/budget/question_presentation.jinja +0 -7
  126. edsl/questions/templates/checkbox/__init__.py +0 -0
  127. edsl/questions/templates/checkbox/answering_instructions.jinja +0 -10
  128. edsl/questions/templates/checkbox/question_presentation.jinja +0 -22
  129. edsl/questions/templates/extract/__init__.py +0 -0
  130. edsl/questions/templates/extract/answering_instructions.jinja +0 -7
  131. edsl/questions/templates/extract/question_presentation.jinja +0 -1
  132. edsl/questions/templates/free_text/__init__.py +0 -0
  133. edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
  134. edsl/questions/templates/free_text/question_presentation.jinja +0 -1
  135. edsl/questions/templates/likert_five/__init__.py +0 -0
  136. edsl/questions/templates/likert_five/answering_instructions.jinja +0 -10
  137. edsl/questions/templates/likert_five/question_presentation.jinja +0 -12
  138. edsl/questions/templates/linear_scale/__init__.py +0 -0
  139. edsl/questions/templates/linear_scale/answering_instructions.jinja +0 -5
  140. edsl/questions/templates/linear_scale/question_presentation.jinja +0 -5
  141. edsl/questions/templates/list/__init__.py +0 -0
  142. edsl/questions/templates/list/answering_instructions.jinja +0 -4
  143. edsl/questions/templates/list/question_presentation.jinja +0 -5
  144. edsl/questions/templates/multiple_choice/__init__.py +0 -0
  145. edsl/questions/templates/multiple_choice/answering_instructions.jinja +0 -9
  146. edsl/questions/templates/multiple_choice/html.jinja +0 -0
  147. edsl/questions/templates/multiple_choice/question_presentation.jinja +0 -12
  148. edsl/questions/templates/numerical/__init__.py +0 -0
  149. edsl/questions/templates/numerical/answering_instructions.jinja +0 -8
  150. edsl/questions/templates/numerical/question_presentation.jinja +0 -7
  151. edsl/questions/templates/rank/__init__.py +0 -0
  152. edsl/questions/templates/rank/answering_instructions.jinja +0 -11
  153. edsl/questions/templates/rank/question_presentation.jinja +0 -15
  154. edsl/questions/templates/top_k/__init__.py +0 -0
  155. edsl/questions/templates/top_k/answering_instructions.jinja +0 -8
  156. edsl/questions/templates/top_k/question_presentation.jinja +0 -22
  157. edsl/questions/templates/yes_no/__init__.py +0 -0
  158. edsl/questions/templates/yes_no/answering_instructions.jinja +0 -6
  159. edsl/questions/templates/yes_no/question_presentation.jinja +0 -12
  160. edsl/results/DatasetTree.py +0 -145
  161. edsl/results/Selector.py +0 -118
  162. edsl/results/tree_explore.py +0 -115
  163. edsl/surveys/instructions/ChangeInstruction.py +0 -47
  164. edsl/surveys/instructions/Instruction.py +0 -34
  165. edsl/surveys/instructions/InstructionCollection.py +0 -77
  166. edsl/surveys/instructions/__init__.py +0 -0
  167. edsl/templates/error_reporting/base.html +0 -24
  168. edsl/templates/error_reporting/exceptions_by_model.html +0 -35
  169. edsl/templates/error_reporting/exceptions_by_question_name.html +0 -17
  170. edsl/templates/error_reporting/exceptions_by_type.html +0 -17
  171. edsl/templates/error_reporting/interview_details.html +0 -116
  172. edsl/templates/error_reporting/interviews.html +0 -10
  173. edsl/templates/error_reporting/overview.html +0 -5
  174. edsl/templates/error_reporting/performance_plot.html +0 -2
  175. edsl/templates/error_reporting/report.css +0 -74
  176. edsl/templates/error_reporting/report.html +0 -118
  177. edsl/templates/error_reporting/report.js +0 -25
  178. edsl-0.1.33.dist-info/RECORD +0 -295
  179. {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/LICENSE +0 -0
  180. {edsl-0.1.33.dist-info → edsl-0.1.33.dev1.dist-info}/WHEEL +0 -0
edsl/questions/QuestionBudget.py
@@ -1,60 +1,8 @@
 from __future__ import annotations
-from typing import Any, Optional, Union, List
-
-from pydantic import Field, BaseModel, validator
-
+import random
+from typing import Any, Optional, Union
 from edsl.questions.QuestionBase import QuestionBase
 from edsl.questions.descriptors import IntegerDescriptor, QuestionOptionsDescriptor
-from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
-
-
-class BudgewResponseValidator(ResponseValidatorABC):
-    valid_examples = []
-
-    invalid_examples = []
-
-    def fix(self, response, verbose=False):
-        if verbose:
-            print(f"Fixing list response: {response}")
-        answer = str(response.get("answer") or response.get("generated_tokens", ""))
-        if len(answer.split(",")) > 0:
-            return (
-                {"answer": answer.split(",")} | {"comment": response.get("comment")}
-                if "comment" in response
-                else {}
-            )
-
-
-def create_budget_model(
-    budget_sum: float, permissive: bool, question_options: List[str]
-):
-    class BudgetResponse(BaseModel):
-        answer: List[float] = Field(
-            ...,
-            description="List of non-negative numbers representing budget allocation",
-            min_items=len(question_options),
-            max_items=len(question_options),
-        )
-        comment: Optional[str] = None
-        generated_tokens: Optional[str] = None
-
-        @validator("answer")
-        def validate_answer(cls, v):
-            if len(v) != len(question_options):
-                raise ValueError(f"Must provide {len(question_options)} values")
-            if any(x < 0 for x in v):
-                raise ValueError("All values must be non-negative")
-            total = sum(v)
-            if not permissive and total != budget_sum:
-                raise ValueError(f"Sum of numbers must equal {budget_sum}")
-            elif permissive and total > budget_sum:
-                raise ValueError(f"Sum of numbers cannot exceed {budget_sum}")
-            return v
-
-        class Config:
-            extra = "forbid"
-
-    return BudgetResponse
 
 
 class QuestionBudget(QuestionBase):
@@ -63,8 +11,6 @@ class QuestionBudget(QuestionBase):
     question_type = "budget"
     budget_sum: int = IntegerDescriptor(none_allowed=False)
     question_options: list[str] = QuestionOptionsDescriptor(q_budget=True)
-    _response_model = None
-    response_validator_class = BudgewResponseValidator
 
     def __init__(
         self,
@@ -72,10 +18,6 @@ class QuestionBudget(QuestionBase):
         question_text: str,
         question_options: list[str],
        budget_sum: int,
-        include_comment: bool = True,
-        question_presentation: Optional[str] = None,
-        answering_instructions: Optional[str] = None,
-        permissive: bool = False,
    ):
         """Instantiate a new QuestionBudget.
 
@@ -88,19 +30,20 @@ class QuestionBudget(QuestionBase):
         self.question_text = question_text
         self.question_options = question_options
         self.budget_sum = budget_sum
-        self.question_presentation = question_presentation
-        self.answering_instructions = answering_instructions
-        self.permissive = permissive
-        self.include_comment = include_comment
-
-    def create_response_model(self):
-        return create_budget_model(
-            self.budget_sum, self.permissive, self.question_options
-        )
+
+    ################
+    # Answer methods
+    ################
+    def _validate_answer(self, answer: dict[str, Any]) -> dict[str, Union[int, str]]:
+        """Validate the answer."""
+        self._validate_answer_template_basic(answer)
+        self._validate_answer_key_value(answer, "answer", dict)
+        self._validate_answer_budget(answer)
+        return answer
 
     def _translate_answer_code_to_answer(
-        self, answer_code, combined_dict
-    ) -> list[dict]:
+        self, answer_codes: dict[str, int], scenario: "Scenario" = None
+    ):
         """
         Translate the answer codes to the actual answers.
 
@@ -109,35 +52,35 @@ class QuestionBudget(QuestionBase):
         This code will translate that to "a".
         """
         translated_codes = []
-        for answer_code, question_option in zip(answer_code, self.question_options):
-            translated_codes.append({question_option: answer_code})
+        for answer_code, response in answer_codes.items():
+            translated_codes.append({self.question_options[int(answer_code)]: response})
 
         return translated_codes
 
-    # def _simulate_answer(self, human_readable=True):
-    #     """Simulate a valid answer for debugging purposes (what the validator expects)."""
-    #     from edsl.utilities.utilities import random_string
-
-    #     if human_readable:
-    #         keys = self.question_options
-    #     else:
-    #         keys = range(len(self.question_options))
-    #     remaining_budget = self.budget_sum
-    #     values = []
-    #     for _ in range(len(self.question_options)):
-    #         if _ == len(self.question_options) - 1:
-    #             # Assign remaining budget to the last value
-    #             values.append(remaining_budget)
-    #         else:
-    #             # Generate a random value between 0 and remaining budget
-    #             value = random.randint(0, remaining_budget)
-    #             values.append(value)
-    #             remaining_budget -= value
-    #     answer = dict(zip(keys, values))
-    #     return {
-    #         "answer": answer,
-    #         "comment": random_string(),
-    #     }
+    def _simulate_answer(self, human_readable=True):
+        """Simulate a valid answer for debugging purposes (what the validator expects)."""
+        from edsl.utilities.utilities import random_string
+
+        if human_readable:
+            keys = self.question_options
+        else:
+            keys = range(len(self.question_options))
+        remaining_budget = self.budget_sum
+        values = []
+        for _ in range(len(self.question_options)):
+            if _ == len(self.question_options) - 1:
+                # Assign remaining budget to the last value
+                values.append(remaining_budget)
+            else:
+                # Generate a random value between 0 and remaining budget
+                value = random.randint(0, remaining_budget)
+                values.append(value)
+                remaining_budget -= value
+        answer = dict(zip(keys, values))
+        return {
+            "answer": answer,
+            "comment": random_string(),
+        }
 
     @property
     def question_html_content(self) -> str:
@@ -184,14 +127,13 @@ class QuestionBudget(QuestionBase):
     # Helpful methods
     ################
     @classmethod
-    def example(cls, include_comment: bool = True) -> QuestionBudget:
+    def example(cls) -> QuestionBudget:
         """Return an example of a budget question."""
         return cls(
             question_name="food_budget",
             question_text="How would you allocate $100?",
             question_options=["Pizza", "Ice Cream", "Burgers", "Salad"],
             budget_sum=100,
-            include_comment=include_comment,
         )
 
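Note on the edsl/questions/QuestionBudget.py diff above: 0.1.33.dev1 drops the pydantic-based create_budget_model / BudgewResponseValidator machinery and reverts to the older _validate_answer / _simulate_answer methods. As a rough guide to the rule the removed pydantic model enforced, here is a minimal standalone Python sketch; it is not code from either wheel, and the helper name and sample values are invented for illustration:

    def check_budget_answer(values, question_options, budget_sum, permissive=False):
        """Raise ValueError if an allocation breaks the rules encoded in the
        removed create_budget_model: one non-negative number per option, and the
        total must equal budget_sum (or merely not exceed it when permissive)."""
        if len(values) != len(question_options):
            raise ValueError(f"Must provide {len(question_options)} values")
        if any(v < 0 for v in values):
            raise ValueError("All values must be non-negative")
        total = sum(values)
        if not permissive and total != budget_sum:
            raise ValueError(f"Sum of numbers must equal {budget_sum}")
        if permissive and total > budget_sum:
            raise ValueError(f"Sum of numbers cannot exceed {budget_sum}")

    # Mirrors QuestionBudget.example(): four options, budget_sum=100.
    # A valid allocation passes silently; anything else raises ValueError.
    check_budget_answer([25, 25, 25, 25], ["Pizza", "Ice Cream", "Burgers", "Salad"], 100)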
edsl/questions/QuestionCheckBox.py
@@ -10,165 +10,6 @@ from edsl.questions.descriptors import (
     QuestionOptionsDescriptor,
 )
 
-from edsl.questions.decorators import inject_exception
-
-from pydantic import field_validator
-from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
-from edsl.questions.ResponseValidatorABC import BaseResponse
-
-from edsl.exceptions import QuestionAnswerValidationError
-
-from pydantic import BaseModel, Field, conlist
-from typing import List, Literal, Optional, Annotated
-
-
-def create_checkbox_response_model(
-    choices: list,
-    min_selections: Optional[int] = None,
-    max_selections: Optional[int] = None,
-    permissive: bool = False,
-):
-    """
-    Dynamically create a CheckboxResponse model with a predefined list of choices.
-
-    :param choices: A list of allowed values for the answer field.
-    :param include_comment: Whether to include a comment field in the model.
-    :return: A new Pydantic model class.
-    """
-    # Convert the choices list to a tuple for use with Literal
-    choice_tuple = tuple(choices)
-
-    field_params = {}
-    if min_selections is not None and not permissive:
-        field_params["min_items"] = min_selections
-    if max_selections is not None and not permissive:
-        field_params["max_items"] = max_selections
-
-    class CheckboxResponse(BaseModel):
-        answer: Annotated[
-            List[Literal[choice_tuple]],
-            Field(..., **field_params),
-        ] = Field(..., description="List of selected choices")
-        comment: Optional[str] = Field(None, description="Optional comment field")
-        generated_tokens: Optional[Any] = Field(default=None)
-
-        class Config:
-            @staticmethod
-            def json_schema_extra(schema: dict, model: BaseModel) -> None:
-                # Add the list of choices to the schema for better documentation
-                for prop in schema.get("properties", {}).values():
-                    if prop.get("title") == "answer":
-                        prop["items"] = {"enum": choices}
-
-    return CheckboxResponse
-
-
-class CheckBoxResponseValidator(ResponseValidatorABC):
-    required_params = [
-        "question_options",
-        "min_selections",
-        "max_selections",
-        "use_code",
-        "permissive",
-    ]
-
-    valid_examples = [
-        ({"answer": [1, 2]}, {"question_options": ["Good", "Great", "OK", "Bad"]})
-    ]
-
-    invalid_examples = [
-        (
-            {"answer": [-1]},
-            {"question_options": ["Good", "Great", "OK", "Bad"]},
-            "Answer code must be a non-negative integer",
-        ),
-        (
-            {"answer": 1},
-            {"question_options": ["Good", "Great", "OK", "Bad"]},
-            "Answer code must be a list",
-        ),
-        (
-            {"answer": [1, 2, 3, 4]},
-            {
-                "question_options": ["Good", "Great", "OK", "Bad"],
-                "min_selections": 1,
-                "max_selections": 2,
-            },
-            "Too many options selected",
-        ),
-    ]
-
-    def fix(self, response, verbose=False):
-        if verbose:
-            print("Invalid response of QuestionCheckBox was: ", response)
-        response_text = response.get("generated_tokens")
-        if response_text is None or response_text == "":  # nothing to be done
-            return response
-        # Maybe it's a comma separated list?
-        proposed_list = response_text.split(",")
-        proposed_list = [item.strip() for item in proposed_list]
-        if verbose:
-            print("Using code? ", self.use_code)
-        if self.use_code:
-            try:
-                proposed_list = [int(i) for i in proposed_list]
-            except ValueError:
-                # print("Could not convert to int")
-                pass
-
-        if verbose:
-            print("Proposed solution is: ", proposed_list)
-
-        # print(f"Ivalid generated tokens was was: {response_text}")
-        if "comment" in response:
-            proposed_data = {
-                "answer": proposed_list,
-                "comment": response["comment"],
-                "generated_tokens": response.get("generated_tokens", None),
-            }
-        else:
-            proposed_data = {
-                "answer": proposed_list,
-                "generated_tokens": response.get("generated_tokens", None),
-            }
-
-        try:
-            self.response_model(**proposed_data)
-            print("Proposed solution is valid")
-            print("Returning proposed data: ", proposed_data)
-            return proposed_data
-        except Exception as e:
-            if verbose:
-                print(f"Proposed solution {proposed_data} is invalid. Error: {e}")
-            # return response
-        if verbose:
-            print("Now seeing if responses show up in the answer")
-        matches = []
-        for index, option in enumerate(self.question_options):
-            if self.use_code:
-                if str(index) in response_text:
-                    matches.append(index)
-            else:
-                if option in response_text:
-                    matches.append(index)
-        proposed_data = {
-            "answer": matches,
-            "comment": response.get("comment", None),
-            "generated_tokens": response.get("generated_tokens", None),
-        }
-        try:
-            self.response_model(**proposed_data)
-            return proposed_data
-        except Exception as e:
-            if verbose:
-                print(f"Proposed solution {proposed_data} is invalid. Error: {e}")
-        return response
-
-    def custom_validate(self, response) -> BaseResponse:
-        if response.answer is None:
-            raise QuestionAnswerValidationError("Answer is missing.")
-        return response.dict()
-
 
 class QuestionCheckBox(QuestionBase):
     """This question prompts the agent to select options from a list."""
@@ -179,9 +20,6 @@ class QuestionCheckBox(QuestionBase):
     min_selections = IntegerDescriptor(none_allowed=True)
     max_selections = IntegerDescriptor(none_allowed=True)
 
-    _response_model = None
-    response_validator_class = CheckBoxResponseValidator
-
     def __init__(
         self,
         question_name: str,
@@ -189,11 +27,6 @@ class QuestionCheckBox(QuestionBase):
         question_options: list[str],
         min_selections: Optional[int] = None,
         max_selections: Optional[int] = None,
-        include_comment: bool = True,
-        use_code: bool = True,
-        question_presentation: Optional[str] = None,
-        answering_instructions: Optional[str] = None,
-        permissive: bool = False,
     ):
         """Instantiate a new QuestionCheckBox.
 
@@ -209,28 +42,15 @@ class QuestionCheckBox(QuestionBase):
         self.max_selections = max_selections
         self.question_options = question_options
 
-        self._include_comment = include_comment
-        self._use_code = use_code
-        self.permissive = permissive
-
-        self.question_presentation = question_presentation
-        self.answering_instructions = answering_instructions
-
-    def create_response_model(self):
-        if not self._use_code:
-            return create_checkbox_response_model(
-                self.question_options,
-                min_selections=self.min_selections,
-                max_selections=self.max_selections,  # include_comment=self._include_comment
-                permissive=self.permissive,
-            )
-        else:
-            return create_checkbox_response_model(
-                list(range(len(self.question_options))),
-                min_selections=self.min_selections,
-                max_selections=self.max_selections,  # include_comment=self._include_comment
-                permissive=self.permissive,
-            )
+    ################
+    # Answer methods
+    ################
+    def _validate_answer(self, answer: Any) -> dict[str, Union[int, str]]:
+        """Validate the answer."""
+        self._validate_answer_template_basic(answer)
+        self._validate_answer_key_value(answer, "answer", list)
+        self._validate_answer_checkbox(answer)
+        return answer
 
     def _translate_answer_code_to_answer(
         self, answer_codes, scenario: "Scenario" = None
@@ -249,36 +69,33 @@ class QuestionCheckBox(QuestionBase):
         ]
         translated_codes = []
         for answer_code in answer_codes:
-            if self._use_code:
-                translated_codes.append(translated_options[int(answer_code)])
-            else:
-                translated_codes.append(answer_code)
+            translated_codes.append(translated_options[int(answer_code)])
 
         return translated_codes
 
-    # def _simulate_answer(self, human_readable=True) -> dict[str, Union[int, str]]:
-    #     """Simulate a valid answer for debugging purposes."""
-    #     from edsl.utilities.utilities import random_string
-
-    #     min_selections = self.min_selections or 1
-    #     max_selections = self.max_selections or len(self.question_options)
-    #     num_selections = random.randint(min_selections, max_selections)
-    #     if human_readable:
-    #         # Select a random number of options from self.question_options
-    #         selected_options = random.sample(self.question_options, num_selections)
-    #         answer = {
-    #             "answer": selected_options,
-    #             "comment": random_string(),
-    #         }
-    #     else:
-    #         # Select a random number of indices from the range of self.question_options
-    #         selected_indices = random.sample(
-    #             range(len(self.question_options)), num_selections
-    #         )
-    #         answer = {
-    #             "answer": selected_indices,
-    #             "comment": random_string(),
-    #         }
-    #     return answer
+    def _simulate_answer(self, human_readable=True) -> dict[str, Union[int, str]]:
+        """Simulate a valid answer for debugging purposes."""
+        from edsl.utilities.utilities import random_string
+
+        min_selections = self.min_selections or 1
+        max_selections = self.max_selections or len(self.question_options)
+        num_selections = random.randint(min_selections, max_selections)
+        if human_readable:
+            # Select a random number of options from self.question_options
+            selected_options = random.sample(self.question_options, num_selections)
+            answer = {
+                "answer": selected_options,
+                "comment": random_string(),
+            }
+        else:
+            # Select a random number of indices from the range of self.question_options
+            selected_indices = random.sample(
+                range(len(self.question_options)), num_selections
+            )
+            answer = {
+                "answer": selected_indices,
+                "comment": random_string(),
+            }
+        return answer
 
     @property
     def question_html_content(self) -> str:
@@ -308,8 +125,7 @@ class QuestionCheckBox(QuestionBase):
     # Helpful methods
     ################
     @classmethod
-    @inject_exception
-    def example(cls, include_comment=False, use_code=True) -> QuestionCheckBox:
+    def example(cls) -> QuestionCheckBox:
         """Return an example checkbox question."""
         return cls(
             question_name="never_eat",
@@ -323,8 +139,6 @@ class QuestionCheckBox(QuestionBase):
             ],
             min_selections=2,
             max_selections=5,
-            use_code=use_code,
-            include_comment=include_comment,
         )
 
 
@@ -351,9 +165,3 @@ def main():
     import doctest
 
     doctest.testmod(optionflags=doctest.ELLIPSIS)
-
-
-if __name__ == "__main__":
-    import doctest
-
-    doctest.testmod(optionflags=doctest.ELLIPSIS)
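Note on the edsl/questions/QuestionCheckBox.py diff above: 0.1.33.dev1 removes create_checkbox_response_model, CheckBoxResponseValidator, and the use_code option, so answer codes are always treated as option indices and translated back to labels. A minimal sketch of that translation follows; it is not code from either wheel, the option list is borrowed from the removed validator's examples, and the selected codes are invented for illustration:

    def translate_answer_codes(answer_codes, question_options):
        """Map selected indices (e.g. [1, 2]) back to their option labels,
        as the dev1 _translate_answer_code_to_answer always does."""
        return [question_options[int(code)] for code in answer_codes]

    print(translate_answer_codes([1, 2], ["Good", "Great", "OK", "Bad"]))
    # ['Great', 'OK']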
edsl/questions/QuestionExtract.py
@@ -1,112 +1,20 @@
 from __future__ import annotations
-from typing import Any, Optional, Dict
+from typing import Any
 from edsl.questions.QuestionBase import QuestionBase
 from edsl.questions.descriptors import AnswerTemplateDescriptor
 
-from edsl.questions.ResponseValidatorABC import ResponseValidatorABC
-from edsl.questions.ResponseValidatorABC import BaseResponse
-from edsl.exceptions import QuestionAnswerValidationError
-from edsl.questions.decorators import inject_exception
-
-from typing import Dict, Any
-from pydantic import create_model, Field
-
-import json
-import re
-
-
-def extract_json(text, expected_keys, verbose=False):
-    # Escape special regex characters in keys
-    escaped_keys = [re.escape(key) for key in expected_keys]
-
-    # Create a pattern that looks for all expected keys
-    pattern = r"\{[^}]*" + r"[^}]*".join(escaped_keys) + r"[^}]*\}"
-
-    json_match = re.search(pattern, text)
-
-    if json_match:
-        json_str = json_match.group(0)
-        try:
-            # Parse the extracted string as JSON
-            json_data = json.loads(json_str)
-
-            # Verify that all expected keys are present
-            if all(key in json_data for key in expected_keys):
-                return json_data
-            else:
-                if verbose:
-                    print(
-                        "Error: Not all expected keys were found in the extracted JSON."
-                    )
-                return None
-        except json.JSONDecodeError:
-            if verbose:
-                print("Error: The extracted content is not valid JSON.")
-            return None
-    else:
-        if verbose:
-            print("Error: No JSON-like structure found with all expected keys.")
-        return None
-
-
-def dict_to_pydantic_model(input_dict: Dict[str, Any]) -> Any:
-    field_definitions = {
-        key: (str, Field(default=str(value))) for key, value in input_dict.items()
-    }
-
-    DynamicModel = create_model("DynamicModel", **field_definitions)
-
-    class AnswerModel(BaseResponse):
-        answer: DynamicModel
-        generated_tokens: Optional[str] = None
-        comment: Optional[str] = None
-
-    return AnswerModel
-
-
-class ExtractResponseValidator(ResponseValidatorABC):
-    required_params = ["answer_template"]
-    valid_examples = [({"answer": "This is great"}, {})]
-    invalid_examples = [
-        (
-            {"answer": None},
-            {"answer_template": {"name": "John Doe", "profession": "Carpenter"}},
-            "Result cannot be empty",
-        ),
-    ]
-
-    def custom_validate(self, response) -> BaseResponse:
-        return response.dict()
-
-    def fix(self, response, verbose=False):
-        raw_tokens = response["generated_tokens"]
-        if verbose:
-            print(f"Invalid response of QuestionExtract was: {raw_tokens}")
-        extracted_json = extract_json(raw_tokens, self.answer_template.keys(), verbose)
-        if verbose:
-            print("Proposed solution is: ", extracted_json)
-        return {
-            "answer": extracted_json,
-            "comment": response.get("comment", None),
-            "generated_tokens": raw_tokens,
-        }
-
 
 class QuestionExtract(QuestionBase):
     """This question prompts the agent to extract information from a string and return it in a given template."""
 
     question_type = "extract"
     answer_template: dict[str, Any] = AnswerTemplateDescriptor()
-    _response_model = None
-    response_validator_class = ExtractResponseValidator
 
     def __init__(
         self,
         question_text: str,
         answer_template: dict[str, Any],
         question_name: str,
-        answering_instructions: str = None,
-        question_presentation: str = None,
     ):
         """Initialize the question.
 
@@ -118,11 +26,33 @@ class QuestionExtract(QuestionBase):
         self.question_name = question_name
         self.question_text = question_text
         self.answer_template = answer_template
-        self.answering_instructions = answering_instructions
-        self.question_presentation = question_presentation
 
-    def create_response_model(self):
-        return dict_to_pydantic_model(self.answer_template)
+    ################
+    # Answer methods
+    ################
+    def _validate_answer(self, answer: Any) -> dict[str, Any]:
+        """Validate the answer."""
+        # raw_json = answer["answer"]
+        # fixed_json_data = re.sub(r"\'", '"', raw_json)
+        # answer["answer"] = json.loads(fixed_json_data)
+        self._validate_answer_template_basic(answer)
+        # self._validate_answer_key_value(answer, "answer", dict)
+
+        self._validate_answer_extract(answer)
+        return answer
+
+    def _translate_answer_code_to_answer(self, answer, scenario: "Scenario" = None):
+        """Return the answer in a human-readable format."""
+        return answer
+
+    def _simulate_answer(self, human_readable: bool = True) -> dict[str, str]:
+        """Simulate a valid answer for debugging purposes."""
+        from edsl.utilities.utilities import random_string
+
+        return {
+            "answer": {key: random_string() for key in self.answer_template.keys()},
+            "comment": random_string(),
+        }
 
     @property
     def question_html_content(self) -> str:
@@ -147,7 +77,6 @@ class QuestionExtract(QuestionBase):
     # Helpful methods
    ################
     @classmethod
-    @inject_exception
     def example(cls) -> QuestionExtract:
         """Return an example question."""
         return cls(
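Note on the edsl/questions/QuestionExtract.py diff above: 0.1.33.dev1 likewise drops extract_json, dict_to_pydantic_model, and ExtractResponseValidator in favor of _validate_answer, _translate_answer_code_to_answer, and _simulate_answer. The removed extract_json helper is the interesting piece: it pulls a JSON object containing all expected template keys out of raw model output. Here is a condensed sketch of that behavior; the sample text is invented, and the keys come from the removed validator's answer_template example:

    import json
    import re

    def extract_json(text, expected_keys):
        """Condensed from the removed helper: find a JSON-like object in raw
        model output that mentions all expected keys, and parse it (else None)."""
        pattern = r"\{[^}]*" + r"[^}]*".join(re.escape(k) for k in expected_keys) + r"[^}]*\}"
        match = re.search(pattern, text)
        if not match:
            return None
        try:
            data = json.loads(match.group(0))
        except json.JSONDecodeError:
            return None
        return data if all(k in data for k in expected_keys) else None

    raw = 'Sure: {"name": "John Doe", "profession": "Carpenter"} -- hope that helps.'
    print(extract_json(raw, ["name", "profession"]))
    # {'name': 'John Doe', 'profession': 'Carpenter'}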