edsl 0.1.50__py3-none-any.whl → 0.1.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. edsl/__init__.py +45 -34
  2. edsl/__version__.py +1 -1
  3. edsl/base/base_exception.py +2 -2
  4. edsl/buckets/bucket_collection.py +1 -1
  5. edsl/buckets/exceptions.py +32 -0
  6. edsl/buckets/token_bucket_api.py +26 -10
  7. edsl/caching/cache.py +5 -2
  8. edsl/caching/remote_cache_sync.py +5 -5
  9. edsl/caching/sql_dict.py +12 -11
  10. edsl/config/__init__.py +1 -1
  11. edsl/config/config_class.py +4 -2
  12. edsl/conversation/Conversation.py +9 -5
  13. edsl/conversation/car_buying.py +1 -3
  14. edsl/conversation/mug_negotiation.py +2 -6
  15. edsl/coop/__init__.py +11 -8
  16. edsl/coop/coop.py +15 -13
  17. edsl/coop/coop_functions.py +1 -1
  18. edsl/coop/ep_key_handling.py +1 -1
  19. edsl/coop/price_fetcher.py +2 -2
  20. edsl/coop/utils.py +2 -2
  21. edsl/dataset/dataset.py +144 -63
  22. edsl/dataset/dataset_operations_mixin.py +14 -6
  23. edsl/dataset/dataset_tree.py +3 -3
  24. edsl/dataset/display/table_renderers.py +6 -3
  25. edsl/dataset/file_exports.py +4 -4
  26. edsl/dataset/r/ggplot.py +3 -3
  27. edsl/inference_services/available_model_fetcher.py +2 -2
  28. edsl/inference_services/data_structures.py +5 -5
  29. edsl/inference_services/inference_service_abc.py +1 -1
  30. edsl/inference_services/inference_services_collection.py +1 -1
  31. edsl/inference_services/service_availability.py +3 -3
  32. edsl/inference_services/services/azure_ai.py +3 -3
  33. edsl/inference_services/services/google_service.py +1 -1
  34. edsl/inference_services/services/test_service.py +1 -1
  35. edsl/instructions/change_instruction.py +5 -4
  36. edsl/instructions/instruction.py +1 -0
  37. edsl/instructions/instruction_collection.py +5 -4
  38. edsl/instructions/instruction_handler.py +10 -8
  39. edsl/interviews/answering_function.py +20 -21
  40. edsl/interviews/exception_tracking.py +3 -2
  41. edsl/interviews/interview.py +1 -1
  42. edsl/interviews/interview_status_dictionary.py +1 -1
  43. edsl/interviews/interview_task_manager.py +7 -4
  44. edsl/interviews/request_token_estimator.py +3 -2
  45. edsl/interviews/statistics.py +2 -2
  46. edsl/invigilators/invigilators.py +34 -6
  47. edsl/jobs/__init__.py +39 -2
  48. edsl/jobs/async_interview_runner.py +1 -1
  49. edsl/jobs/check_survey_scenario_compatibility.py +5 -5
  50. edsl/jobs/data_structures.py +2 -2
  51. edsl/jobs/html_table_job_logger.py +494 -257
  52. edsl/jobs/jobs.py +2 -2
  53. edsl/jobs/jobs_checks.py +5 -5
  54. edsl/jobs/jobs_component_constructor.py +2 -2
  55. edsl/jobs/jobs_pricing_estimation.py +1 -1
  56. edsl/jobs/jobs_runner_asyncio.py +2 -2
  57. edsl/jobs/jobs_status_enums.py +1 -0
  58. edsl/jobs/remote_inference.py +47 -13
  59. edsl/jobs/results_exceptions_handler.py +2 -2
  60. edsl/language_models/language_model.py +151 -145
  61. edsl/notebooks/__init__.py +24 -1
  62. edsl/notebooks/exceptions.py +82 -0
  63. edsl/notebooks/notebook.py +7 -3
  64. edsl/notebooks/notebook_to_latex.py +1 -1
  65. edsl/prompts/__init__.py +23 -2
  66. edsl/prompts/prompt.py +1 -1
  67. edsl/questions/__init__.py +4 -4
  68. edsl/questions/answer_validator_mixin.py +0 -5
  69. edsl/questions/compose_questions.py +2 -2
  70. edsl/questions/descriptors.py +1 -1
  71. edsl/questions/question_base.py +32 -3
  72. edsl/questions/question_base_prompts_mixin.py +4 -4
  73. edsl/questions/question_budget.py +503 -102
  74. edsl/questions/question_check_box.py +658 -156
  75. edsl/questions/question_dict.py +176 -2
  76. edsl/questions/question_extract.py +401 -61
  77. edsl/questions/question_free_text.py +77 -9
  78. edsl/questions/question_functional.py +118 -9
  79. edsl/questions/{derived/question_likert_five.py → question_likert_five.py} +2 -2
  80. edsl/questions/{derived/question_linear_scale.py → question_linear_scale.py} +3 -4
  81. edsl/questions/question_list.py +246 -26
  82. edsl/questions/question_matrix.py +586 -73
  83. edsl/questions/question_multiple_choice.py +213 -47
  84. edsl/questions/question_numerical.py +360 -29
  85. edsl/questions/question_rank.py +401 -124
  86. edsl/questions/question_registry.py +3 -3
  87. edsl/questions/{derived/question_top_k.py → question_top_k.py} +3 -3
  88. edsl/questions/{derived/question_yes_no.py → question_yes_no.py} +3 -4
  89. edsl/questions/register_questions_meta.py +2 -1
  90. edsl/questions/response_validator_abc.py +6 -2
  91. edsl/questions/response_validator_factory.py +10 -12
  92. edsl/results/report.py +1 -1
  93. edsl/results/result.py +7 -4
  94. edsl/results/results.py +500 -271
  95. edsl/results/results_selector.py +2 -2
  96. edsl/scenarios/construct_download_link.py +3 -3
  97. edsl/scenarios/scenario.py +1 -2
  98. edsl/scenarios/scenario_list.py +41 -23
  99. edsl/surveys/survey_css.py +3 -3
  100. edsl/surveys/survey_simulator.py +2 -1
  101. edsl/tasks/__init__.py +22 -2
  102. edsl/tasks/exceptions.py +72 -0
  103. edsl/tasks/task_history.py +48 -11
  104. edsl/templates/error_reporting/base.html +37 -4
  105. edsl/templates/error_reporting/exceptions_table.html +105 -33
  106. edsl/templates/error_reporting/interview_details.html +130 -126
  107. edsl/templates/error_reporting/overview.html +21 -25
  108. edsl/templates/error_reporting/report.css +215 -46
  109. edsl/templates/error_reporting/report.js +122 -20
  110. edsl/tokens/__init__.py +27 -1
  111. edsl/tokens/exceptions.py +37 -0
  112. edsl/tokens/interview_token_usage.py +3 -2
  113. edsl/tokens/token_usage.py +4 -3
  114. {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/METADATA +1 -1
  115. {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/RECORD +118 -116
  116. edsl/questions/derived/__init__.py +0 -0
  117. {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/LICENSE +0 -0
  118. {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/WHEEL +0 -0
  119. {edsl-0.1.50.dist-info → edsl-0.1.52.dist-info}/entry_points.txt +0 -0
edsl/questions/question_rank.py
@@ -1,7 +1,9 @@
 from __future__ import annotations
-from typing import Optional, Any, List, Annotated, Literal
+from typing import Optional, Any, List, Union
+import random
+import re
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, model_validator, ValidationError
 
 from .question_base import QuestionBase
 from .descriptors import (
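Note on the import changes: random and re are new module-level imports in 0.1.52. re backs the digit-extraction fallback in the rewritten validator below, and random backs the reinstated _simulate_answer method. For reference, the extraction pattern used later in this file behaves like this (a standalone illustration, not edsl code):

    import re

    # \b(\d+)\b pulls whole numbers out of free-form model output
    print(re.findall(r'\b(\d+)\b', "I'd rank 0 first, then 2"))  # prints ['0', '2']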
@@ -9,124 +11,359 @@ from .descriptors import (
     NumSelectionsDescriptor,
 )
 from .response_validator_abc import ResponseValidatorABC
+from .exceptions import QuestionAnswerValidationError
 from ..scenarios import Scenario
 
 
+class RankResponseBase(BaseModel):
+    """
+    Base model for rank question responses.
+
+    Attributes:
+        answer: A list of selected choices in ranked order
+        comment: Optional comment about the ranking
+        generated_tokens: Optional token usage data
+
+    Examples:
+        >>> # Valid response with numeric indices
+        >>> model = RankResponseBase(answer=[0, 1], comment="First and second choices")
+        >>> model.answer
+        [0, 1]
+
+        >>> # Valid response with string options
+        >>> model = RankResponseBase(answer=["Pizza", "Pasta"])
+        >>> model.answer
+        ['Pizza', 'Pasta']
+    """
+    answer: List[Any]
+    comment: Optional[str] = None
+    generated_tokens: Optional[Any] = None
+
+
 def create_response_model(
-    choices: list,
+    choices: Union[list, range],
     num_selections: Optional[int] = None,
     permissive: bool = False,
 ):
     """
-    :param choices: A list of allowed values for the answer field.
-    :param include_comment: Whether to include a comment field in the model.
-    :return: A new Pydantic model class.
+    Creates a Pydantic model for rank question responses with appropriate validation.
+
+    Args:
+        choices: A list of allowed values for the answer field
+        num_selections: The exact number of selections required (if not permissive)
+        permissive: If True, allows any number of selections
+
+    Returns:
+        A Pydantic model class for validating rank responses
+
+    Examples:
+        >>> # Create a model for ranking 2 options from ["Pizza", "Pasta", "Salad", "Soup"]
+        >>> Model = create_response_model(["Pizza", "Pasta", "Salad", "Soup"], num_selections=2)
+        >>> response = Model(answer=["Pizza", "Pasta"])
+        >>> response.answer
+        ['Pizza', 'Pasta']
+
+        >>> # Invalid: too many selections
+        >>> try:
+        ...     Model(answer=["Pizza", "Pasta", "Salad"])
+        ... except Exception:
+        ...     print("Validation error occurred")
+        Validation error occurred
     """
-    # Convert the choices list to a tuple for use with Literal
-    choice_tuple = tuple(choices)
-
-    field_params = {}
-    if num_selections is not None and not permissive:
-        field_params["min_items"] = num_selections
-        field_params["max_items"] = num_selections
-
-    class RankResponse(BaseModel):
-        answer: Annotated[
-            List[Literal[choice_tuple]],
-            Field(..., **field_params),
-        ] = Field(..., description="List of selected choices")
-        comment: Optional[str] = Field(None, description="Optional comment field")
-        generated_tokens: Optional[Any] = Field(None)
-
+    # Convert the choices to a tuple for Literal type annotation
+    choice_tuple = tuple(choices) if not isinstance(choices, range) else tuple(choices)
+
+    # Create a custom validation model that extends the base model
+    class RankResponse(RankResponseBase):
+        """
+        Model for rank question responses with validation for the specific choices and constraints.
+        """
+        # Use Annotated to add field metadata while keeping the type as List[Any]
+        # We'll validate the actual items in the model_validator
+        answer: List[Any] = Field(..., description="List of selected choices in ranked order")
+
+        @model_validator(mode='after')
+        def validate_answer_items(self):
+            """
+            Validates that:
+            1. All items in the answer are valid choices
+            2. The correct number of selections is made (if not permissive)
+            3. No duplicates exist in the ranking
+            """
+            answer = self.answer
+
+            # Check if the correct number of selections is made
+            if num_selections is not None and not permissive:
+                if len(answer) != num_selections:
+                    validation_error = ValidationError.from_exception_data(
+                        title='RankResponse',
+                        line_errors=[{
+                            'type': 'value_error',
+                            'loc': ('answer',),
+                            'msg': f'Expected exactly {num_selections} selections, got {len(answer)}',
+                            'input': answer,
+                            'ctx': {'expected': num_selections, 'actual': len(answer)}
+                        }]
+                    )
+                    raise QuestionAnswerValidationError(
+                        message=f"Number of selections must be exactly {num_selections}",
+                        data=self.model_dump(),
+                        model=self.__class__,
+                        pydantic_error=validation_error
+                    )
+
+            # Check for duplicates
+            if len(answer) != len(set(answer)):
+                validation_error = ValidationError.from_exception_data(
+                    title='RankResponse',
+                    line_errors=[{
+                        'type': 'value_error',
+                        'loc': ('answer',),
+                        'msg': 'Duplicate items found in ranking',
+                        'input': answer,
+                        'ctx': {'error': 'Duplicate items are not allowed in rankings'}
+                    }]
+                )
+                raise QuestionAnswerValidationError(
+                    message="Rankings must not contain duplicate items",
+                    data=self.model_dump(),
+                    model=self.__class__,
+                    pydantic_error=validation_error
+                )
+
+            # If not permissive, validate that all items are in the allowed choices
+            if not permissive:
+                # Check each item against the allowed choices
+                for idx, item in enumerate(answer):
+                    if item not in choice_tuple:
+                        validation_error = ValidationError.from_exception_data(
+                            title='RankResponse',
+                            line_errors=[{
+                                'type': 'value_error',
+                                'loc': ('answer', idx),
+                                'msg': f'Value {item} is not a valid choice',
+                                'input': item,
+                                'ctx': {'allowed_values': choice_tuple}
+                            }]
+                        )
+                        raise QuestionAnswerValidationError(
+                            message=f"Item '{item}' is not in the allowed choices",
+                            data=self.model_dump(),
+                            model=self.__class__,
+                            pydantic_error=validation_error
+                        )
+
+            return self
+
         class Config:
             @staticmethod
             def json_schema_extra(schema: dict, model: BaseModel) -> None:
                 # Add the list of choices to the schema for better documentation
                 for prop in schema.get("properties", {}).values():
                     if prop.get("title") == "answer":
-                        prop["items"] = {"enum": choices}
+                        prop["items"] = {"enum": list(choices) if not isinstance(choices, range) else list(choices)}
 
     return RankResponse
 
 
 class RankResponseValidator(ResponseValidatorABC):
+    """
+    Validator for rank question responses that attempts to fix invalid responses.
+
+    This validator tries multiple strategies to recover a valid ranking from
+    malformed responses, including parsing comma-separated strings, extracting
+    numbers or options from text, and more.
+    """
     required_params = ["num_selections", "permissive", "use_code", "question_options"]
-    valid_examples = []
-    invalid_examples = []
+
+    valid_examples = [
+        (
+            {"answer": [0, 1]},
+            {"num_selections": 2, "use_code": True, "permissive": False,
+             "question_options": ["Pizza", "Pasta", "Salad", "Soup"]},
+        ),
+        (
+            {"answer": ["Pizza", "Pasta"]},
+            {"num_selections": 2, "use_code": False, "permissive": False,
+             "question_options": ["Pizza", "Pasta", "Salad", "Soup"]},
+        ),
+    ]
+
+    invalid_examples = [
+        (
+            {"answer": [0, 0]},
+            {"num_selections": 2, "use_code": True, "permissive": False,
+             "question_options": ["Pizza", "Pasta", "Salad", "Soup"]},
+            "Duplicate items found in ranking",
+        ),
+        (
+            {"answer": [0, 1, 2]},
+            {"num_selections": 2, "use_code": True, "permissive": False,
+             "question_options": ["Pizza", "Pasta", "Salad", "Soup"]},
+            "Expected exactly 2 selections",
+        ),
+        (
+            {"answer": [5, 6]},
+            {"num_selections": 2, "use_code": True, "permissive": False,
+             "question_options": ["Pizza", "Pasta", "Salad", "Soup"]},
+            "not in the allowed choices",
+        ),
+    ]
 
     def fix(self, response, verbose=False):
+        """
+        Attempts to fix an invalid rank response by trying multiple parsing strategies.
+
+        Args:
+            response: The invalid response to fix
+            verbose: Whether to print verbose debugging information
+
+        Returns:
+            A fixed response dict if fixable, otherwise the original response
+        """
         if verbose:
-            print("Invalid response of QuestionRank was: ", False)
-        response_text = response.get("generated_tokens")
-        if response_text is None or response_text == "":  # nothing to be done
+            print(f"Fixing rank response: {response}")
+
+        # If there's no answer field or it's empty, nothing to fix
+        if "answer" not in response or not response["answer"]:
+            if verbose:
+                print("No answer field or empty answer, nothing to fix")
             return response
-        # Maybe it's a comma separated list?
-        response_text = str(response.get("answer"))
+
+        # Strategy 1: Parse from answer if it's a string
+        if isinstance(response.get("answer"), str):
+            text = response["answer"]
+            # Try a few parsing approaches
+            proposed_list = self._parse_answer_from_text(text)
+            if proposed_list:
+                proposed_data = {
+                    "answer": proposed_list,
+                    "comment": response.get("comment"),
+                    "generated_tokens": response.get("generated_tokens")
+                }
+                try:
+                    self.response_model(**proposed_data)
+                    if verbose:
+                        print(f"Successfully fixed by parsing string: {proposed_data}")
+                    return proposed_data
+                except Exception as e:
+                    if verbose:
+                        print(f"Failed to validate after string parsing: {e}")
+
+        # Strategy 2: Try to parse from generated_tokens if available
+        if "generated_tokens" in response and response["generated_tokens"]:
+            text = str(response["generated_tokens"])
+            proposed_list = self._parse_answer_from_text(text)
+
+            if proposed_list:
+                proposed_data = {
+                    "answer": proposed_list,
+                    "comment": response.get("comment"),
+                    "generated_tokens": response.get("generated_tokens")
+                }
+                try:
+                    self.response_model(**proposed_data)
+                    if verbose:
+                        print(f"Successfully fixed by parsing generated_tokens: {proposed_data}")
+                    return proposed_data
+                except Exception as e:
+                    if verbose:
+                        print(f"Failed to validate after generated_tokens parsing: {e}")
+
+        # Strategy 3: Look for mentions of options in the text
+        if isinstance(response.get("answer"), str) or "generated_tokens" in response:
+            text = str(response.get("answer", "")) + " " + str(response.get("generated_tokens", ""))
+            matches = []
+
+            # Extract by index or by option text
+            if self.use_code:
+                # Look for indices in the text
+                indices = re.findall(r'\b(\d+)\b', text)
+                for idx in indices:
+                    try:
+                        idx_int = int(idx)
+                        if 0 <= idx_int < len(self.question_options) and idx_int not in matches:
+                            matches.append(idx_int)
+                    except ValueError:
+                        continue
+            else:
+                # Look for options in the text
+                for option in self.question_options:
+                    if option in text and option not in matches:
+                        matches.append(option)
+
+            # If we found enough matches, try to use them
+            if matches and (self.permissive or len(matches) == self.num_selections):
+                proposed_data = {
+                    "answer": matches[:self.num_selections] if not self.permissive else matches,
+                    "comment": response.get("comment"),
+                    "generated_tokens": response.get("generated_tokens")
+                }
+                try:
+                    self.response_model(**proposed_data)
+                    if verbose:
+                        print(f"Successfully fixed by extracting mentions: {proposed_data}")
+                    return proposed_data
+                except Exception as e:
+                    if verbose:
+                        print(f"Failed to validate after extracting mentions: {e}")
+
+        # If we got here, we couldn't fix the response
+        if verbose:
+            print("Could not fix rank response, returning original")
+        return response
+
+    def _parse_answer_from_text(self, text):
+        """
+        Parse an answer list from text using multiple strategies.
+
+        Args:
+            text: The text to parse
+
+        Returns:
+            A list of parsed options or indices, or None if parsing failed
+        """
+        # Try comma-separated list
         proposed_list = (
-            response_text.replace("[", "").replace("]", "").replace("'", "").split(",")
+            text.replace("[", "").replace("]", "").replace("'", "").replace('"', "").split(",")
         )
-        proposed_list = [item.strip() for item in proposed_list]
-
-        if verbose:
-            print("Using code? ", self.use_code)
-        if self.use_code:
+        proposed_list = [item.strip() for item in proposed_list if item.strip()]
+
+        # Convert to integers if using code indices
+        if self.use_code and proposed_list:
             try:
                 proposed_list = [int(i) for i in proposed_list]
             except ValueError:
-                # print("Could not convert to int")
-                pass
-
-        if verbose:
-            print("Proposed solution is: ", proposed_list)
-
-        # print(f"Ivalid generated tokens was was: {response_text}")
-        if "comment" in response:
-            proposed_data = {
-                "answer": proposed_list,
-                "comment": response["comment"],
-                "generated_tokens": response.get("generated_tokens", None),
-            }
-        else:
-            proposed_data = {
-                "answer": proposed_list,
-                "generated_tokens": response.get("generated_tokens", None),
-            }
-
-        try:
-            self.response_model(**proposed_data)
-            return proposed_data
-        except Exception as e:
-            if verbose:
-                print(f"Proposed solution {proposed_data} is invalid. Error: {e}")
-            # return response
-        if verbose:
-            print("Now seeing if responses show up in the answer")
-        matches = []
-        for index, option in enumerate(self.question_options):
-            if self.use_code:
-                if str(index) in response_text:
-                    if index not in matches:
-                        matches.append(index)
-            else:
-                if option in response_text:
-                    if option not in matches:
-                        matches.append(option)
-        proposed_data = {
-            "answer": matches,
-            "comment": response.get("comment", None),
-            "generated_tokens": response.get("generated_tokens", None),
-        }
-        try:
-            self.response_model(**proposed_data)
-            return proposed_data
-        except Exception as e:
-            if verbose:
-                print(f"Proposed solution {proposed_data} is invalid. Error: {e}")
-        return response
+                # If conversion fails but we're using codes, try to extract numbers
+                numbers = re.findall(r'\b(\d+)\b', text)
+                if numbers:
+                    try:
+                        proposed_list = [int(num) for num in numbers]
+                    except ValueError:
+                        pass
+
+        return proposed_list if proposed_list else None
 
 
 class QuestionRank(QuestionBase):
-    """This question prompts the agent to rank options from a list."""
+    """
+    A question that prompts the agent to rank options from a list.
+
+    This question type asks respondents to put options in order of preference,
+    importance, or any other ordering criteria. The response is a list of
+    selected options in ranked order.
+
+    Examples:
+        >>> # Create a ranking question for food preferences
+        >>> question = QuestionRank(
+        ...     question_name="food_ranking",
+        ...     question_text="Rank these foods from most to least favorite.",
+        ...     question_options=["Pizza", "Pasta", "Salad", "Soup"],
+        ...     num_selections=2
+        ... )
+        >>> # The response should be a ranked list
+        >>> response = {"answer": ["Pizza", "Pasta"], "comment": "I prefer Italian food."}
+    """
 
     question_type = "rank"
     question_options: list[str] = QuestionOptionsDescriptor()
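Note: the key design change above is that 0.1.50 encoded the constraints in the field type (List[Literal[choice_tuple]] plus min_items/max_items), while 0.1.52 keeps the field as List[Any] and enforces count, uniqueness, and membership in a model_validator, so failures can be wrapped in edsl's QuestionAnswerValidationError. A minimal self-contained sketch of the same pattern, using plain ValueError instead of the edsl exception wrapper (illustrative names, pydantic v2):

    from typing import Any, List, Optional
    from pydantic import BaseModel, Field, model_validator

    CHOICES = ("Pizza", "Pasta", "Salad", "Soup")
    NUM_SELECTIONS = 2

    class RankSketch(BaseModel):
        answer: List[Any] = Field(..., description="Selected choices in ranked order")
        comment: Optional[str] = None

        @model_validator(mode="after")
        def check_answer(self):
            # Same three rules as the diff: count, no duplicates, membership
            if len(self.answer) != NUM_SELECTIONS:
                raise ValueError(f"Expected exactly {NUM_SELECTIONS} selections")
            if len(self.answer) != len(set(self.answer)):
                raise ValueError("Duplicate items are not allowed in rankings")
            for item in self.answer:
                if item not in CHOICES:
                    raise ValueError(f"{item!r} is not a valid choice")
            return self

    print(RankSketch(answer=["Pizza", "Pasta"]).answer)  # ['Pizza', 'Pasta']
    # RankSketch(answer=["Pizza", "Pizza"])  # would raise: duplicate items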
@@ -147,13 +384,19 @@ class QuestionRank(QuestionBase):
         use_code: bool = True,
         include_comment: bool = True,
     ):
-        """Initialize the question.
-
-        :param question_name: The name of the question.
-        :param question_text: The text of the question.
-        :param question_options: The options the respondent should select from.
-        :param min_selections: The minimum number of options that must be selected.
-        :param max_selections: The maximum number of options that must be selected.
+        """
+        Initialize a rank question.
+
+        Args:
+            question_name: The name of the question
+            question_text: The text of the question
+            question_options: The options the respondent should rank
+            num_selections: The number of options to select and rank (defaults to all)
+            question_presentation: Custom presentation template (optional)
+            answering_instructions: Custom instructions template (optional)
+            permissive: Whether to relax validation constraints
+            use_code: Whether to use numeric indices (0,1,2) instead of option text
+            include_comment: Whether to include a comment field
         """
         self.question_name = question_name
         self.question_text = question_text
@@ -166,6 +409,12 @@ class QuestionRank(QuestionBase):
         self.include_comment = include_comment
 
     def create_response_model(self):
+        """
+        Returns the pydantic model for validating responses to this question.
+
+        The model is dynamically created based on the question's configuration,
+        including allowed choices, number of selections, and permissiveness.
+        """
         choices = (
             self.question_options
             if not self.use_code
@@ -177,20 +426,19 @@ class QuestionRank(QuestionBase):
             permissive=self.permissive,
         )
 
-    ################
-    # Answer methods
-    ################
-    # def _validate_answer(self, answer: Any) -> dict[str, list[int]]:
-    #     """Validate the answer."""
-    #     self._validate_answer_template_basic(answer)
-    #     self._validate_answer_key_value(answer, "answer", list)
-    #     self._validate_answer_rank(answer)
-    #     return answer
-
     def _translate_answer_code_to_answer(
         self, answer_codes, scenario: Scenario = None
     ) -> list[str]:
-        """Translate the answer code to the actual answer."""
+        """
+        Translate numeric answer codes to the actual option text.
+
+        Args:
+            answer_codes: The codes to translate
+            scenario: The scenario for template rendering (optional)
+
+        Returns:
+            A list of translated option texts
+        """
         from jinja2 import Template
 
         scenario = scenario or Scenario()
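The hunk that follows also fixes a latent bug in this method: 0.1.50 tested self._use_code, but the attribute set in __init__ and read everywhere else is self.use_code. The translation step itself renders each option through Jinja and, when use_code is enabled, maps integer codes back to option text; a standalone sketch of that mapping (plain string options, empty scenario):

    from jinja2 import Template

    question_options = ["Pizza", "Pasta", "Salad", "Soup"]
    scenario = {}  # no template placeholders in this sketch
    translated_options = [Template(str(o)).render(scenario) for o in question_options]

    answer_codes = [0, 2]  # with use_code=True, the model answers with indices
    print([translated_options[int(c)] for c in answer_codes])  # ['Pizza', 'Salad']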
@@ -199,30 +447,53 @@ class QuestionRank(QuestionBase):
         ]
         translated_codes = []
         for answer_code in answer_codes:
-            if self._use_code:
+            if self.use_code:
                 translated_codes.append(translated_options[int(answer_code)])
             else:
                 translated_codes.append(answer_code)
         return translated_codes
 
-    # def _simulate_answer(self, human_readable=True) -> dict[str, Union[int, str]]:
-    #     """Simulate a valid answer for debugging purposes."""
-    #     from edsl.utilities.utilities import random_string
+    def _simulate_answer(self, human_readable=True) -> dict:
+        """
+        Simulate a valid answer for testing purposes.
+
+        Args:
+            human_readable: Whether to use option text (True) or indices (False)
+
+        Returns:
+            A valid simulated response
+        """
+        from ..utilities.utilities import random_string
 
-    #     if human_readable:
-    #         selected = random.sample(self.question_options, self.num_selections)
-    #     else:
-    #         selected = random.sample(
-    #             range(len(self.question_options)), self.num_selections
-    #         )
-    #     answer = {
-    #         "answer": selected,
-    #         "comment": random_string(),
-    #     }
-    #     return answer
+        # Handle the simulation logic based on use_code and human_readable flags
+        if human_readable:
+            if not self.use_code:
+                # When human_readable=True and not using code, return text options
+                selected = random.sample(self.question_options, self.num_selections)
+            else:
+                # When human_readable=True but we're configured to use_code,
+                # still use the option text for better test compatibility
+                selected = random.sample(self.question_options, self.num_selections)
+        else:
+            # When human_readable=False, always use indices
+            selected = random.sample(
+                range(len(self.question_options)), self.num_selections
+            )
+
+        answer = {
+            "answer": selected,
+            "comment": random_string(),
+        }
+        return answer
 
     @property
     def question_html_content(self) -> str:
+        """
+        Generate an HTML representation of the ranking question.
+
+        Returns:
+            HTML content string for rendering the question
+        """
        from jinja2 import Template
 
        question_html_content = Template(
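_simulate_answer returns from a long exile as commented-out code, now importing random_string via a relative path (..utilities.utilities) rather than the old absolute edsl.utilities.utilities. The sampling itself is just random.sample; for instance (hypothetical four-option question with num_selections=2):

    import random

    question_options = ["Pizza", "Pasta", "Salad", "Soup"]
    num_selections = 2

    print(random.sample(question_options, num_selections))              # e.g. ['Salad', 'Pizza']
    print(random.sample(range(len(question_options)), num_selections))  # e.g. [2, 0]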
@@ -267,12 +538,18 @@ class QuestionRank(QuestionBase):
         )
         return question_html_content
 
-    ################
-    # Helpful methods
-    ################
     @classmethod
     def example(cls, use_code=False, include_comment=True) -> QuestionRank:
-        """Return an example question."""
+        """
+        Return an example rank question.
+
+        Args:
+            use_code: Whether to use numeric indices
+            include_comment: Whether to include a comment field
+
+        Returns:
+            An example QuestionRank instance
+        """
         return cls(
             question_name="rank_foods",
             question_text="Rank your favorite foods.",
edsl/questions/question_registry.py
@@ -62,7 +62,7 @@ class Question(metaclass=Meta):
     @classmethod
     def pull(cls, url_or_uuid: Union[str, UUID]):
         """Pull the object from coop."""
-        from edsl.coop import Coop
+        from ..coop import Coop
 
         coop = Coop()
         return coop.get(url_or_uuid, "question")
@@ -70,7 +70,7 @@ class Question(metaclass=Meta):
     @classmethod
     def delete(cls, url_or_uuid: Union[str, UUID]):
         """Delete the object from coop."""
-        from edsl.coop import Coop
+        from ..coop import Coop
 
         coop = Coop()
         return coop.delete(url_or_uuid)
@@ -84,7 +84,7 @@ class Question(metaclass=Meta):
         visibility: Optional[str] = None,
     ):
         """Patch the object on coop."""
-        from edsl.coop import Coop
+        from ..coop import Coop
 
         coop = Coop()
         return coop.patch(url_or_uuid, description, value, visibility)
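These three hunks swap absolute edsl.coop imports for package-relative ones. The import stays inside each classmethod, so the coop module is only loaded on use, and the relative form keeps working if the top-level package is renamed or vendored; the general pattern, under a hypothetical layout:

    # pkg/questions/registry.py (hypothetical package mirroring the edsl layout)
    class Question:
        @classmethod
        def pull(cls, url_or_uuid):
            # Deferred, relative import: pkg.coop loads only when pull() runs,
            # which also sidesteps circular imports between pkg.coop and pkg.questions.
            from ..coop import Coop
            return Coop().get(url_or_uuid, "question")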
edsl/questions/question_top_k.py (moved from edsl/questions/derived/question_top_k.py)
@@ -1,9 +1,9 @@
 from __future__ import annotations
 from typing import Optional
 
-from ..exceptions import QuestionCreationValidationError
-from ..question_check_box import QuestionCheckBox
-from ..decorators import inject_exception
+from .exceptions import QuestionCreationValidationError
+from .question_check_box import QuestionCheckBox
+from .decorators import inject_exception
 
 
 class QuestionTopK(QuestionCheckBox):
edsl/questions/question_yes_no.py (moved from edsl/questions/derived/question_yes_no.py)
@@ -1,9 +1,8 @@
 from __future__ import annotations
 from typing import Optional
-from ..descriptors import QuestionOptionsDescriptor
-from ..question_multiple_choice import QuestionMultipleChoice
-
-from ..decorators import inject_exception
+from .descriptors import QuestionOptionsDescriptor
+from .question_multiple_choice import QuestionMultipleChoice
+from .decorators import inject_exception
 
 
 class QuestionYesNo(QuestionMultipleChoice):
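The last two files moved out of edsl/questions/derived/ (see the rename entries in the file list), which is why each two-dot import drops to a single dot: the modules now sit one package level higher, next to the modules they import. Downstream code that imported them by full module path needs the new location (assuming these module paths are importable as the RECORD rename suggests; re-exports in edsl/questions/__init__.py, which this release also touches, may make the move transparent):

    # 0.1.50:
    #   from edsl.questions.derived.question_top_k import QuestionTopK
    #   from edsl.questions.derived.question_yes_no import QuestionYesNo
    # 0.1.52:
    from edsl.questions.question_top_k import QuestionTopK
    from edsl.questions.question_yes_no import QuestionYesNo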