edsl 0.1.44__py3-none-any.whl → 0.1.46__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only and reflects the changes between those published versions.
Files changed (68)
  1. edsl/Base.py +7 -3
  2. edsl/__version__.py +1 -1
  3. edsl/agents/InvigilatorBase.py +3 -1
  4. edsl/agents/PromptConstructor.py +66 -91
  5. edsl/agents/QuestionInstructionPromptBuilder.py +160 -79
  6. edsl/agents/QuestionTemplateReplacementsBuilder.py +80 -17
  7. edsl/agents/question_option_processor.py +15 -6
  8. edsl/coop/CoopFunctionsMixin.py +3 -4
  9. edsl/coop/coop.py +171 -96
  10. edsl/data/RemoteCacheSync.py +10 -9
  11. edsl/enums.py +3 -3
  12. edsl/inference_services/AnthropicService.py +11 -9
  13. edsl/inference_services/AvailableModelFetcher.py +2 -0
  14. edsl/inference_services/AwsBedrock.py +1 -2
  15. edsl/inference_services/AzureAI.py +12 -9
  16. edsl/inference_services/GoogleService.py +9 -4
  17. edsl/inference_services/InferenceServicesCollection.py +2 -2
  18. edsl/inference_services/MistralAIService.py +1 -2
  19. edsl/inference_services/OpenAIService.py +9 -4
  20. edsl/inference_services/PerplexityService.py +2 -1
  21. edsl/inference_services/{GrokService.py → XAIService.py} +2 -2
  22. edsl/inference_services/registry.py +2 -2
  23. edsl/jobs/AnswerQuestionFunctionConstructor.py +12 -1
  24. edsl/jobs/Jobs.py +24 -17
  25. edsl/jobs/JobsChecks.py +10 -13
  26. edsl/jobs/JobsPrompts.py +49 -26
  27. edsl/jobs/JobsRemoteInferenceHandler.py +4 -5
  28. edsl/jobs/async_interview_runner.py +3 -1
  29. edsl/jobs/check_survey_scenario_compatibility.py +5 -5
  30. edsl/jobs/data_structures.py +3 -0
  31. edsl/jobs/interviews/Interview.py +6 -3
  32. edsl/jobs/interviews/InterviewExceptionEntry.py +12 -0
  33. edsl/jobs/tasks/TaskHistory.py +1 -1
  34. edsl/language_models/LanguageModel.py +6 -3
  35. edsl/language_models/PriceManager.py +45 -5
  36. edsl/language_models/model.py +47 -26
  37. edsl/questions/QuestionBase.py +21 -0
  38. edsl/questions/QuestionBasePromptsMixin.py +103 -0
  39. edsl/questions/QuestionFreeText.py +22 -5
  40. edsl/questions/descriptors.py +4 -0
  41. edsl/questions/question_base_gen_mixin.py +96 -29
  42. edsl/results/Dataset.py +65 -0
  43. edsl/results/DatasetExportMixin.py +320 -32
  44. edsl/results/Result.py +27 -0
  45. edsl/results/Results.py +22 -2
  46. edsl/results/ResultsGGMixin.py +7 -3
  47. edsl/scenarios/DocumentChunker.py +2 -0
  48. edsl/scenarios/FileStore.py +10 -0
  49. edsl/scenarios/PdfExtractor.py +21 -1
  50. edsl/scenarios/Scenario.py +25 -9
  51. edsl/scenarios/ScenarioList.py +226 -24
  52. edsl/scenarios/handlers/__init__.py +1 -0
  53. edsl/scenarios/handlers/docx.py +5 -1
  54. edsl/scenarios/handlers/jpeg.py +39 -0
  55. edsl/surveys/Survey.py +5 -4
  56. edsl/surveys/SurveyFlowVisualization.py +91 -43
  57. edsl/templates/error_reporting/exceptions_table.html +7 -8
  58. edsl/templates/error_reporting/interview_details.html +1 -1
  59. edsl/templates/error_reporting/interviews.html +0 -1
  60. edsl/templates/error_reporting/overview.html +2 -7
  61. edsl/templates/error_reporting/performance_plot.html +1 -1
  62. edsl/templates/error_reporting/report.css +1 -1
  63. edsl/utilities/PrettyList.py +14 -0
  64. edsl-0.1.46.dist-info/METADATA +246 -0
  65. {edsl-0.1.44.dist-info → edsl-0.1.46.dist-info}/RECORD +67 -66
  66. edsl-0.1.44.dist-info/METADATA +0 -110
  67. {edsl-0.1.44.dist-info → edsl-0.1.46.dist-info}/LICENSE +0 -0
  68. {edsl-0.1.44.dist-info → edsl-0.1.46.dist-info}/WHEEL +0 -0
edsl/Base.py CHANGED
@@ -65,10 +65,10 @@ class PersistenceMixin:
     def pull(
         cls,
         url_or_uuid: Optional[Union[str, UUID]] = None,
-        #expected_parrot_url: Optional[str] = None,
+        # expected_parrot_url: Optional[str] = None,
     ):
         """Pull the object from coop.
-
+
         Args:
             url_or_uuid: Either a UUID string or a URL pointing to the object
             expected_parrot_url: Optional URL for the Parrot server
@@ -80,7 +80,11 @@ class PersistenceMixin:
         coop = Coop()
 
         # Determine if input is URL or UUID
-        if url_or_uuid and ("http://" in str(url_or_uuid) or "https://" in str(url_or_uuid)):
+        if "www" in url_or_uuid:
+            url_or_uuid = url_or_uuid.replace("www", "api")
+        if url_or_uuid and (
+            "http://" in str(url_or_uuid) or "https://" in str(url_or_uuid)
+        ):
             return coop.get(url=url_or_uuid, expected_object_type=object_type)
         else:
             return coop.get(uuid=url_or_uuid, expected_object_type=object_type)
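Note: the net effect of the pull() change is that "www" hosts are rewritten to the "api" host before dispatch, and full URLs are still distinguished from bare UUIDs. A minimal standalone sketch of that dispatch logic (the helper name and example URL are illustrative, not part of the package):

    # Illustrative rendering of the logic added in the hunk above.
    def resolve_pull_target(url_or_uuid):
        # New in 0.1.46: "www" hosts are rewritten to the "api" host first.
        if "www" in url_or_uuid:
            url_or_uuid = url_or_uuid.replace("www", "api")
        # Full URLs are fetched by URL; anything else is treated as a UUID.
        if "http://" in str(url_or_uuid) or "https://" in str(url_or_uuid):
            return ("url", url_or_uuid)
        return ("uuid", url_or_uuid)

    assert resolve_pull_target("https://www.example.com/objects/123") == (
        "url",
        "https://api.example.com/objects/123",
    )

Note that the new "www" check runs before the str() coercion on the line below it, so it assumes url_or_uuid is already a string.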
edsl/__version__.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.44"
+__version__ = "0.1.46"
edsl/agents/InvigilatorBase.py CHANGED
@@ -74,7 +74,7 @@ class InvigilatorBase(ABC):
     @property
     def prompt_constructor(self) -> PromptConstructor:
         """Return the prompt constructor."""
-        return PromptConstructor(self, prompt_plan=self.prompt_plan)
+        return PromptConstructor.from_invigilator(self, prompt_plan=self.prompt_plan)
 
     def to_dict(self, include_cache=False) -> Dict[str, Any]:
         attributes = [
@@ -87,6 +87,7 @@ class InvigilatorBase(ABC):
             "iteration",
             "additional_prompt_data",
             "survey",
+            "raw_model_response",
         ]
         if include_cache:
             attributes.append("cache")
@@ -135,6 +136,7 @@ class InvigilatorBase(ABC):
             d["additional_prompt_data"] = data["additional_prompt_data"]
 
         d = cls(**d)
+        d.raw_model_response = data.get("raw_model_response")
         return d
 
     def __repr__(self) -> str:
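Note: with raw_model_response added to both the attribute list and the deserializer, the field now survives a serialize/deserialize round trip. A hedged sketch, assuming the enclosing classmethod shown above is the usual from_dict and that example() returns a concrete invigilator:

    from edsl.agents.InvigilatorBase import InvigilatorBase

    invigilator = InvigilatorBase.example()
    invigilator.raw_model_response = {"id": "resp-1"}  # illustrative payload

    # The attribute now survives a serialize/deserialize cycle.
    restored = InvigilatorBase.from_dict(invigilator.to_dict())
    assert restored.raw_model_response == {"id": "resp-1"}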
edsl/agents/PromptConstructor.py CHANGED
@@ -1,20 +1,16 @@
 from __future__ import annotations
 from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING, Literal
 from functools import cached_property
-from multiprocessing import Pool, freeze_support, get_context
-from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
 import time
 import logging
 
 from edsl.prompts.Prompt import Prompt
 
-from dataclasses import dataclass
-
-from .prompt_helpers import PromptPlan
-from .QuestionTemplateReplacementsBuilder import (
+from edsl.agents.prompt_helpers import PromptPlan
+from edsl.agents.QuestionTemplateReplacementsBuilder import (
     QuestionTemplateReplacementsBuilder,
 )
-from .question_option_processor import QuestionOptionProcessor
+from edsl.agents.question_option_processor import QuestionOptionProcessor
 
 if TYPE_CHECKING:
     from edsl.agents.InvigilatorBase import InvigilatorBase
@@ -71,24 +67,49 @@ class PromptConstructor:
     - The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
     - The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
     """
+    @classmethod
+    def from_invigilator(
+        cls,
+        invigilator: "InvigilatorBase",
+        prompt_plan: Optional["PromptPlan"] = None
+    ) -> "PromptConstructor":
+        return cls(
+            agent=invigilator.agent,
+            question=invigilator.question,
+            scenario=invigilator.scenario,
+            survey=invigilator.survey,
+            model=invigilator.model,
+            current_answers=invigilator.current_answers,
+            memory_plan=invigilator.memory_plan,
+            prompt_plan=prompt_plan
+        )
 
     def __init__(
-        self, invigilator: "InvigilatorBase", prompt_plan: Optional["PromptPlan"] = None
+        self,
+        agent: "Agent",
+        question: "QuestionBase",
+        scenario: "Scenario",
+        survey: "Survey",
+        model: "LanguageModel",
+        current_answers: dict,
+        memory_plan: "MemoryPlan",
+        prompt_plan: Optional["PromptPlan"] = None
     ):
-        self.invigilator = invigilator
+        self.agent = agent
+        self.question = question
+        self.scenario = scenario
+        self.survey = survey
+        self.model = model
+        self.current_answers = current_answers
+        self.memory_plan = memory_plan
         self.prompt_plan = prompt_plan or PromptPlan()
 
-        self.agent = invigilator.agent
-        self.question = invigilator.question
-        self.scenario = invigilator.scenario
-        self.survey = invigilator.survey
-        self.model = invigilator.model
-        self.current_answers = invigilator.current_answers
-        self.memory_plan = invigilator.memory_plan
-
-    def get_question_options(self, question_data):
+    def get_question_options(self, question_data: dict) -> list[str]:
         """Get the question options."""
-        return QuestionOptionProcessor(self).get_question_options(question_data)
+        return (QuestionOptionProcessor
+            .from_prompt_constructor(self)
+            .get_question_options(question_data)
+        )
 
     @cached_property
     def agent_instructions_prompt(self) -> Prompt:
@@ -121,23 +142,29 @@ class PromptConstructor:
         return self.agent.prompt()
 
     def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
-        """This is a dictionary of prior answers, if they exist."""
+        """This is a dictionary of prior answers, if they exist.
+
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example()
+        >>> i.prompt_constructor.prior_answers_dict()
+        {'q0': ..., 'q1': ...}
+        """
         return self._add_answers(
             self.survey.question_names_to_questions(), self.current_answers
         )
 
     @staticmethod
-    def _extract_quetion_and_entry_type(key_entry) -> tuple[str, str]:
+    def _extract_question_and_entry_type(key_entry) -> tuple[str, str]:
         """
         Extracts the question name and type for the current answer dictionary key entry.
 
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0")
+        >>> PromptConstructor._extract_question_and_entry_type("q0")
         ('q0', 'answer')
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0_comment")
+        >>> PromptConstructor._extract_question_and_entry_type("q0_comment")
         ('q0', 'comment')
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0_alternate_generated_tokens")
+        >>> PromptConstructor._extract_question_and_entry_type("q0_alternate_generated_tokens")
         ('q0_alternate', 'generated_tokens')
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0_alt_comment")
+        >>> PromptConstructor._extract_question_and_entry_type("q0_alt_comment")
         ('q0_alt', 'comment')
         """
         split_list = key_entry.rsplit("_", maxsplit=1)
@@ -167,7 +194,7 @@ class PromptConstructor:
         d = defaultdict(dict)
         for key, value in current_answers.items():
             question_name, entry_type = (
-                PromptConstructor._extract_quetion_and_entry_type(key)
+                PromptConstructor._extract_question_and_entry_type(key)
             )
             d[question_name][entry_type] = value
         return dict(d)
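Note: the two hunks above track the typo fix (_extract_quetion_... becomes _extract_question_...) at the definition and its one call site. For orientation, the helper splits a current-answers key on its final underscore; this usage sketch just restates its doctests:

    from edsl.agents.PromptConstructor import PromptConstructor

    # The suffix after the last "_" names the entry type; a bare
    # question name defaults to the "answer" entry type.
    assert PromptConstructor._extract_question_and_entry_type("q0") == ("q0", "answer")
    assert PromptConstructor._extract_question_and_entry_type("q0_comment") == ("q0", "comment")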
@@ -198,9 +225,10 @@ class PromptConstructor:
     @cached_property
     def question_file_keys(self) -> list:
         """Extracts the file keys from the question text.
+
         It checks if the variables in the question text are in the scenario file keys.
         """
-        return QuestionTemplateReplacementsBuilder(self).question_file_keys()
+        return QuestionTemplateReplacementsBuilder.from_prompt_constructor(self).question_file_keys()
 
     @cached_property
     def question_instructions_prompt(self) -> Prompt:
@@ -219,7 +247,7 @@ class PromptConstructor:
             QuestionInstructionPromptBuilder,
         )
 
-        return QuestionInstructionPromptBuilder(self).build()
+        return QuestionInstructionPromptBuilder.from_prompt_constructor(self).build()
 
     @cached_property
     def prior_question_memory_prompt(self) -> Prompt:
@@ -247,97 +275,44 @@ class PromptConstructor:
             question_name, self.current_answers
         )
 
-    def get_prompts(self, parallel: Literal["thread", "process", None] = None) -> Dict[str, Any]:
+    def get_prompts(self) -> Dict[str, Any]:
         """Get the prompts for the question."""
         start = time.time()
 
         # Build all the components
-        instr_start = time.time()
         agent_instructions = self.agent_instructions_prompt
-        instr_end = time.time()
-        logger.debug(f"Time taken for agent instructions: {instr_end - instr_start:.4f}s")
-
-        persona_start = time.time()
         agent_persona = self.agent_persona_prompt
-        persona_end = time.time()
-        logger.debug(f"Time taken for agent persona: {persona_end - persona_start:.4f}s")
-
-        q_instr_start = time.time()
         question_instructions = self.question_instructions_prompt
-        q_instr_end = time.time()
-        logger.debug(f"Time taken for question instructions: {q_instr_end - q_instr_start:.4f}s")
-
-        memory_start = time.time()
         prior_question_memory = self.prior_question_memory_prompt
-        memory_end = time.time()
-        logger.debug(f"Time taken for prior question memory: {memory_end - memory_start:.4f}s")
-
+
         # Get components dict
         components = {
             "agent_instructions": agent_instructions.text,
             "agent_persona": agent_persona.text,
             "question_instructions": question_instructions.text,
             "prior_question_memory": prior_question_memory.text,
-        }
-
-        # Use PromptPlan's get_prompts method
-        plan_start = time.time()
-
+        }
         # Get arranged components first
         arranged = self.prompt_plan.arrange_components(**components)
 
-        if parallel == "process":
-            ctx = get_context('fork')
-            with ctx.Pool() as pool:
-                results = pool.map(_process_prompt, [
-                    (arranged["user_prompt"], {}),
-                    (arranged["system_prompt"], {})
-                ])
-            prompts = {
-                "user_prompt": results[0],
-                "system_prompt": results[1]
-            }
-
-        elif parallel == "thread":
-            with ThreadPoolExecutor() as executor:
-                user_prompt_list = arranged["user_prompt"]
-                system_prompt_list = arranged["system_prompt"]
-
-                # Process both prompt lists in parallel
-                rendered_user = executor.submit(_process_prompt, (user_prompt_list, {}))
-                rendered_system = executor.submit(_process_prompt, (system_prompt_list, {}))
-
-                prompts = {
-                    "user_prompt": rendered_user.result(),
-                    "system_prompt": rendered_system.result()
-                }
-
-        else: # sequential processing
-            prompts = self.prompt_plan.get_prompts(**components)
-
-        plan_end = time.time()
-        logger.debug(f"Time taken for prompt processing: {plan_end - plan_start:.4f}s")
+        prompts = self.prompt_plan.get_prompts(**components)
 
         # Handle file keys if present
         if hasattr(self, 'question_file_keys') and self.question_file_keys:
-            files_start = time.time()
             files_list = []
            for key in self.question_file_keys:
                 files_list.append(self.scenario[key])
             prompts["files_list"] = files_list
-            files_end = time.time()
-            logger.debug(f"Time taken for file key processing: {files_end - files_start:.4f}s")
 
-        end = time.time()
-        logger.debug(f"Total time in get_prompts: {end - start:.4f}s")
         return prompts
 
 
-def _process_prompt(args):
-    """Helper function to process a single prompt list with its replacements."""
-    prompt_list, replacements = args
-    return prompt_list.reduce()
+# def _process_prompt(args):
+#     """Helper function to process a single prompt list with its replacements."""
+#     prompt_list, replacements = args
+#     return prompt_list.reduce()
 
 
 if __name__ == '__main__':
-    freeze_support()
+    import doctest
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
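Note: taken together, the PromptConstructor changes swap the single invigilator argument for explicit dependencies (get_prompts() also loses its parallel= parameter). A hedged sketch of the two construction paths; the bare names (invigilator, agent, question, ...) are placeholders for objects built elsewhere:

    # Old call sites migrate to the factory classmethod:
    constructor = PromptConstructor.from_invigilator(invigilator)

    # New explicit form: the class no longer needs an InvigilatorBase,
    # so it can be constructed (and tested) from its collaborators directly.
    constructor = PromptConstructor(
        agent=agent,
        question=question,
        scenario=scenario,
        survey=survey,
        model=model,
        current_answers={},
        memory_plan=memory_plan,
    )
    prompts = constructor.get_prompts()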
edsl/agents/QuestionInstructionPromptBuilder.py CHANGED
@@ -1,8 +1,16 @@
-from typing import Dict, List, Set
+from typing import Dict, List, Set, Any, Union, TYPE_CHECKING
 from warnings import warn
 import logging
 from edsl.prompts.Prompt import Prompt
 
+if TYPE_CHECKING:
+    from edsl.agents.PromptConstructor import PromptConstructor
+    from edsl import Model
+    from edsl import Survey
+    from edsl.questions.QuestionBase import QuestionBase
+    from edsl import Scenario
+    from edsl import Agent
+
 from edsl.agents.QuestionTemplateReplacementsBuilder import (
     QuestionTemplateReplacementsBuilder as QTRB,
 )
@@ -11,97 +19,179 @@ from edsl.agents.QuestionTemplateReplacementsBuilder import (
 class QuestionInstructionPromptBuilder:
     """Handles the construction and rendering of question instructions."""
 
-    def __init__(self, prompt_constructor: "PromptConstructor"):
-        self.prompt_constructor = prompt_constructor
+    @classmethod
+    def from_prompt_constructor(cls, prompt_constructor: "PromptConstructor"):
+
+        model = prompt_constructor.model
+        survey = prompt_constructor.survey
+        question = prompt_constructor.question
+        scenario = prompt_constructor.scenario
+        prior_answers_dict = prompt_constructor.prior_answers_dict()
+        agent = prompt_constructor.agent
+        return cls(
+            prompt_constructor,
+            model,
+            survey,
+            question,
+            scenario,
+            prior_answers_dict,
+            agent,
+        )
+
+    def __init__(
+        self,
+        prompt_constructor: "PromptConstructor",
+        model: "Model",
+        survey: "Survey",
+        question: "QuestionBase",
+        scenario: "Scenario",
+        prior_answers_dict: Dict[str, Any],
+        agent: "Agent",
+    ):
+
+        self.qtrb = QTRB(scenario, question, prior_answers_dict, agent)
 
-        self.model = self.prompt_constructor.model
-        self.survey = self.prompt_constructor.survey
-        self.question = self.prompt_constructor.question
+        self.model = model
+        self.survey = survey
+        self.question = question
+        self.agent = agent
+        self.scenario = scenario
+        self.prior_answers_dict = prior_answers_dict
 
     def build(self) -> Prompt:
         """Builds the complete question instructions prompt with all necessary components.
 
         Returns:
-            Prompt: The fully rendered question instructions
+            Prompt: The fully rendered question instructions to be send to the Language Model
+
+        >>> from edsl import QuestionMultipleChoice
+        >>> from edsl import Survey
+        >>> q = Survey.example().questions[0]
+        >>> from edsl import Model
+        >>> class FakePromptConstructor:
+        ...     def __init__(self, scenario, question, agent):
+        ...         self.scenario = scenario
+        ...         self.question = question
+        ...         self.agent = agent
+        ...         self.model = Model('test')
+        ...         self.survey = Survey.example()
+        ...     scenario = {"file1": "file1"}
+        ...     question = q
+        ...     agent = "agent"
+        ...     def prior_answers_dict(self):
+        ...         return {'q0': 'q0'}
+        >>> mpc = FakePromptConstructor(
+        ...     scenario={"file1": "file1"},
+        ...     question=q,
+        ...     agent="agent"
+        ... )
+        >>> qipb = QuestionInstructionPromptBuilder.from_prompt_constructor(mpc)
+        >>> qipb.build()
+        Prompt(text=\"""
+        Do you like school?
+        <BLANKLINE>
+        <BLANKLINE>
+        yes
+        <BLANKLINE>
+        no
+        <BLANKLINE>
+        <BLANKLINE>
+        Only 1 option may be selected.
+        <BLANKLINE>
+        Respond only with a string corresponding to one of the options.
+        <BLANKLINE>
+        <BLANKLINE>
+        After the answer, you can put a comment explaining why you chose that option on the next line.\""")
         """
-        import time
-
-        start = time.time()
-
         # Create base prompt
-        base_start = time.time()
         base_prompt = self._create_base_prompt()
-        base_end = time.time()
-        logging.debug(f"Time for base prompt: {base_end - base_start}")
-
+
         # Enrich with options
-        enrich_start = time.time()
-        enriched_prompt = self._enrich_with_question_options(base_prompt)
-        enrich_end = time.time()
-        logging.debug(f"Time for enriching with options: {enrich_end - enrich_start}")
-
+        enriched_prompt = self._enrich_with_question_options(
+            prompt_data=base_prompt,
+            scenario=self.scenario,
+            prior_answers_dict=self.prior_answers_dict,
+        )
+
         # Render prompt
-        render_start = time.time()
         rendered_prompt = self._render_prompt(enriched_prompt)
-        render_end = time.time()
-        logging.debug(f"Time for rendering prompt: {render_end - render_start}")
-
+
         # Validate template variables
-        validate_start = time.time()
         self._validate_template_variables(rendered_prompt)
-        validate_end = time.time()
-        logging.debug(f"Time for template validation: {validate_end - validate_start}")
-
+
         # Append survey instructions
-        append_start = time.time()
         final_prompt = self._append_survey_instructions(rendered_prompt)
-        append_end = time.time()
-        logging.debug(f"Time for appending survey instructions: {append_end - append_start}")
-
-        end = time.time()
-        logging.debug(f"Total time in build_question_instructions: {end - start}")
-
+
         return final_prompt
 
-    def _create_base_prompt(self) -> Dict:
+    def _create_base_prompt(self) -> Dict[str, Union[Prompt, Dict[str, Any]]]:
         """Creates the initial prompt with basic question data.
 
+        The data are, e.g., the question name, question text, question options, etc.
+
+        >>> from edsl import QuestionMultipleChoice
+        >>> QuestionMultipleChoice.example().data.copy()
+        {'question_name': 'how_feeling', 'question_text': 'How are you?', 'question_options': ['Good', 'Great', 'OK', 'Bad'], 'include_comment': False}
+
         Returns:
-            Dict: Base question data
+            Dict[str, Union[Prompt, Dict[str, Any]]]: Base question data with prompt and data fields
         """
         return {
             "prompt": Prompt(self.question.get_instructions(model=self.model.model)),
             "data": self.question.data.copy(),
         }
 
-    def _enrich_with_question_options(self, prompt_data: Dict) -> Dict:
-        """Enriches the prompt data with question options if they exist.
+    @staticmethod
+    def _process_question_options(
+        question_data: Dict, scenario: "Scenario", prior_answers_dict: Dict
+    ) -> Dict:
+        """Processes and replaces question options in the question data if they exist.
+
+        The question_options could be intended to be replaced with data from a scenario or prior answers.
+
+        >>> question_data = {'question_name': 'q0', 'question_text': 'Do you like school?', 'question_options': '{{ options }}'}
+        >>> scenario = {"options": ["yes", "no"]}
+        >>> prior_answers_dict = {}
+        >>> QuestionInstructionPromptBuilder._process_question_options(question_data, scenario, prior_answers_dict)
+        {'question_name': 'q0', 'question_text': 'Do you like school?', 'question_options': ['yes', 'no']}
 
         Args:
-            prompt_data: Dictionary containing prompt and question data
+            question_data: Dictionary containing question data
+            scenario: Scenario object
+            prior_answers_dict: Dictionary of prior answers
 
         Returns:
-            Dict: Enriched prompt data
+            Dict: Question data with processed question options
         """
-        import time
-
-        start = time.time()
-
-        if "question_options" in prompt_data["data"]:
+        if "question_options" in question_data:
             from edsl.agents.question_option_processor import QuestionOptionProcessor
-
-            processor_start = time.time()
+
             question_options = QuestionOptionProcessor(
-                self.prompt_constructor
-            ).get_question_options(question_data=prompt_data["data"])
-            processor_end = time.time()
-            logging.debug(f"Time to process question options: {processor_end - processor_start}")
-
-            prompt_data["data"]["question_options"] = question_options
-
-        end = time.time()
-        logging.debug(f"Total time in _enrich_with_question_options: {end - start}")
-
+                scenario, prior_answers_dict
+            ).get_question_options(question_data=question_data)
+            question_data["question_options"] = question_options
+
+        return question_data
+
+    @staticmethod
+    def _enrich_with_question_options(
+        prompt_data: Dict, scenario: "Scenario", prior_answers_dict: Dict
+    ) -> Dict:
+        """Enriches the prompt data with processed question options if they exist.
+
+        Args:
+            prompt_data: Dictionary containing prompt and question data
+            scenario: Scenario object
+            prior_answers_dict: Dictionary of prior answers
+
+        Returns:
+            Dict: Enriched prompt data
+        """
+        prompt_data["data"] = (
+            QuestionInstructionPromptBuilder._process_question_options(
+                prompt_data["data"], scenario, prior_answers_dict
+            )
+        )
         return prompt_data
 
     def _render_prompt(self, prompt_data: Dict) -> Prompt:
@@ -113,28 +203,11 @@ class QuestionInstructionPromptBuilder:
         Returns:
             Prompt: Rendered instructions
         """
-        import time
-
-        start = time.time()
-
         # Build replacement dict
-        dict_start = time.time()
-        replacement_dict = QTRB(self.prompt_constructor).build_replacement_dict(
-            prompt_data["data"]
-        )
-        dict_end = time.time()
-        logging.debug(f"Time to build replacement dict: {dict_end - dict_start}")
-
+        replacement_dict = self.qtrb.build_replacement_dict(prompt_data["data"])
+
         # Render with dict
-        render_start = time.time()
-        result = prompt_data["prompt"].render(replacement_dict)
-        render_end = time.time()
-        logging.debug(f"Time to render with dict: {render_end - render_start}")
-
-        end = time.time()
-        logging.debug(f"Total time in _render_prompt: {end - start}")
-
-        return result
+        return prompt_data["prompt"].render(replacement_dict)
 
     def _validate_template_variables(self, rendered_prompt: Prompt) -> None:
         """Validates that all template variables have been properly replaced.
@@ -162,7 +235,9 @@ class QuestionInstructionPromptBuilder:
         """
         for question_name in self.survey.question_names:
             if question_name in undefined_vars:
-                logging.warning(f"Question name found in undefined_template_variables: {question_name}")
+                logging.warning(
+                    f"Question name found in undefined_template_variables: {question_name}"
+                )
 
     def _append_survey_instructions(self, rendered_prompt: Prompt) -> Prompt:
         """Appends any relevant survey instructions to the rendered prompt.
@@ -185,3 +260,9 @@ class QuestionInstructionPromptBuilder:
             preamble += instruction.text
 
         return preamble + rendered_prompt
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
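Note: because _process_question_options is now a @staticmethod over plain data, it can be exercised without constructing a PromptConstructor at all. This sketch mirrors the doctest in the hunk above:

    from edsl.agents.QuestionInstructionPromptBuilder import (
        QuestionInstructionPromptBuilder,
    )

    question_data = {
        "question_name": "q0",
        "question_text": "Do you like school?",
        "question_options": "{{ options }}",  # template filled from the scenario
    }
    processed = QuestionInstructionPromptBuilder._process_question_options(
        question_data, scenario={"options": ["yes", "no"]}, prior_answers_dict={}
    )
    assert processed["question_options"] == ["yes", "no"]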