edsl 0.1.44__py3-none-any.whl → 0.1.45__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. edsl/__version__.py +1 -1
  2. edsl/agents/InvigilatorBase.py +3 -1
  3. edsl/agents/PromptConstructor.py +62 -34
  4. edsl/agents/QuestionInstructionPromptBuilder.py +111 -68
  5. edsl/agents/QuestionTemplateReplacementsBuilder.py +69 -16
  6. edsl/agents/question_option_processor.py +15 -6
  7. edsl/coop/CoopFunctionsMixin.py +3 -4
  8. edsl/coop/coop.py +23 -9
  9. edsl/enums.py +3 -3
  10. edsl/inference_services/AnthropicService.py +11 -9
  11. edsl/inference_services/AvailableModelFetcher.py +2 -0
  12. edsl/inference_services/AwsBedrock.py +1 -2
  13. edsl/inference_services/AzureAI.py +12 -9
  14. edsl/inference_services/GoogleService.py +9 -4
  15. edsl/inference_services/InferenceServicesCollection.py +2 -2
  16. edsl/inference_services/MistralAIService.py +1 -2
  17. edsl/inference_services/OpenAIService.py +9 -4
  18. edsl/inference_services/PerplexityService.py +2 -1
  19. edsl/inference_services/{GrokService.py → XAIService.py} +2 -2
  20. edsl/inference_services/registry.py +2 -2
  21. edsl/jobs/Jobs.py +9 -0
  22. edsl/jobs/JobsChecks.py +10 -13
  23. edsl/jobs/async_interview_runner.py +3 -1
  24. edsl/jobs/check_survey_scenario_compatibility.py +5 -5
  25. edsl/jobs/interviews/InterviewExceptionEntry.py +12 -0
  26. edsl/jobs/tasks/TaskHistory.py +1 -1
  27. edsl/language_models/LanguageModel.py +0 -3
  28. edsl/language_models/PriceManager.py +45 -5
  29. edsl/language_models/model.py +47 -26
  30. edsl/questions/QuestionBase.py +21 -0
  31. edsl/questions/QuestionBasePromptsMixin.py +103 -0
  32. edsl/questions/QuestionFreeText.py +22 -5
  33. edsl/questions/descriptors.py +4 -0
  34. edsl/questions/question_base_gen_mixin.py +94 -29
  35. edsl/results/Dataset.py +65 -0
  36. edsl/results/DatasetExportMixin.py +299 -32
  37. edsl/results/Result.py +27 -0
  38. edsl/results/Results.py +22 -2
  39. edsl/results/ResultsGGMixin.py +7 -3
  40. edsl/scenarios/DocumentChunker.py +2 -0
  41. edsl/scenarios/FileStore.py +10 -0
  42. edsl/scenarios/PdfExtractor.py +21 -1
  43. edsl/scenarios/Scenario.py +25 -9
  44. edsl/scenarios/ScenarioList.py +73 -3
  45. edsl/scenarios/handlers/__init__.py +1 -0
  46. edsl/scenarios/handlers/docx.py +5 -1
  47. edsl/scenarios/handlers/jpeg.py +39 -0
  48. edsl/surveys/Survey.py +5 -4
  49. edsl/surveys/SurveyFlowVisualization.py +91 -43
  50. edsl/templates/error_reporting/exceptions_table.html +7 -8
  51. edsl/templates/error_reporting/interview_details.html +1 -1
  52. edsl/templates/error_reporting/interviews.html +0 -1
  53. edsl/templates/error_reporting/overview.html +2 -7
  54. edsl/templates/error_reporting/performance_plot.html +1 -1
  55. edsl/templates/error_reporting/report.css +1 -1
  56. edsl/utilities/PrettyList.py +14 -0
  57. edsl-0.1.45.dist-info/METADATA +246 -0
  58. {edsl-0.1.44.dist-info → edsl-0.1.45.dist-info}/RECORD +60 -59
  59. edsl-0.1.44.dist-info/METADATA +0 -110
  60. {edsl-0.1.44.dist-info → edsl-0.1.45.dist-info}/LICENSE +0 -0
  61. {edsl-0.1.44.dist-info → edsl-0.1.45.dist-info}/WHEEL +0 -0
edsl/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.44"
+ __version__ = "0.1.45"
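
A quick way to confirm the upgrade took effect (a trivial sketch, assuming edsl 0.1.45 has been installed via pip):

    # Verify the installed version after `pip install --upgrade edsl==0.1.45`.
    import edsl

    print(edsl.__version__)  # expected: 0.1.45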
edsl/agents/InvigilatorBase.py CHANGED
@@ -74,7 +74,7 @@ class InvigilatorBase(ABC):
      @property
      def prompt_constructor(self) -> PromptConstructor:
          """Return the prompt constructor."""
-         return PromptConstructor(self, prompt_plan=self.prompt_plan)
+         return PromptConstructor.from_invigilator(self, prompt_plan=self.prompt_plan)

      def to_dict(self, include_cache=False) -> Dict[str, Any]:
          attributes = [
@@ -87,6 +87,7 @@ class InvigilatorBase(ABC):
              "iteration",
              "additional_prompt_data",
              "survey",
+             "raw_model_response",
          ]
          if include_cache:
              attributes.append("cache")
@@ -135,6 +136,7 @@ class InvigilatorBase(ABC):
          d["additional_prompt_data"] = data["additional_prompt_data"]

          d = cls(**d)
+         d.raw_model_response = data.get("raw_model_response")
          return d

      def __repr__(self) -> str:
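
The net effect is that an invigilator's raw_model_response now survives serialization. A minimal sketch of the round-trip pattern the diff adds, using a hypothetical stand-in class rather than the real InvigilatorBase (which takes many more constructor arguments):

    # Hypothetical stand-in mirroring the to_dict/from_dict pattern above.
    class StubInvigilator:
        def __init__(self, survey):
            self.survey = survey
            self.raw_model_response = None

        def to_dict(self):
            # "raw_model_response" is now part of the serialized attributes
            return {"survey": self.survey,
                    "raw_model_response": self.raw_model_response}

        @classmethod
        def from_dict(cls, data):
            d = cls(survey=data["survey"])
            d.raw_model_response = data.get("raw_model_response")  # new in 0.1.45
            return d

    stub = StubInvigilator(survey="example")
    stub.raw_model_response = {"answer": "yes"}
    restored = StubInvigilator.from_dict(stub.to_dict())
    assert restored.raw_model_response == {"answer": "yes"}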
edsl/agents/PromptConstructor.py CHANGED
@@ -71,24 +71,49 @@ class PromptConstructor:
      - The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
      - The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
      """
+     @classmethod
+     def from_invigilator(
+         cls,
+         invigilator: "InvigilatorBase",
+         prompt_plan: Optional["PromptPlan"] = None
+     ) -> "PromptConstructor":
+         return cls(
+             agent=invigilator.agent,
+             question=invigilator.question,
+             scenario=invigilator.scenario,
+             survey=invigilator.survey,
+             model=invigilator.model,
+             current_answers=invigilator.current_answers,
+             memory_plan=invigilator.memory_plan,
+             prompt_plan=prompt_plan
+         )

      def __init__(
-         self, invigilator: "InvigilatorBase", prompt_plan: Optional["PromptPlan"] = None
+         self,
+         agent: "Agent",
+         question: "QuestionBase",
+         scenario: "Scenario",
+         survey: "Survey",
+         model: "LanguageModel",
+         current_answers: dict,
+         memory_plan: "MemoryPlan",
+         prompt_plan: Optional["PromptPlan"] = None
      ):
-         self.invigilator = invigilator
+         self.agent = agent
+         self.question = question
+         self.scenario = scenario
+         self.survey = survey
+         self.model = model
+         self.current_answers = current_answers
+         self.memory_plan = memory_plan
          self.prompt_plan = prompt_plan or PromptPlan()

-         self.agent = invigilator.agent
-         self.question = invigilator.question
-         self.scenario = invigilator.scenario
-         self.survey = invigilator.survey
-         self.model = invigilator.model
-         self.current_answers = invigilator.current_answers
-         self.memory_plan = invigilator.memory_plan
-
-     def get_question_options(self, question_data):
+     def get_question_options(self, question_data: dict) -> list[str]:
          """Get the question options."""
-         return QuestionOptionProcessor(self).get_question_options(question_data)
+         return (QuestionOptionProcessor
+                 .from_prompt_constructor(self)
+                 .get_question_options(question_data)
+                 )

      @cached_property
      def agent_instructions_prompt(self) -> Prompt:
@@ -198,9 +223,10 @@ class PromptConstructor:
      @cached_property
      def question_file_keys(self) -> list:
          """Extracts the file keys from the question text.
+
          It checks if the variables in the question text are in the scenario file keys.
          """
-         return QuestionTemplateReplacementsBuilder(self).question_file_keys()
+         return QuestionTemplateReplacementsBuilder.from_prompt_constructor(self).question_file_keys()

      @cached_property
      def question_instructions_prompt(self) -> Prompt:
@@ -219,7 +245,7 @@ class PromptConstructor:
              QuestionInstructionPromptBuilder,
          )

-         return QuestionInstructionPromptBuilder(self).build()
+         return QuestionInstructionPromptBuilder.from_prompt_constructor(self).build()

      @cached_property
      def prior_question_memory_prompt(self) -> Prompt:
@@ -287,30 +313,32 @@ class PromptConstructor:
          arranged = self.prompt_plan.arrange_components(**components)

          if parallel == "process":
-             ctx = get_context('fork')
-             with ctx.Pool() as pool:
-                 results = pool.map(_process_prompt, [
-                     (arranged["user_prompt"], {}),
-                     (arranged["system_prompt"], {})
-                 ])
-             prompts = {
-                 "user_prompt": results[0],
-                 "system_prompt": results[1]
-             }
+             pass
+             # ctx = get_context('fork')
+             # with ctx.Pool() as pool:
+             #     results = pool.map(_process_prompt, [
+             #         (arranged["user_prompt"], {}),
+             #         (arranged["system_prompt"], {})
+             #     ])
+             # prompts = {
+             #     "user_prompt": results[0],
+             #     "system_prompt": results[1]
+             # }

          elif parallel == "thread":
-             with ThreadPoolExecutor() as executor:
-                 user_prompt_list = arranged["user_prompt"]
-                 system_prompt_list = arranged["system_prompt"]
+             pass
+             # with ThreadPoolExecutor() as executor:
+             #     user_prompt_list = arranged["user_prompt"]
+             #     system_prompt_list = arranged["system_prompt"]

-                 # Process both prompt lists in parallel
-                 rendered_user = executor.submit(_process_prompt, (user_prompt_list, {}))
-                 rendered_system = executor.submit(_process_prompt, (system_prompt_list, {}))
+             # # Process both prompt lists in parallel
+             # rendered_user = executor.submit(_process_prompt, (user_prompt_list, {}))
+             # rendered_system = executor.submit(_process_prompt, (system_prompt_list, {}))

-                 prompts = {
-                     "user_prompt": rendered_user.result(),
-                     "system_prompt": rendered_system.result()
-                 }
+             # prompts = {
+             #     "user_prompt": rendered_user.result(),
+             #     "system_prompt": rendered_system.result()
+             # }

          else:  # sequential processing
              prompts = self.prompt_plan.get_prompts(**components)
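
In practice the new constructor takes its seven collaborators explicitly, while from_invigilator preserves the old one-argument convenience path. A hedged sketch, assuming edsl 0.1.45 is installed; the SimpleNamespace stands in for a real invigilator and only needs the seven attributes the classmethod reads:

    from types import SimpleNamespace

    from edsl.agents.PromptConstructor import PromptConstructor

    # Fake invigilator: any object exposing the attributes from_invigilator reads.
    fake_invigilator = SimpleNamespace(
        agent="agent",
        question="question",
        scenario={},
        survey="survey",
        model="model",
        current_answers={},
        memory_plan=None,
    )
    pc = PromptConstructor.from_invigilator(fake_invigilator)
    assert pc.agent == "agent" and pc.current_answers == {}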
edsl/agents/QuestionInstructionPromptBuilder.py CHANGED
@@ -1,4 +1,4 @@
- from typing import Dict, List, Set
+ from typing import Dict, List, Set, Any, Union
  from warnings import warn
  import logging
  from edsl.prompts.Prompt import Prompt
@@ -11,97 +11,150 @@ from edsl.agents.QuestionTemplateReplacementsBuilder import (
  class QuestionInstructionPromptBuilder:
      """Handles the construction and rendering of question instructions."""

-     def __init__(self, prompt_constructor: "PromptConstructor"):
+     @classmethod
+     def from_prompt_constructor(cls, prompt_constructor: "PromptConstructor"):
+
+         model = prompt_constructor.model
+         survey = prompt_constructor.survey
+         question = prompt_constructor.question
+         return cls(prompt_constructor, model, survey, question)
+
+     def __init__(self, prompt_constructor: "PromptConstructor", model: "Model", survey: "Survey", question: "QuestionBase"):
          self.prompt_constructor = prompt_constructor
+         self.model = model
+         self.survey = survey
+         self.question = question
+
+         self.scenario = prompt_constructor.scenario
+         self.prior_answers_dict = prompt_constructor.prior_answers_dict()
+
+         self.qtrb = QTRB.from_prompt_constructor(self.prompt_constructor)

-         self.model = self.prompt_constructor.model
-         self.survey = self.prompt_constructor.survey
-         self.question = self.prompt_constructor.question

      def build(self) -> Prompt:
          """Builds the complete question instructions prompt with all necessary components.

          Returns:
-             Prompt: The fully rendered question instructions
+             Prompt: The fully rendered question instructions to be sent to the language model
+
+         >>> from edsl import QuestionMultipleChoice
+         >>> from edsl import Survey
+         >>> q = Survey.example().questions[0]
+         >>> from edsl import Model
+         >>> class FakePromptConstructor:
+         ...     def __init__(self, scenario, question, agent):
+         ...         self.scenario = scenario
+         ...         self.question = question
+         ...         self.agent = agent
+         ...         self.model = Model('test')
+         ...         self.survey = Survey.example()
+         ...     scenario = {"file1": "file1"}
+         ...     question = q
+         ...     agent = "agent"
+         ...     def prior_answers_dict(self):
+         ...         return {'q0': 'q0'}
+         >>> mpc = FakePromptConstructor(
+         ...     scenario={"file1": "file1"},
+         ...     question=q,
+         ...     agent="agent"
+         ... )
+         >>> qipb = QuestionInstructionPromptBuilder.from_prompt_constructor(mpc)
+         >>> qipb.build()
+         Prompt(text=\"""
+         Do you like school?
+         <BLANKLINE>
+         <BLANKLINE>
+         yes
+         <BLANKLINE>
+         no
+         <BLANKLINE>
+         <BLANKLINE>
+         Only 1 option may be selected.
+         <BLANKLINE>
+         Respond only with a string corresponding to one of the options.
+         <BLANKLINE>
+         <BLANKLINE>
+         After the answer, you can put a comment explaining why you chose that option on the next line.\""")
          """
-         import time
-
-         start = time.time()
-
          # Create base prompt
-         base_start = time.time()
          base_prompt = self._create_base_prompt()
-         base_end = time.time()
-         logging.debug(f"Time for base prompt: {base_end - base_start}")

          # Enrich with options
-         enrich_start = time.time()
-         enriched_prompt = self._enrich_with_question_options(base_prompt)
-         enrich_end = time.time()
-         logging.debug(f"Time for enriching with options: {enrich_end - enrich_start}")
+         enriched_prompt = self._enrich_with_question_options(prompt_data=base_prompt, scenario=self.scenario, prior_answers_dict=self.prior_answers_dict)

          # Render prompt
-         render_start = time.time()
          rendered_prompt = self._render_prompt(enriched_prompt)
-         render_end = time.time()
-         logging.debug(f"Time for rendering prompt: {render_end - render_start}")

          # Validate template variables
-         validate_start = time.time()
          self._validate_template_variables(rendered_prompt)
-         validate_end = time.time()
-         logging.debug(f"Time for template validation: {validate_end - validate_start}")

          # Append survey instructions
-         append_start = time.time()
          final_prompt = self._append_survey_instructions(rendered_prompt)
-         append_end = time.time()
-         logging.debug(f"Time for appending survey instructions: {append_end - append_start}")
-
-         end = time.time()
-         logging.debug(f"Total time in build_question_instructions: {end - start}")

          return final_prompt

-     def _create_base_prompt(self) -> Dict:
+     def _create_base_prompt(self) -> Dict[str, Union[Prompt, Dict[str, Any]]]:
          """Creates the initial prompt with basic question data.

+         The data are, e.g., the question name, question text, question options, etc.
+
+         >>> from edsl import QuestionMultipleChoice
+         >>> QuestionMultipleChoice.example().data.copy()
+         {'question_name': 'how_feeling', 'question_text': 'How are you?', 'question_options': ['Good', 'Great', 'OK', 'Bad'], 'include_comment': False}
+
          Returns:
-             Dict: Base question data
+             Dict[str, Union[Prompt, Dict[str, Any]]]: Base question data with prompt and data fields
          """
          return {
              "prompt": Prompt(self.question.get_instructions(model=self.model.model)),
              "data": self.question.data.copy(),
          }

-     def _enrich_with_question_options(self, prompt_data: Dict) -> Dict:
-         """Enriches the prompt data with question options if they exist.
+     @staticmethod
+     def _process_question_options(question_data: Dict, scenario: 'Scenario', prior_answers_dict: Dict) -> Dict:
+         """Processes and replaces question options in the question data if they exist.
+
+         The question_options could be intended to be replaced with data from a scenario or prior answers.
+
+         >>> question_data = {'question_name': 'q0', 'question_text': 'Do you like school?', 'question_options': '{{ options }}'}
+         >>> scenario = {"options": ["yes", "no"]}
+         >>> prior_answers_dict = {}
+         >>> QuestionInstructionPromptBuilder._process_question_options(question_data, scenario, prior_answers_dict)
+         {'question_name': 'q0', 'question_text': 'Do you like school?', 'question_options': ['yes', 'no']}

          Args:
-             prompt_data: Dictionary containing prompt and question data
+             question_data: Dictionary containing question data
+             scenario: Scenario object
+             prior_answers_dict: Dictionary of prior answers

          Returns:
-             Dict: Enriched prompt data
+             Dict: Question data with processed question options
          """
-         import time
-
-         start = time.time()
-
-         if "question_options" in prompt_data["data"]:
+         if "question_options" in question_data:
              from edsl.agents.question_option_processor import QuestionOptionProcessor

-             processor_start = time.time()
-             question_options = QuestionOptionProcessor(
-                 self.prompt_constructor
-             ).get_question_options(question_data=prompt_data["data"])
-             processor_end = time.time()
-             logging.debug(f"Time to process question options: {processor_end - processor_start}")
-
-             prompt_data["data"]["question_options"] = question_options
+             question_options = (QuestionOptionProcessor(scenario, prior_answers_dict)
+                                 .get_question_options(question_data=question_data)
+             )
+             question_data["question_options"] = question_options

-         end = time.time()
-         logging.debug(f"Total time in _enrich_with_question_options: {end - start}")
-
+         return question_data
+
+     @staticmethod
+     def _enrich_with_question_options(prompt_data: Dict, scenario: 'Scenario', prior_answers_dict: Dict) -> Dict:
+         """Enriches the prompt data with processed question options if they exist.
+
+         Args:
+             prompt_data: Dictionary containing prompt and question data
+             scenario: Scenario object
+             prior_answers_dict: Dictionary of prior answers
+
+         Returns:
+             Dict: Enriched prompt data
+         """
+         prompt_data["data"] = QuestionInstructionPromptBuilder._process_question_options(
+             prompt_data["data"], scenario, prior_answers_dict
+         )
          return prompt_data

      def _render_prompt(self, prompt_data: Dict) -> Prompt:
@@ -113,28 +166,13 @@ class QuestionInstructionPromptBuilder:
          Returns:
              Prompt: Rendered instructions
          """
-         import time
-
-         start = time.time()
-
          # Build replacement dict
-         dict_start = time.time()
-         replacement_dict = QTRB(self.prompt_constructor).build_replacement_dict(
+         replacement_dict = self.qtrb.build_replacement_dict(
              prompt_data["data"]
          )
-         dict_end = time.time()
-         logging.debug(f"Time to build replacement dict: {dict_end - dict_start}")

          # Render with dict
-         render_start = time.time()
-         result = prompt_data["prompt"].render(replacement_dict)
-         render_end = time.time()
-         logging.debug(f"Time to render with dict: {render_end - render_start}")
-
-         end = time.time()
-         logging.debug(f"Total time in _render_prompt: {end - start}")
-
-         return result
+         return prompt_data["prompt"].render(replacement_dict)

      def _validate_template_variables(self, rendered_prompt: Prompt) -> None:
          """Validates that all template variables have been properly replaced.
@@ -185,3 +223,8 @@ class QuestionInstructionPromptBuilder:
              preamble += instruction.text

          return preamble + rendered_prompt
+
+
+ if __name__ == "__main__":
+     import doctest
+     doctest.testmod()
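
Two usability gains from this refactor: the module now runs its own doctests when executed directly, and _process_question_options is a @staticmethod, so option substitution can be exercised without building a full PromptConstructor. A short sketch mirroring the doctest above (assumes edsl 0.1.45 is installed):

    from edsl.agents.QuestionInstructionPromptBuilder import (
        QuestionInstructionPromptBuilder,
    )

    # Jinja2-style option placeholders are resolved from the scenario;
    # no PromptConstructor is needed now that the method is static.
    question_data = {
        "question_name": "q0",
        "question_text": "Do you like school?",
        "question_options": "{{ options }}",
    }
    processed = QuestionInstructionPromptBuilder._process_question_options(
        question_data, scenario={"options": ["yes", "no"]}, prior_answers_dict={}
    )
    print(processed["question_options"])  # ['yes', 'no']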
edsl/agents/QuestionTemplateReplacementsBuilder.py CHANGED
@@ -4,19 +4,53 @@ from typing import Any, Set, TYPE_CHECKING
  if TYPE_CHECKING:
      from edsl.agents.PromptConstructor import PromptConstructor
      from edsl.scenarios.Scenario import Scenario
+     from edsl.questions.QuestionBase import QuestionBase
+     from edsl.agents.Agent import Agent


  class QuestionTemplateReplacementsBuilder:
-     def __init__(self, prompt_constructor: "PromptConstructor"):
-         self.prompt_constructor = prompt_constructor
+
+     @classmethod
+     def from_prompt_constructor(cls, prompt_constructor: "PromptConstructor"):
+         scenario = prompt_constructor.scenario
+         question = prompt_constructor.question
+         prior_answers_dict = prompt_constructor.prior_answers_dict()
+         agent = prompt_constructor.agent
+
+         return cls(scenario, question, prior_answers_dict, agent)
+
+     def __init__(
+         self,
+         scenario: "Scenario",
+         question: "QuestionBase",
+         prior_answers_dict: dict,
+         agent: "Agent",
+     ):
+         self.scenario = scenario
+         self.question = question
+         self.prior_answers_dict = prior_answers_dict
+         self.agent = agent

      def question_file_keys(self):
-         question_text = self.prompt_constructor.question.question_text
-         file_keys = self._find_file_keys(self.prompt_constructor.scenario)
+         """
+         >>> from edsl import QuestionMultipleChoice, Scenario
+         >>> q = QuestionMultipleChoice(question_text="Do you like school?", question_name = "q0", question_options = ["yes", "no"])
+         >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = {"file1": "file1"}, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+         >>> qtrb.question_file_keys()
+         []
+         >>> from edsl import FileStore
+         >>> fs = FileStore.example()
+         >>> q = QuestionMultipleChoice(question_text="What do you think of this file: {{ file1 }}", question_name = "q0", question_options = ["good", "bad"])
+         >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = Scenario({"file1": fs}), question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+         >>> qtrb.question_file_keys()
+         ['file1']
+         """
+         question_text = self.question.question_text
+         file_keys = self._find_file_keys(self.scenario)
          return self._extract_file_keys_from_question_text(question_text, file_keys)

      def scenario_file_keys(self):
-         return self._find_file_keys(self.prompt_constructor.scenario)
+         return self._find_file_keys(self.scenario)

      def get_jinja2_variables(template_str: str) -> Set[str]:
          """
@@ -88,15 +122,17 @@ class QuestionTemplateReplacementsBuilder:
              question_file_keys.append(var)
          return question_file_keys

-     def _scenario_replacements(self) -> dict[str, Any]:
+     def _scenario_replacements(
+         self, replacement_string: str = "<see file {key}>"
+     ) -> dict[str, Any]:
          # File references dictionary
-         file_refs = {key: f"<see file {key}>" for key in self.scenario_file_keys()}
+         file_refs = {
+             key: replacement_string.format(key=key) for key in self.scenario_file_keys()
+         }

          # Scenario items excluding file keys
          scenario_items = {
-             k: v
-             for k, v in self.prompt_constructor.scenario.items()
-             if k not in self.scenario_file_keys()
+             k: v for k, v in self.scenario.items() if k not in self.scenario_file_keys()
          }
          return {**file_refs, **scenario_items}
@@ -119,14 +155,31 @@ class QuestionTemplateReplacementsBuilder:
          return {**question_settings, **question_data}

      def build_replacement_dict(self, question_data: dict) -> dict[str, Any]:
-         """Builds a dictionary of replacement values for rendering a prompt by combining multiple data sources."""
+         """Builds a dictionary of replacement values for rendering a prompt by combining multiple data sources.
+
+         >>> from edsl import QuestionMultipleChoice, Scenario
+         >>> q = QuestionMultipleChoice(question_text="Do you like school?", question_name = "q0", question_options = ["yes", "no"])
+         >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = {"file1": "file1"}, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+         >>> qtrb.question_file_keys()
+         []
+         >>> from edsl import FileStore
+         >>> fs = FileStore.example()
+         >>> s = Scenario({"file1": fs, "first_name": "John"})
+         >>> q = QuestionMultipleChoice(question_text="What do you think of this file: {{ file1 }}, {{ first_name}}", question_name = "q0", question_options = ["good", "bad"])
+         >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = s, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
+         >>> qtrb.build_replacement_dict(q.data)
+         {'file1': '<see file file1>', 'first_name': 'John', 'use_code': False, 'include_comment': True, 'question_name': 'q0', 'question_text': 'What do you think of this file: {{ file1 }}, {{ first_name}}', 'question_options': ['good', 'bad'], 'q0': 'q0', 'agent': 'agent'}
+         """
          rpl = {}
          rpl["scenario"] = self._scenario_replacements()
-         rpl["question"] = self._question_data_replacements(
-             self.prompt_constructor.question, question_data
-         )
-         rpl["prior_answers"] = self.prompt_constructor.prior_answers_dict()
-         rpl["agent"] = {"agent": self.prompt_constructor.agent}
+         rpl["question"] = self._question_data_replacements(self.question, question_data)
+         # rpl["prior_answers"] = self.prompt_constructor.prior_answers_dict()
+         rpl["prior_answers"] = self.prior_answers_dict
+         # rpl["agent"] = {"agent": self.prompt_constructor.agent}
+         rpl["agent"] = {"agent": self.agent}

          # Combine all dictionaries using dict.update() for clarity
          replacement_dict = {}
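
Besides taking its inputs directly, _scenario_replacements now accepts a replacement_string parameter, so the placeholder substituted for file attachments is configurable. A minimal sketch reusing the doctest's setup (assumes edsl 0.1.45 is installed):

    from edsl import FileStore, QuestionMultipleChoice, Scenario
    from edsl.agents.QuestionTemplateReplacementsBuilder import (
        QuestionTemplateReplacementsBuilder,
    )

    q = QuestionMultipleChoice(
        question_text="What do you think of this file: {{ file1 }}",
        question_name="q0",
        question_options=["good", "bad"],
    )
    qtrb = QuestionTemplateReplacementsBuilder(
        scenario=Scenario({"file1": FileStore.example()}),
        question=q,
        prior_answers_dict={},
        agent="agent",
    )
    print(qtrb._scenario_replacements())
    # {'file1': '<see file file1>'}  (default placeholder)
    print(qtrb._scenario_replacements(replacement_string="[attachment: {key}]"))
    # {'file1': '[attachment: file1]'}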
edsl/agents/question_option_processor.py CHANGED
@@ -8,8 +8,16 @@ class QuestionOptionProcessor:
      These can be provided directly, as a template string, or fetched from prior answers or the scenario.
      """

-     def __init__(self, prompt_constructor):
-         self.prompt_constructor = prompt_constructor
+     @classmethod
+     def from_prompt_constructor(cls, prompt_constructor):
+         scenario = prompt_constructor.scenario
+         prior_answers_dict = prompt_constructor.prior_answers_dict()
+
+         return cls(scenario, prior_answers_dict)
+
+     def __init__(self, scenario: 'Scenario', prior_answers_dict: dict):
+         self.scenario = scenario
+         self.prior_answers_dict = prior_answers_dict

      @staticmethod
      def _get_default_options() -> list:
@@ -109,7 +117,8 @@ class QuestionOptionProcessor:
          >>> mpc = MockPromptConstructor()
          >>> from edsl import Scenario
          >>> mpc.scenario = Scenario({"options": ["Option 1", "Option 2"]})
-         >>> processor = QuestionOptionProcessor(mpc)
+         >>> mpc.prior_answers_dict = lambda: {'q0': 'q0'}
+         >>> processor = QuestionOptionProcessor.from_prompt_constructor(mpc)

          The basic case where options are directly provided:
@@ -130,7 +139,7 @@ class QuestionOptionProcessor:
          >>> q0 = MockQuestion()
          >>> q0.answer = ["Option 1", "Option 2"]
          >>> mpc.prior_answers_dict = lambda: {'q0': q0}
-         >>> processor = QuestionOptionProcessor(mpc)
+         >>> processor = QuestionOptionProcessor.from_prompt_constructor(mpc)
          >>> question_data = {"question_options": "{{ q0 }}"}
          >>> processor.get_question_options(question_data)
          ['Option 1', 'Option 2']
@@ -151,14 +160,14 @@ class QuestionOptionProcessor:

          # Try getting options from scenario
          scenario_options = self._get_options_from_scenario(
-             self.prompt_constructor.scenario, option_key
+             self.scenario, option_key
          )
          if scenario_options:
              return scenario_options

          # Try getting options from prior answers
          prior_answer_options = self._get_options_from_prior_answers(
-             self.prompt_constructor.prior_answers_dict(), option_key
+             self.prior_answers_dict, option_key
          )
          if prior_answer_options:
              return prior_answer_options
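
With scenario and prior answers passed in directly, the processor can now be built without any prompt constructor; from_prompt_constructor keeps the old entry point working. A hedged sketch of direct construction (assumes edsl 0.1.45 is installed):

    from edsl import Scenario
    from edsl.agents.question_option_processor import QuestionOptionProcessor

    # Direct construction: no prompt constructor required anymore.
    processor = QuestionOptionProcessor(
        scenario=Scenario({"options": ["Option 1", "Option 2"]}),
        prior_answers_dict={},
    )
    print(processor.get_question_options({"question_options": "{{ options }}"}))
    # ['Option 1', 'Option 2']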
edsl/coop/CoopFunctionsMixin.py CHANGED
@@ -4,10 +4,9 @@ class CoopFunctionsMixin:
          s = Scenario({"existing_names": existing_names})
          q = QuestionList(
-             question_text="""The following colum names are already in use: {{ existing_names }}
-             Please provide new names for the columns.
-             They should be short, one or two words, and unique. They should be valid Python idenifiers.
-             No spaces - use underscores instead.
+             question_text="""The following column names are already in use: {{ existing_names }}
+             Please provide new column names.
+             They should be short (one or two words) and unique valid Python identifiers (i.e., use underscores instead of spaces).
              """,
              question_name="better_names",
          )
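
For context, this prompt backs a helper that asks a model to propose replacement column names. A hedged usage sketch; the by/run chain follows the usual edsl pattern and requires a configured model, so treat it as illustrative rather than exact:

    from edsl import QuestionList, Scenario

    s = Scenario({"existing_names": ["age", "income", "age"]})
    q = QuestionList(
        question_text="The following column names are already in use: "
                      "{{ existing_names }}. Please provide new column names.",
        question_name="better_names",
    )
    # Illustrative only: renders the prompt with the scenario and runs it.
    results = q.by(s).run()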