edsl 0.1.45__py3-none-any.whl → 0.1.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. edsl/Base.py +87 -16
  2. edsl/__version__.py +1 -1
  3. edsl/agents/PromptConstructor.py +26 -79
  4. edsl/agents/QuestionInstructionPromptBuilder.py +70 -32
  5. edsl/agents/QuestionTemplateReplacementsBuilder.py +12 -2
  6. edsl/coop/coop.py +289 -147
  7. edsl/data/Cache.py +2 -0
  8. edsl/data/CacheEntry.py +10 -2
  9. edsl/data/RemoteCacheSync.py +10 -9
  10. edsl/inference_services/AvailableModelFetcher.py +1 -1
  11. edsl/inference_services/PerplexityService.py +9 -5
  12. edsl/jobs/AnswerQuestionFunctionConstructor.py +12 -1
  13. edsl/jobs/Jobs.py +35 -17
  14. edsl/jobs/JobsComponentConstructor.py +2 -1
  15. edsl/jobs/JobsPrompts.py +49 -26
  16. edsl/jobs/JobsRemoteInferenceHandler.py +4 -5
  17. edsl/jobs/data_structures.py +3 -0
  18. edsl/jobs/interviews/Interview.py +6 -3
  19. edsl/language_models/LanguageModel.py +7 -1
  20. edsl/questions/QuestionBase.py +5 -0
  21. edsl/questions/question_base_gen_mixin.py +2 -0
  22. edsl/questions/question_registry.py +6 -7
  23. edsl/results/DatasetExportMixin.py +124 -6
  24. edsl/results/Results.py +59 -0
  25. edsl/scenarios/FileStore.py +112 -7
  26. edsl/scenarios/ScenarioList.py +283 -21
  27. edsl/study/Study.py +2 -2
  28. edsl/surveys/Survey.py +15 -20
  29. {edsl-0.1.45.dist-info → edsl-0.1.47.dist-info}/METADATA +4 -3
  30. {edsl-0.1.45.dist-info → edsl-0.1.47.dist-info}/RECORD +32 -44
  31. edsl/auto/AutoStudy.py +0 -130
  32. edsl/auto/StageBase.py +0 -243
  33. edsl/auto/StageGenerateSurvey.py +0 -178
  34. edsl/auto/StageLabelQuestions.py +0 -125
  35. edsl/auto/StagePersona.py +0 -61
  36. edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
  37. edsl/auto/StagePersonaDimensionValues.py +0 -74
  38. edsl/auto/StagePersonaDimensions.py +0 -69
  39. edsl/auto/StageQuestions.py +0 -74
  40. edsl/auto/SurveyCreatorPipeline.py +0 -21
  41. edsl/auto/utilities.py +0 -218
  42. edsl/base/Base.py +0 -279
  43. {edsl-0.1.45.dist-info → edsl-0.1.47.dist-info}/LICENSE +0 -0
  44. {edsl-0.1.45.dist-info → edsl-0.1.47.dist-info}/WHEEL +0 -0
edsl/Base.py CHANGED
@@ -65,13 +65,11 @@ class PersistenceMixin:
     def pull(
         cls,
         url_or_uuid: Optional[Union[str, UUID]] = None,
-        #expected_parrot_url: Optional[str] = None,
     ):
         """Pull the object from coop.
-
+
         Args:
             url_or_uuid: Either a UUID string or a URL pointing to the object
-            expected_parrot_url: Optional URL for the Parrot server
         """
         from edsl.coop import Coop
         from edsl.coop.utils import ObjectRegistry
@@ -79,32 +77,27 @@ class PersistenceMixin:
         object_type = ObjectRegistry.get_object_type_by_edsl_class(cls)
         coop = Coop()

-        # Determine if input is URL or UUID
-        if url_or_uuid and ("http://" in str(url_or_uuid) or "https://" in str(url_or_uuid)):
-            return coop.get(url=url_or_uuid, expected_object_type=object_type)
-        else:
-            return coop.get(uuid=url_or_uuid, expected_object_type=object_type)
+        return coop.get(url_or_uuid, expected_object_type=object_type)

     @classmethod
-    def delete(cls, uuid: Optional[Union[str, UUID]] = None, url: Optional[str] = None):
+    def delete(cls, url_or_uuid: Union[str, UUID]) -> None:
         """Delete the object from coop."""
         from edsl.coop import Coop

         coop = Coop()
-        return coop.delete(uuid, url)
+
+        return coop.delete(url_or_uuid)

     @classmethod
-    def patch(
+    def patch_cls(
         cls,
-        uuid: Optional[Union[str, UUID]] = None,
-        url: Optional[str] = None,
+        url_or_uuid: Union[str, UUID],
         description: Optional[str] = None,
-        alias: Optional[str] = None,
         value: Optional[Any] = None,
         visibility: Optional[str] = None,
     ):
         """
-        Patch an uploaded objects attributes.
+        Patch an uploaded object's attributes (class method version).
         - `description` changes the description of the object on Coop
         - `value` changes the value of the object on Coop. **has to be an EDSL object**
         - `visibility` changes the visibility of the object on Coop
@@ -112,7 +105,85 @@ class PersistenceMixin:
         from edsl.coop import Coop

         coop = Coop()
-        return coop.patch(uuid, url, description, alias, value, visibility)
+
+        return coop.patch(
+            url_or_uuid=url_or_uuid,
+            description=description,
+            value=value,
+            visibility=visibility,
+        )
+
+    class ClassOrInstanceMethod:
+        """Descriptor that allows a method to be called as both a class method and an instance method."""
+
+        def __init__(self, func):
+            self.func = func
+
+        def __get__(self, obj, objtype=None):
+            if obj is None:
+                # Called as a class method
+                def wrapper(*args, **kwargs):
+                    return self.func(objtype, *args, **kwargs)
+
+                return wrapper
+            else:
+                # Called as an instance method
+                def wrapper(*args, **kwargs):
+                    return self.func(obj, *args, **kwargs)
+
+                return wrapper
+
+    @ClassOrInstanceMethod
+    def patch(
+        self_or_cls,
+        url_or_uuid: Union[str, UUID],
+        description: Optional[str] = None,
+        value: Optional[Any] = None,
+        visibility: Optional[str] = None,
+    ):
+        """
+        Patch an uploaded object's attributes.
+
+        When called as a class method:
+        - Requires explicit `value` parameter
+
+        When called as an instance method:
+        - Uses the instance itself as the `value` parameter
+
+        Parameters:
+        - `id_or_url`: ID or URL of the object to patch
+        - `description`: changes the description of the object on Coop
+        - `value`: changes the value of the object on Coop (required for class method)
+        - `visibility`: changes the visibility of the object on Coop
+        """
+
+        # Check if this is being called as a class method
+        if isinstance(self_or_cls, type):
+            # This is a class method call
+            cls = self_or_cls
+            return cls.patch_cls(
+                url_or_uuid=url_or_uuid,
+                description=description,
+                value=value,
+                visibility=visibility,
+            )
+        else:
+            # This is an instance method call
+            instance = self_or_cls
+            cls_type = instance.__class__
+
+            # Use the instance as the value if not explicitly provided
+            if value is None:
+                value = instance
+            else:
+                pass
+
+            return cls_type.patch_cls(
+                url_or_uuid=url_or_uuid,
+                description=description,
+                value=value,
+                visibility=visibility,
+            )

     @classmethod
     def search(cls, query):
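
A minimal usage sketch of the consolidated identifier API shown above: `pull`, `delete`, and `patch` all take a single `url_or_uuid` argument, and `patch` works as either a class method or an instance method via the `ClassOrInstanceMethod` descriptor. Illustrative only; the Survey class, placeholder UUID, and visibility string are assumptions, not taken from this diff.

from edsl import Survey

uuid = "123e4567-e89b-12d3-a456-426614174000"  # assumed placeholder UUID (or a full Coop URL)
obj = Survey.pull(uuid)

# Class-method form: an explicit `value` is required.
Survey.patch(uuid, description="Updated description", value=obj, visibility="unlisted")

# Instance-method form: the instance itself is used as `value`.
obj.patch(uuid, description="Updated again")

# delete() likewise takes the single UUID-or-URL argument.
Survey.delete(uuid)
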
edsl/__version__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.45"
+ __version__ = "0.1.47"
edsl/agents/PromptConstructor.py CHANGED
@@ -1,20 +1,16 @@
 from __future__ import annotations
 from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING, Literal
 from functools import cached_property
-from multiprocessing import Pool, freeze_support, get_context
-from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
 import time
 import logging

 from edsl.prompts.Prompt import Prompt

-from dataclasses import dataclass
-
-from .prompt_helpers import PromptPlan
-from .QuestionTemplateReplacementsBuilder import (
+from edsl.agents.prompt_helpers import PromptPlan
+from edsl.agents.QuestionTemplateReplacementsBuilder import (
     QuestionTemplateReplacementsBuilder,
 )
-from .question_option_processor import QuestionOptionProcessor
+from edsl.agents.question_option_processor import QuestionOptionProcessor

 if TYPE_CHECKING:
     from edsl.agents.InvigilatorBase import InvigilatorBase
@@ -146,23 +142,29 @@ class PromptConstructor:
         return self.agent.prompt()

     def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
-        """This is a dictionary of prior answers, if they exist."""
+        """This is a dictionary of prior answers, if they exist.
+
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example()
+        >>> i.prompt_constructor.prior_answers_dict()
+        {'q0': ..., 'q1': ...}
+        """
         return self._add_answers(
             self.survey.question_names_to_questions(), self.current_answers
         )

     @staticmethod
-    def _extract_quetion_and_entry_type(key_entry) -> tuple[str, str]:
+    def _extract_question_and_entry_type(key_entry) -> tuple[str, str]:
         """
         Extracts the question name and type for the current answer dictionary key entry.

-        >>> PromptConstructor._extract_quetion_and_entry_type("q0")
+        >>> PromptConstructor._extract_question_and_entry_type("q0")
         ('q0', 'answer')
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0_comment")
+        >>> PromptConstructor._extract_question_and_entry_type("q0_comment")
         ('q0', 'comment')
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0_alternate_generated_tokens")
+        >>> PromptConstructor._extract_question_and_entry_type("q0_alternate_generated_tokens")
         ('q0_alternate', 'generated_tokens')
-        >>> PromptConstructor._extract_quetion_and_entry_type("q0_alt_comment")
+        >>> PromptConstructor._extract_question_and_entry_type("q0_alt_comment")
         ('q0_alt', 'comment')
         """
         split_list = key_entry.rsplit("_", maxsplit=1)
@@ -192,7 +194,7 @@ class PromptConstructor:
         d = defaultdict(dict)
         for key, value in current_answers.items():
             question_name, entry_type = (
-                PromptConstructor._extract_quetion_and_entry_type(key)
+                PromptConstructor._extract_question_and_entry_type(key)
             )
             d[question_name][entry_type] = value
         return dict(d)
@@ -273,99 +275,44 @@ class PromptConstructor:
             question_name, self.current_answers
         )

-    def get_prompts(self, parallel: Literal["thread", "process", None] = None) -> Dict[str, Any]:
+    def get_prompts(self) -> Dict[str, Any]:
         """Get the prompts for the question."""
         start = time.time()

         # Build all the components
-        instr_start = time.time()
         agent_instructions = self.agent_instructions_prompt
-        instr_end = time.time()
-        logger.debug(f"Time taken for agent instructions: {instr_end - instr_start:.4f}s")
-
-        persona_start = time.time()
         agent_persona = self.agent_persona_prompt
-        persona_end = time.time()
-        logger.debug(f"Time taken for agent persona: {persona_end - persona_start:.4f}s")
-
-        q_instr_start = time.time()
         question_instructions = self.question_instructions_prompt
-        q_instr_end = time.time()
-        logger.debug(f"Time taken for question instructions: {q_instr_end - q_instr_start:.4f}s")
-
-        memory_start = time.time()
         prior_question_memory = self.prior_question_memory_prompt
-        memory_end = time.time()
-        logger.debug(f"Time taken for prior question memory: {memory_end - memory_start:.4f}s")
-
+
         # Get components dict
         components = {
             "agent_instructions": agent_instructions.text,
             "agent_persona": agent_persona.text,
             "question_instructions": question_instructions.text,
             "prior_question_memory": prior_question_memory.text,
-        }
-
-        # Use PromptPlan's get_prompts method
-        plan_start = time.time()
-
+        }
         # Get arranged components first
         arranged = self.prompt_plan.arrange_components(**components)

-        if parallel == "process":
-            pass
-            # ctx = get_context('fork')
-            # with ctx.Pool() as pool:
-            #     results = pool.map(_process_prompt, [
-            #         (arranged["user_prompt"], {}),
-            #         (arranged["system_prompt"], {})
-            #     ])
-            # prompts = {
-            #     "user_prompt": results[0],
-            #     "system_prompt": results[1]
-            # }
-
-        elif parallel == "thread":
-            pass
-            # with ThreadPoolExecutor() as executor:
-            #     user_prompt_list = arranged["user_prompt"]
-            #     system_prompt_list = arranged["system_prompt"]
-
-            #     # Process both prompt lists in parallel
-            #     rendered_user = executor.submit(_process_prompt, (user_prompt_list, {}))
-            #     rendered_system = executor.submit(_process_prompt, (system_prompt_list, {}))
-
-            #     prompts = {
-            #         "user_prompt": rendered_user.result(),
-            #         "system_prompt": rendered_system.result()
-            #     }
-
-        else:  # sequential processing
-            prompts = self.prompt_plan.get_prompts(**components)
-
-        plan_end = time.time()
-        logger.debug(f"Time taken for prompt processing: {plan_end - plan_start:.4f}s")
+        prompts = self.prompt_plan.get_prompts(**components)

         # Handle file keys if present
         if hasattr(self, 'question_file_keys') and self.question_file_keys:
-            files_start = time.time()
             files_list = []
             for key in self.question_file_keys:
                 files_list.append(self.scenario[key])
             prompts["files_list"] = files_list
-            files_end = time.time()
-            logger.debug(f"Time taken for file key processing: {files_end - files_start:.4f}s")

-        end = time.time()
-        logger.debug(f"Total time in get_prompts: {end - start:.4f}s")
         return prompts


-def _process_prompt(args):
-    """Helper function to process a single prompt list with its replacements."""
-    prompt_list, replacements = args
-    return prompt_list.reduce()
+# def _process_prompt(args):
+#     """Helper function to process a single prompt list with its replacements."""
+#     prompt_list, replacements = args
+#     return prompt_list.reduce()


 if __name__ == '__main__':
-    freeze_support()
+    import doctest
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
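
A minimal sketch of the simplified prompt-construction path: `get_prompts()` no longer accepts a `parallel` argument and always renders sequentially through `PromptPlan.get_prompts`. It follows the doctest added to `prior_answers_dict`; the `user_prompt`/`system_prompt` keys are an assumption carried over from the removed parallel code paths.

from edsl.agents.InvigilatorBase import InvigilatorBase

invigilator = InvigilatorBase.example()
constructor = invigilator.prompt_constructor

constructor.prior_answers_dict()     # {'q0': ..., 'q1': ...}
prompts = constructor.get_prompts()  # no parallel= argument anymore
user_prompt = prompts["user_prompt"]
system_prompt = prompts["system_prompt"]
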
edsl/agents/QuestionInstructionPromptBuilder.py CHANGED
@@ -1,8 +1,16 @@
-from typing import Dict, List, Set, Any, Union
+from typing import Dict, List, Set, Any, Union, TYPE_CHECKING
 from warnings import warn
 import logging
 from edsl.prompts.Prompt import Prompt

+if TYPE_CHECKING:
+    from edsl.agents.PromptConstructor import PromptConstructor
+    from edsl import Model
+    from edsl import Survey
+    from edsl.questions.QuestionBase import QuestionBase
+    from edsl import Scenario
+    from edsl import Agent
+
 from edsl.agents.QuestionTemplateReplacementsBuilder import (
     QuestionTemplateReplacementsBuilder as QTRB,
 )
@@ -13,23 +21,42 @@ class QuestionInstructionPromptBuilder:

     @classmethod
     def from_prompt_constructor(cls, prompt_constructor: "PromptConstructor"):
-
+
         model = prompt_constructor.model
         survey = prompt_constructor.survey
         question = prompt_constructor.question
-        return cls(prompt_constructor, model, survey, question)
+        scenario = prompt_constructor.scenario
+        prior_answers_dict = prompt_constructor.prior_answers_dict()
+        agent = prompt_constructor.agent
+        return cls(
+            prompt_constructor,
+            model,
+            survey,
+            question,
+            scenario,
+            prior_answers_dict,
+            agent,
+        )
+
+    def __init__(
+        self,
+        prompt_constructor: "PromptConstructor",
+        model: "Model",
+        survey: "Survey",
+        question: "QuestionBase",
+        scenario: "Scenario",
+        prior_answers_dict: Dict[str, Any],
+        agent: "Agent",
+    ):
+
+        self.qtrb = QTRB(scenario, question, prior_answers_dict, agent)

-    def __init__(self, prompt_constructor: "PromptConstructor", model:"Model", survey:"Survey", question:"QuestionBase"):
-        self.prompt_constructor = prompt_constructor
         self.model = model
         self.survey = survey
         self.question = question
-
-        self.scenario = prompt_constructor.scenario
-        self.prior_answers_dict = prompt_constructor.prior_answers_dict()
-
-        self.qtrb = QTRB.from_prompt_constructor(self.prompt_constructor)
-
+        self.agent = agent
+        self.scenario = scenario
+        self.prior_answers_dict = prior_answers_dict

     def build(self) -> Prompt:
         """Builds the complete question instructions prompt with all necessary components.
@@ -78,19 +105,23 @@ class QuestionInstructionPromptBuilder:
         """
         # Create base prompt
         base_prompt = self._create_base_prompt()
-
+
         # Enrich with options
-        enriched_prompt = self._enrich_with_question_options(prompt_data=base_prompt, scenario=self.scenario, prior_answers_dict=self.prior_answers_dict)
-
+        enriched_prompt = self._enrich_with_question_options(
+            prompt_data=base_prompt,
+            scenario=self.scenario,
+            prior_answers_dict=self.prior_answers_dict,
+        )
+
         # Render prompt
         rendered_prompt = self._render_prompt(enriched_prompt)
-
+
         # Validate template variables
         self._validate_template_variables(rendered_prompt)
-
+
         # Append survey instructions
         final_prompt = self._append_survey_instructions(rendered_prompt)
-
+
         return final_prompt

     def _create_base_prompt(self) -> Dict[str, Union[Prompt, Dict[str, Any]]]:
@@ -111,7 +142,9 @@ class QuestionInstructionPromptBuilder:
         }

     @staticmethod
-    def _process_question_options(question_data: Dict, scenario: 'Scenario', prior_answers_dict: Dict) -> Dict:
+    def _process_question_options(
+        question_data: Dict, scenario: "Scenario", prior_answers_dict: Dict
+    ) -> Dict:
         """Processes and replaces question options in the question data if they exist.

         The question_options could be intended to be replaced with data from a scenario or prior answers.
@@ -132,16 +165,18 @@ class QuestionInstructionPromptBuilder:
         """
         if "question_options" in question_data:
             from edsl.agents.question_option_processor import QuestionOptionProcessor
-
-            question_options = (QuestionOptionProcessor(scenario, prior_answers_dict)
-                                .get_question_options(question_data=question_data)
-            )
+
+            question_options = QuestionOptionProcessor(
+                scenario, prior_answers_dict
+            ).get_question_options(question_data=question_data)
             question_data["question_options"] = question_options
-
+
         return question_data

     @staticmethod
-    def _enrich_with_question_options(prompt_data: Dict, scenario: 'Scenario', prior_answers_dict: Dict) -> Dict:
+    def _enrich_with_question_options(
+        prompt_data: Dict, scenario: "Scenario", prior_answers_dict: Dict
+    ) -> Dict:
         """Enriches the prompt data with processed question options if they exist.

         Args:
@@ -152,8 +187,10 @@ class QuestionInstructionPromptBuilder:
         Returns:
             Dict: Enriched prompt data
         """
-        prompt_data["data"] = QuestionInstructionPromptBuilder._process_question_options(
-            prompt_data["data"], scenario, prior_answers_dict
+        prompt_data["data"] = (
+            QuestionInstructionPromptBuilder._process_question_options(
+                prompt_data["data"], scenario, prior_answers_dict
+            )
         )
         return prompt_data

@@ -167,10 +204,8 @@ class QuestionInstructionPromptBuilder:
             Prompt: Rendered instructions
         """
         # Build replacement dict
-        replacement_dict = self.qtrb.build_replacement_dict(
-            prompt_data["data"]
-        )
-
+        replacement_dict = self.qtrb.build_replacement_dict(prompt_data["data"])
+
         # Render with dict
         return prompt_data["prompt"].render(replacement_dict)

@@ -200,7 +235,9 @@ class QuestionInstructionPromptBuilder:
         """
         for question_name in self.survey.question_names:
             if question_name in undefined_vars:
-                logging.warning(f"Question name found in undefined_template_variables: {question_name}")
+                logging.warning(
+                    f"Question name found in undefined_template_variables: {question_name}"
+                )

     def _append_survey_instructions(self, rendered_prompt: Prompt) -> Prompt:
         """Appends any relevant survey instructions to the rendered prompt.
@@ -227,4 +264,5 @@ class QuestionInstructionPromptBuilder:

 if __name__ == "__main__":
     import doctest
-    doctest.testmod()
+
+    doctest.testmod()
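
A sketch of the builder's expanded construction path: its dependencies (scenario, prior answers, agent) are now passed explicitly and the `QTRB` helper is constructed directly rather than via `from_prompt_constructor`. The example invigilator is an assumption borrowed from the PromptConstructor doctest above.

from edsl.agents.InvigilatorBase import InvigilatorBase
from edsl.agents.QuestionInstructionPromptBuilder import QuestionInstructionPromptBuilder

pc = InvigilatorBase.example().prompt_constructor
builder = QuestionInstructionPromptBuilder.from_prompt_constructor(pc)

# build() runs: base prompt -> enrich with options -> render -> validate -> append survey instructions
question_instructions = builder.build()
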
edsl/agents/QuestionTemplateReplacementsBuilder.py CHANGED
@@ -125,6 +125,14 @@ class QuestionTemplateReplacementsBuilder:
     def _scenario_replacements(
         self, replacement_string: str = "<see file {key}>"
     ) -> dict[str, Any]:
+        """
+        >>> from edsl import Scenario
+        >>> from edsl import QuestionFreeText;
+        >>> q = QuestionFreeText(question_text = "How are you {{ scenario.friend }}?", question_name = "test")
+        >>> s = Scenario({'friend':'john'})
+        >>> q.by(s).prompts().select('user_prompt')
+        Dataset([{'user_prompt': [Prompt(text=\"""How are you john?\""")]}])
+        """
         # File references dictionary
         file_refs = {
             key: replacement_string.format(key=key) for key in self.scenario_file_keys()
@@ -134,7 +142,9 @@ class QuestionTemplateReplacementsBuilder:
         scenario_items = {
             k: v for k, v in self.scenario.items() if k not in self.scenario_file_keys()
         }
-        return {**file_refs, **scenario_items}
+        scenario_items_with_prefix = {'scenario': scenario_items}
+
+        return {**file_refs, **scenario_items, **scenario_items_with_prefix}

     @staticmethod
     def _question_data_replacements(
@@ -169,7 +179,7 @@ class QuestionTemplateReplacementsBuilder:
         >>> q = QuestionMultipleChoice(question_text="What do you think of this file: {{ file1 }}, {{ first_name}}", question_name = "q0", question_options = ["good", "bad"])
         >>> qtrb = QuestionTemplateReplacementsBuilder(scenario = s, question = q, prior_answers_dict = {'q0': 'q0'}, agent = "agent")
         >>> qtrb.build_replacement_dict(q.data)
-        {'file1': '<see file file1>', 'first_name': 'John', 'use_code': False, 'include_comment': True, 'question_name': 'q0', 'question_text': 'What do you think of this file: {{ file1 }}, {{ first_name}}', 'question_options': ['good', 'bad'], 'q0': 'q0', 'agent': 'agent'}
+        {'file1': '<see file file1>', 'first_name': 'John', 'scenario': {'first_name': 'John'}, 'use_code': False, 'include_comment': True, 'question_name': 'q0', 'question_text': 'What do you think of this file: {{ file1 }}, {{ first_name}}', 'question_options': ['good', 'bad'], 'q0': 'q0', 'agent': 'agent'}


         """