edsl 0.1.39__py3-none-any.whl → 0.1.39.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212)
  1. edsl/Base.py +116 -197
  2. edsl/__init__.py +7 -15
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +147 -351
  5. edsl/agents/AgentList.py +73 -211
  6. edsl/agents/Invigilator.py +50 -101
  7. edsl/agents/InvigilatorBase.py +70 -62
  8. edsl/agents/PromptConstructor.py +225 -143
  9. edsl/agents/__init__.py +1 -0
  10. edsl/agents/prompt_helpers.py +3 -3
  11. edsl/auto/AutoStudy.py +5 -18
  12. edsl/auto/StageBase.py +40 -53
  13. edsl/auto/StageQuestions.py +1 -2
  14. edsl/auto/utilities.py +6 -0
  15. edsl/config.py +2 -22
  16. edsl/conversation/car_buying.py +1 -2
  17. edsl/coop/PriceFetcher.py +1 -1
  18. edsl/coop/coop.py +47 -125
  19. edsl/coop/utils.py +14 -14
  20. edsl/data/Cache.py +27 -45
  21. edsl/data/CacheEntry.py +15 -12
  22. edsl/data/CacheHandler.py +12 -31
  23. edsl/data/RemoteCacheSync.py +46 -154
  24. edsl/data/__init__.py +3 -4
  25. edsl/data_transfer_models.py +1 -2
  26. edsl/enums.py +0 -27
  27. edsl/exceptions/__init__.py +50 -50
  28. edsl/exceptions/agents.py +0 -12
  29. edsl/exceptions/questions.py +6 -24
  30. edsl/exceptions/scenarios.py +0 -7
  31. edsl/inference_services/AnthropicService.py +19 -38
  32. edsl/inference_services/AwsBedrock.py +2 -0
  33. edsl/inference_services/AzureAI.py +2 -0
  34. edsl/inference_services/GoogleService.py +12 -7
  35. edsl/inference_services/InferenceServiceABC.py +85 -18
  36. edsl/inference_services/InferenceServicesCollection.py +79 -120
  37. edsl/inference_services/MistralAIService.py +3 -0
  38. edsl/inference_services/OpenAIService.py +35 -47
  39. edsl/inference_services/PerplexityService.py +3 -0
  40. edsl/inference_services/TestService.py +10 -11
  41. edsl/inference_services/TogetherAIService.py +3 -5
  42. edsl/jobs/Answers.py +14 -1
  43. edsl/jobs/Jobs.py +431 -356
  44. edsl/jobs/JobsChecks.py +10 -35
  45. edsl/jobs/JobsPrompts.py +4 -6
  46. edsl/jobs/JobsRemoteInferenceHandler.py +133 -205
  47. edsl/jobs/buckets/BucketCollection.py +3 -44
  48. edsl/jobs/buckets/TokenBucket.py +21 -53
  49. edsl/jobs/interviews/Interview.py +408 -143
  50. edsl/jobs/runners/JobsRunnerAsyncio.py +403 -88
  51. edsl/jobs/runners/JobsRunnerStatus.py +165 -133
  52. edsl/jobs/tasks/QuestionTaskCreator.py +19 -21
  53. edsl/jobs/tasks/TaskHistory.py +18 -38
  54. edsl/jobs/tasks/task_status_enum.py +2 -0
  55. edsl/language_models/KeyLookup.py +30 -0
  56. edsl/language_models/LanguageModel.py +236 -194
  57. edsl/language_models/ModelList.py +19 -28
  58. edsl/language_models/__init__.py +2 -1
  59. edsl/language_models/registry.py +190 -0
  60. edsl/language_models/repair.py +2 -2
  61. edsl/language_models/unused/ReplicateBase.py +83 -0
  62. edsl/language_models/utilities.py +4 -5
  63. edsl/notebooks/Notebook.py +14 -19
  64. edsl/prompts/Prompt.py +39 -29
  65. edsl/questions/{answer_validator_mixin.py → AnswerValidatorMixin.py} +2 -47
  66. edsl/questions/QuestionBase.py +214 -68
  67. edsl/questions/{question_base_gen_mixin.py → QuestionBaseGenMixin.py} +50 -57
  68. edsl/questions/QuestionBasePromptsMixin.py +3 -7
  69. edsl/questions/QuestionBudget.py +1 -1
  70. edsl/questions/QuestionCheckBox.py +3 -3
  71. edsl/questions/QuestionExtract.py +7 -5
  72. edsl/questions/QuestionFreeText.py +3 -2
  73. edsl/questions/QuestionList.py +18 -10
  74. edsl/questions/QuestionMultipleChoice.py +23 -67
  75. edsl/questions/QuestionNumerical.py +4 -2
  76. edsl/questions/QuestionRank.py +17 -7
  77. edsl/questions/{response_validator_abc.py → ResponseValidatorABC.py} +26 -40
  78. edsl/questions/SimpleAskMixin.py +3 -4
  79. edsl/questions/__init__.py +1 -2
  80. edsl/questions/derived/QuestionLinearScale.py +3 -6
  81. edsl/questions/derived/QuestionTopK.py +1 -1
  82. edsl/questions/descriptors.py +3 -17
  83. edsl/questions/question_registry.py +1 -1
  84. edsl/results/CSSParameterizer.py +1 -1
  85. edsl/results/Dataset.py +7 -170
  86. edsl/results/DatasetExportMixin.py +305 -168
  87. edsl/results/DatasetTree.py +8 -28
  88. edsl/results/Result.py +206 -298
  89. edsl/results/Results.py +131 -149
  90. edsl/results/ResultsDBMixin.py +238 -0
  91. edsl/results/ResultsExportMixin.py +0 -2
  92. edsl/results/{results_selector.py → Selector.py} +13 -23
  93. edsl/results/TableDisplay.py +171 -98
  94. edsl/results/__init__.py +1 -1
  95. edsl/scenarios/FileStore.py +239 -150
  96. edsl/scenarios/Scenario.py +193 -90
  97. edsl/scenarios/ScenarioHtmlMixin.py +3 -4
  98. edsl/scenarios/{scenario_join.py → ScenarioJoin.py} +6 -10
  99. edsl/scenarios/ScenarioList.py +244 -415
  100. edsl/scenarios/ScenarioListExportMixin.py +7 -0
  101. edsl/scenarios/ScenarioListPdfMixin.py +37 -15
  102. edsl/scenarios/__init__.py +2 -1
  103. edsl/study/ObjectEntry.py +1 -1
  104. edsl/study/SnapShot.py +1 -1
  105. edsl/study/Study.py +12 -5
  106. edsl/surveys/Rule.py +4 -5
  107. edsl/surveys/RuleCollection.py +27 -25
  108. edsl/surveys/Survey.py +791 -270
  109. edsl/surveys/SurveyCSS.py +8 -20
  110. edsl/surveys/{SurveyFlowVisualization.py → SurveyFlowVisualizationMixin.py} +9 -11
  111. edsl/surveys/__init__.py +2 -4
  112. edsl/surveys/descriptors.py +2 -6
  113. edsl/surveys/instructions/ChangeInstruction.py +2 -1
  114. edsl/surveys/instructions/Instruction.py +13 -4
  115. edsl/surveys/instructions/InstructionCollection.py +6 -11
  116. edsl/templates/error_reporting/interview_details.html +1 -1
  117. edsl/templates/error_reporting/report.html +1 -1
  118. edsl/tools/plotting.py +1 -1
  119. edsl/utilities/utilities.py +23 -35
  120. {edsl-0.1.39.dist-info → edsl-0.1.39.dev1.dist-info}/METADATA +10 -12
  121. edsl-0.1.39.dev1.dist-info/RECORD +277 -0
  122. {edsl-0.1.39.dist-info → edsl-0.1.39.dev1.dist-info}/WHEEL +1 -1
  123. edsl/agents/QuestionInstructionPromptBuilder.py +0 -128
  124. edsl/agents/QuestionTemplateReplacementsBuilder.py +0 -137
  125. edsl/agents/question_option_processor.py +0 -172
  126. edsl/coop/CoopFunctionsMixin.py +0 -15
  127. edsl/coop/ExpectedParrotKeyHandler.py +0 -125
  128. edsl/exceptions/inference_services.py +0 -5
  129. edsl/inference_services/AvailableModelCacheHandler.py +0 -184
  130. edsl/inference_services/AvailableModelFetcher.py +0 -215
  131. edsl/inference_services/ServiceAvailability.py +0 -135
  132. edsl/inference_services/data_structures.py +0 -134
  133. edsl/jobs/AnswerQuestionFunctionConstructor.py +0 -223
  134. edsl/jobs/FetchInvigilator.py +0 -47
  135. edsl/jobs/InterviewTaskManager.py +0 -98
  136. edsl/jobs/InterviewsConstructor.py +0 -50
  137. edsl/jobs/JobsComponentConstructor.py +0 -189
  138. edsl/jobs/JobsRemoteInferenceLogger.py +0 -239
  139. edsl/jobs/RequestTokenEstimator.py +0 -30
  140. edsl/jobs/async_interview_runner.py +0 -138
  141. edsl/jobs/buckets/TokenBucketAPI.py +0 -211
  142. edsl/jobs/buckets/TokenBucketClient.py +0 -191
  143. edsl/jobs/check_survey_scenario_compatibility.py +0 -85
  144. edsl/jobs/data_structures.py +0 -120
  145. edsl/jobs/decorators.py +0 -35
  146. edsl/jobs/jobs_status_enums.py +0 -9
  147. edsl/jobs/loggers/HTMLTableJobLogger.py +0 -304
  148. edsl/jobs/results_exceptions_handler.py +0 -98
  149. edsl/language_models/ComputeCost.py +0 -63
  150. edsl/language_models/PriceManager.py +0 -127
  151. edsl/language_models/RawResponseHandler.py +0 -106
  152. edsl/language_models/ServiceDataSources.py +0 -0
  153. edsl/language_models/key_management/KeyLookup.py +0 -63
  154. edsl/language_models/key_management/KeyLookupBuilder.py +0 -273
  155. edsl/language_models/key_management/KeyLookupCollection.py +0 -38
  156. edsl/language_models/key_management/__init__.py +0 -0
  157. edsl/language_models/key_management/models.py +0 -131
  158. edsl/language_models/model.py +0 -256
  159. edsl/notebooks/NotebookToLaTeX.py +0 -142
  160. edsl/questions/ExceptionExplainer.py +0 -77
  161. edsl/questions/HTMLQuestion.py +0 -103
  162. edsl/questions/QuestionMatrix.py +0 -265
  163. edsl/questions/data_structures.py +0 -20
  164. edsl/questions/loop_processor.py +0 -149
  165. edsl/questions/response_validator_factory.py +0 -34
  166. edsl/questions/templates/matrix/__init__.py +0 -1
  167. edsl/questions/templates/matrix/answering_instructions.jinja +0 -5
  168. edsl/questions/templates/matrix/question_presentation.jinja +0 -20
  169. edsl/results/MarkdownToDocx.py +0 -122
  170. edsl/results/MarkdownToPDF.py +0 -111
  171. edsl/results/TextEditor.py +0 -50
  172. edsl/results/file_exports.py +0 -252
  173. edsl/results/smart_objects.py +0 -96
  174. edsl/results/table_data_class.py +0 -12
  175. edsl/results/table_renderers.py +0 -118
  176. edsl/scenarios/ConstructDownloadLink.py +0 -109
  177. edsl/scenarios/DocumentChunker.py +0 -102
  178. edsl/scenarios/DocxScenario.py +0 -16
  179. edsl/scenarios/PdfExtractor.py +0 -40
  180. edsl/scenarios/directory_scanner.py +0 -96
  181. edsl/scenarios/file_methods.py +0 -85
  182. edsl/scenarios/handlers/__init__.py +0 -13
  183. edsl/scenarios/handlers/csv.py +0 -49
  184. edsl/scenarios/handlers/docx.py +0 -76
  185. edsl/scenarios/handlers/html.py +0 -37
  186. edsl/scenarios/handlers/json.py +0 -111
  187. edsl/scenarios/handlers/latex.py +0 -5
  188. edsl/scenarios/handlers/md.py +0 -51
  189. edsl/scenarios/handlers/pdf.py +0 -68
  190. edsl/scenarios/handlers/png.py +0 -39
  191. edsl/scenarios/handlers/pptx.py +0 -105
  192. edsl/scenarios/handlers/py.py +0 -294
  193. edsl/scenarios/handlers/sql.py +0 -313
  194. edsl/scenarios/handlers/sqlite.py +0 -149
  195. edsl/scenarios/handlers/txt.py +0 -33
  196. edsl/scenarios/scenario_selector.py +0 -156
  197. edsl/surveys/ConstructDAG.py +0 -92
  198. edsl/surveys/EditSurvey.py +0 -221
  199. edsl/surveys/InstructionHandler.py +0 -100
  200. edsl/surveys/MemoryManagement.py +0 -72
  201. edsl/surveys/RuleManager.py +0 -172
  202. edsl/surveys/Simulator.py +0 -75
  203. edsl/surveys/SurveyToApp.py +0 -141
  204. edsl/utilities/PrettyList.py +0 -56
  205. edsl/utilities/is_notebook.py +0 -18
  206. edsl/utilities/is_valid_variable_name.py +0 -11
  207. edsl/utilities/remove_edsl_version.py +0 -24
  208. edsl-0.1.39.dist-info/RECORD +0 -358
  209. /edsl/questions/{register_questions_meta.py → RegisterQuestionsMeta.py} +0 -0
  210. /edsl/results/{results_fetch_mixin.py → ResultsFetchMixin.py} +0 -0
  211. /edsl/results/{results_tools_mixin.py → ResultsToolsMixin.py} +0 -0
  212. {edsl-0.1.39.dist-info → edsl-0.1.39.dev1.dist-info}/LICENSE +0 -0
edsl/agents/PromptConstructor.py CHANGED
@@ -1,78 +1,57 @@
  from __future__ import annotations
- from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING
- from functools import cached_property
+ from typing import Dict, Any, Optional, Set

- from edsl.prompts.Prompt import Prompt
-
- from dataclasses import dataclass
+ from jinja2 import Environment, meta

- from .prompt_helpers import PromptPlan
- from .QuestionTemplateReplacementsBuilder import (
-     QuestionTemplateReplacementsBuilder,
- )
- from .question_option_processor import QuestionOptionProcessor
-
- if TYPE_CHECKING:
-     from edsl.agents.InvigilatorBase import InvigilatorBase
-     from edsl.questions.QuestionBase import QuestionBase
-     from edsl.agents.Agent import Agent
-     from edsl.surveys.Survey import Survey
-     from edsl.language_models.LanguageModel import LanguageModel
-     from edsl.surveys.MemoryPlan import MemoryPlan
-     from edsl.questions.QuestionBase import QuestionBase
-     from edsl.scenarios.Scenario import Scenario
+ from edsl.prompts.Prompt import Prompt
+ from edsl.agents.prompt_helpers import PromptPlan


- class BasePlaceholder:
-     """Base class for placeholder values when a question is not yet answered."""
+ class PlaceholderAnswer:
+     """A placeholder answer for when a question is not yet answered."""

-     def __init__(self, placeholder_type: str = "answer"):
-         self.value = "N/A"
+     def __init__(self):
+         self.answer = "N/A"
          self.comment = "Will be populated by prior answer"
-         self._type = placeholder_type

      def __getitem__(self, index):
          return ""

      def __str__(self):
-         return f"<<{self.__class__.__name__}:{self._type}>>"
+         return "<<PlaceholderAnswer>>"

      def __repr__(self):
-         return self.__str__()
-
+         return "<<PlaceholderAnswer>>"

- class PlaceholderAnswer(BasePlaceholder):
-     def __init__(self):
-         super().__init__("answer")

+ def get_jinja2_variables(template_str: str) -> Set[str]:
+     """
+     Extracts all variable names from a Jinja2 template using Jinja2's built-in parsing.

- class PlaceholderComment(BasePlaceholder):
-     def __init__(self):
-         super().__init__("comment")
-
+     Args:
+         template_str (str): The Jinja2 template string

- class PlaceholderGeneratedTokens(BasePlaceholder):
-     def __init__(self):
-         super().__init__("generated_tokens")
+     Returns:
+         Set[str]: A set of variable names found in the template
+     """
+     env = Environment()
+     ast = env.parse(template_str)
+     return meta.find_undeclared_variables(ast)


  class PromptConstructor:
      """
-     This class constructs the prompts for the language model.
-
      The pieces of a prompt are:
      - The agent instructions - "You are answering questions as if you were a human. Do not break character."
      - The persona prompt - "You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}"
      - The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
      - The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
+
+     This is mixed into the Invigilator class.
      """

-     def __init__(
-         self, invigilator: "InvigilatorBase", prompt_plan: Optional["PromptPlan"] = None
-     ):
+     def __init__(self, invigilator, prompt_plan: Optional["PromptPlan"] = None):
          self.invigilator = invigilator
-         self.prompt_plan = prompt_plan or PromptPlan()
-
          self.agent = invigilator.agent
          self.question = invigilator.question
          self.scenario = invigilator.scenario
@@ -80,12 +59,22 @@ class PromptConstructor:
          self.model = invigilator.model
          self.current_answers = invigilator.current_answers
          self.memory_plan = invigilator.memory_plan
+         self.prompt_plan = prompt_plan or PromptPlan()

-     def get_question_options(self, question_data):
-         """Get the question options."""
-         return QuestionOptionProcessor(self).get_question_options(question_data)
+     @property
+     def scenario_file_keys(self) -> list:
+         """We need to find all the keys in the scenario that refer to FileStore objects.
+         These will be used to append to the prompt a list of files that are part of the scenario.
+         """
+         from edsl.scenarios.FileStore import FileStore
+
+         file_entries = []
+         for key, value in self.scenario.items():
+             if isinstance(value, FileStore):
+                 file_entries.append(key)
+         return file_entries

-     @cached_property
+     @property
      def agent_instructions_prompt(self) -> Prompt:
          """
          >>> from edsl.agents.InvigilatorBase import InvigilatorBase
@@ -93,14 +82,14 @@ class PromptConstructor:
          >>> i.prompt_constructor.agent_instructions_prompt
          Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
          """
-         from edsl.agents.Agent import Agent
+         from edsl import Agent

          if self.agent == Agent():  # if agent is empty, then return an empty prompt
              return Prompt(text="")

          return Prompt(text=self.agent.instruction)

-     @cached_property
+     @property
      def agent_persona_prompt(self) -> Prompt:
          """
          >>> from edsl.agents.InvigilatorBase import InvigilatorBase
@@ -108,96 +97,159 @@ class PromptConstructor:
          >>> i.prompt_constructor.agent_persona_prompt
          Prompt(text=\"""Your traits: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
          """
-         from edsl.agents.Agent import Agent
+         from edsl import Agent

          if self.agent == Agent():  # if agent is empty, then return an empty prompt
              return Prompt(text="")

          return self.agent.prompt()

-     def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
-         """This is a dictionary of prior answers, if they exist."""
-         return self._add_answers(
-             self.survey.question_names_to_questions(), self.current_answers
-         )
-
-     @staticmethod
-     def _extract_quetion_and_entry_type(key_entry) -> tuple[str, str]:
-         """
-         Extracts the question name and type for the current answer dictionary key entry.
-
-         >>> PromptConstructor._extract_quetion_and_entry_type("q0")
-         ('q0', 'answer')
-         >>> PromptConstructor._extract_quetion_and_entry_type("q0_comment")
-         ('q0', 'comment')
-         >>> PromptConstructor._extract_quetion_and_entry_type("q0_alternate_generated_tokens")
-         ('q0_alternate', 'generated_tokens')
-         >>> PromptConstructor._extract_quetion_and_entry_type("q0_alt_comment")
-         ('q0_alt', 'comment')
-         """
-         split_list = key_entry.rsplit("_", maxsplit=1)
-         if len(split_list) == 1:
-             question_name = split_list[0]
-             entry_type = "answer"
-         else:
-             if split_list[1] == "comment":
-                 question_name = split_list[0]
-                 entry_type = "comment"
-             elif split_list[1] == "tokens":  # it's actually 'generated_tokens'
-                 question_name = key_entry.replace("_generated_tokens", "")
-                 entry_type = "generated_tokens"
+     def prior_answers_dict(self) -> dict:
+         # this is all questions
+         d = self.survey.question_names_to_questions()
+         # This attaches the answer to the question
+         for question in d:
+             if question in self.current_answers:
+                 d[question].answer = self.current_answers[question]
              else:
-                 question_name = key_entry
-                 entry_type = "answer"
-         return question_name, entry_type
-
-     @staticmethod
-     def _augmented_answers_dict(current_answers: dict) -> dict:
+                 d[question].answer = PlaceholderAnswer()
+
+         # if (new_question := question.split("_comment")[0]) in d:
+         #     d[new_question].comment = answer
+         #     d[question].answer = PlaceholderAnswer()
+
+         # breakpoint()
+         return d
+
+     @property
+     def question_file_keys(self):
+         raw_question_text = self.question.question_text
+         variables = get_jinja2_variables(raw_question_text)
+         question_file_keys = []
+         for var in variables:
+             if var in self.scenario_file_keys:
+                 question_file_keys.append(var)
+         return question_file_keys
+
+     def build_replacement_dict(self, question_data: dict):
          """
-         >>> PromptConstructor._augmented_answers_dict({"q0": "LOVE IT!", "q0_comment": "I love school!"})
-         {'q0': {'answer': 'LOVE IT!', 'comment': 'I love school!'}}
+         Builds a dictionary of replacement values by combining multiple data sources.
          """
-         from collections import defaultdict
+         # File references dictionary
+         file_refs = {key: f"<see file {key}>" for key in self.scenario_file_keys}
+
+         # Scenario items excluding file keys
+         scenario_items = {
+             k: v for k, v in self.scenario.items() if k not in self.scenario_file_keys
+         }
+
+         # Question settings with defaults
+         question_settings = {
+             "use_code": getattr(self.question, "_use_code", True),
+             "include_comment": getattr(self.question, "_include_comment", False),
+         }
+
+         # Combine all dictionaries using dict.update() for clarity
+         replacement_dict = {}
+         for d in [
+             file_refs,
+             question_data,
+             scenario_items,
+             self.prior_answers_dict(),
+             {"agent": self.agent},
+             question_settings,
+         ]:
+             replacement_dict.update(d)
+
+         return replacement_dict
+
+     def _get_question_options(self, question_data):
+         question_options_entry = question_data.get("question_options", None)
+         question_options = question_options_entry
+
+         placeholder = ["<< Option 1 - Placholder >>", "<< Option 2 - Placholder >>"]
+
+         # print("Question options entry: ", question_options_entry)
+
+         if isinstance(question_options_entry, str):
+             env = Environment()
+             parsed_content = env.parse(question_options_entry)
+             question_option_key = list(meta.find_undeclared_variables(parsed_content))[
+                 0
+             ]
+             if isinstance(self.scenario.get(question_option_key), list):
+                 question_options = self.scenario.get(question_option_key)
+
+             # might be getting it from the prior answers
+             if self.prior_answers_dict().get(question_option_key) is not None:
+                 prior_question = self.prior_answers_dict().get(question_option_key)
+                 if hasattr(prior_question, "answer"):
+                     if isinstance(prior_question.answer, list):
+                         question_options = prior_question.answer
+                     else:
+                         question_options = placeholder
+                 else:
+                     question_options = placeholder
+
+         return question_options
+
+     def build_question_instructions_prompt(self):
+         """Buils the question instructions prompt."""

-         d = defaultdict(dict)
-         for key, value in current_answers.items():
-             question_name, entry_type = (
-                 PromptConstructor._extract_quetion_and_entry_type(key)
-             )
-             d[question_name][entry_type] = value
-         return dict(d)
+         question_prompt = Prompt(self.question.get_instructions(model=self.model.model))

-     @staticmethod
-     def _add_answers(
-         answer_dict: dict, current_answers: dict
-     ) -> dict[str, "QuestionBase"]:
-         """
-         >>> from edsl import QuestionFreeText
-         >>> d = {"q0": QuestionFreeText(question_text="Do you like school?", question_name = "q0")}
-         >>> current_answers = {"q0": "LOVE IT!"}
-         >>> PromptConstructor._add_answers(d, current_answers)['q0'].answer
-         'LOVE IT!'
-         """
-         augmented_answers = PromptConstructor._augmented_answers_dict(current_answers)
+         # Get the data for the question - this is a dictionary of the question data
+         # e.g., {'question_text': 'Do you like school?', 'question_name': 'q0', 'question_options': ['yes', 'no']}
+         question_data = self.question.data.copy()

-         for question in answer_dict:
-             if question in augmented_answers:
-                 for entry_type, value in augmented_answers[question].items():
-                     setattr(answer_dict[question], entry_type, value)
-             else:
-                 answer_dict[question].answer = PlaceholderAnswer()
-                 answer_dict[question].comment = PlaceholderComment()
-                 answer_dict[question].generated_tokens = PlaceholderGeneratedTokens()
-         return answer_dict
-
-     @cached_property
-     def question_file_keys(self) -> list:
-         """Extracts the file keys from the question text.
-         It checks if the variables in the question text are in the scenario file keys.
-         """
-         return QuestionTemplateReplacementsBuilder(self).question_file_keys()
+         if (
+             "question_options" in question_data
+         ):  # is this a question with question options?
+             question_options = self._get_question_options(question_data)
+             question_data["question_options"] = question_options
+
+         replacement_dict = self.build_replacement_dict(question_data)
+         rendered_instructions = question_prompt.render(replacement_dict)

-     @cached_property
+         # is there anything left to render?
+         undefined_template_variables = (
+             rendered_instructions.undefined_template_variables({})
+         )
+
+         # Check if it's the name of a question in the survey
+         for question_name in self.survey.question_names:
+             if question_name in undefined_template_variables:
+                 print(
+                     "Question name found in undefined_template_variables: ",
+                     question_name,
+                 )
+
+         if undefined_template_variables:
+             msg = f"Question instructions still has variables: {undefined_template_variables}."
+             import warnings
+
+             warnings.warn(msg)
+             # raise QuestionScenarioRenderError(
+             #     f"Question instructions still has variables: {undefined_template_variables}."
+             # )
+
+         # Check if question has instructions - these are instructions in a Survey that can apply to multiple follow-on questions
+         relevant_instructions = self.survey.relevant_instructions(
+             self.question.question_name
+         )
+
+         if relevant_instructions != []:
+             # preamble_text = Prompt(
+             #     text="You were given the following instructions: "
+             # )
+             preamble_text = Prompt(text="")
+             for instruction in relevant_instructions:
+                 preamble_text += instruction.text
+             rendered_instructions = preamble_text + rendered_instructions
+
+         return rendered_instructions
+
+     @property
      def question_instructions_prompt(self) -> Prompt:
          """
          >>> from edsl.agents.InvigilatorBase import InvigilatorBase
@@ -206,24 +258,25 @@ class PromptConstructor:
          Prompt(text=\"""...
          ...
          """
-         return self.build_question_instructions_prompt()
-
-     def build_question_instructions_prompt(self) -> Prompt:
-         """Buils the question instructions prompt."""
-         from edsl.agents.QuestionInstructionPromptBuilder import (
-             QuestionInstructionPromptBuilder,
-         )
+         if not hasattr(self, "_question_instructions_prompt"):
+             self._question_instructions_prompt = (
+                 self.build_question_instructions_prompt()
+             )

-         return QuestionInstructionPromptBuilder(self).build()
+         return self._question_instructions_prompt

-     @cached_property
+     @property
      def prior_question_memory_prompt(self) -> Prompt:
-         memory_prompt = Prompt(text="")
-         if self.memory_plan is not None:
-             memory_prompt += self.create_memory_prompt(
-                 self.question.question_name
-             ).render(self.scenario | self.prior_answers_dict())
-         return memory_prompt
+         if not hasattr(self, "_prior_question_memory_prompt"):
+             from edsl.prompts.Prompt import Prompt
+
+             memory_prompt = Prompt(text="")
+             if self.memory_plan is not None:
+                 memory_prompt += self.create_memory_prompt(
+                     self.question.question_name
+                 ).render(self.scenario | self.prior_answers_dict())
+             self._prior_question_memory_prompt = memory_prompt
+         return self._prior_question_memory_prompt

      def create_memory_prompt(self, question_name: str) -> Prompt:
          """Create a memory for the agent.
@@ -242,6 +295,24 @@ class PromptConstructor:
              question_name, self.current_answers
          )

+     def construct_system_prompt(self) -> Prompt:
+         """Construct the system prompt for the LLM call."""
+         import warnings
+
+         warnings.warn(
+             "This method is deprecated. Use get_prompts instead.", DeprecationWarning
+         )
+         return self.get_prompts()["system_prompt"]
+
+     def construct_user_prompt(self) -> Prompt:
+         """Construct the user prompt for the LLM call."""
+         import warnings
+
+         warnings.warn(
+             "This method is deprecated. Use get_prompts instead.", DeprecationWarning
+         )
+         return self.get_prompts()["user_prompt"]
+
      def get_prompts(self) -> Dict[str, Prompt]:
          """Get both prompts for the LLM call.

@@ -252,6 +323,7 @@ class PromptConstructor:
          >>> i.get_prompts()
          {'user_prompt': ..., 'system_prompt': ...}
          """
+         # breakpoint()
          prompts = self.prompt_plan.get_prompts(
              agent_instructions=self.agent_instructions_prompt,
              agent_persona=self.agent_persona_prompt,
@@ -265,6 +337,16 @@ class PromptConstructor:
          prompts["files_list"] = files_list
          return prompts

+     def _get_scenario_with_image(self) -> Scenario:
+         """This is a helper function to get a scenario with an image, for testing purposes."""
+         from edsl import Scenario
+
+         try:
+             scenario = Scenario.from_image("../../static/logo.png")
+         except FileNotFoundError:
+             scenario = Scenario.from_image("static/logo.png")
+         return scenario
+

  if __name__ == "__main__":
      import doctest
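
Editor's note on the reinstated get_jinja2_variables helper above: it asks Jinja2 to parse the template and report the undeclared variables, which is how the dev1 version detects scenario and file keys referenced in question text. A minimal, self-contained usage sketch (the sample template string is invented for illustration):

from jinja2 import Environment, meta

def get_jinja2_variables(template_str: str) -> set:
    # Parse the template and collect the variables it expects to be supplied.
    env = Environment()
    ast = env.parse(template_str)
    return meta.find_undeclared_variables(ast)

# A {{ topic }} placeholder and an attribute access each register as one variable.
print(get_jinja2_variables("Do you like {{ topic }}, {{ agent.name }}?"))
# Expected output (a set, so order may vary): {'topic', 'agent'}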
edsl/agents/__init__.py CHANGED
@@ -1,2 +1,3 @@
  from edsl.agents.Agent import Agent
  from edsl.agents.AgentList import AgentList
+ from edsl.agents.InvigilatorBase import InvigilatorBase
edsl/agents/prompt_helpers.py CHANGED
@@ -1,7 +1,7 @@
  import enum
  from typing import Dict, Optional
  from collections import UserList
- from edsl.prompts.Prompt import Prompt
+ from edsl.prompts import Prompt


  class PromptComponent(enum.Enum):
@@ -12,14 +12,14 @@ class PromptComponent(enum.Enum):


  class PromptList(UserList):
-     separator = Prompt("")
+     separator = Prompt(" ")

      def reduce(self):
          """Reduce the list of prompts to a single prompt.

          >>> p = PromptList([Prompt("You are a happy-go lucky agent."), Prompt("You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}")])
          >>> p.reduce()
-         Prompt(text=\"""You are a happy-go lucky agent.You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
+         Prompt(text=\"""You are a happy-go lucky agent. You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")

          """
          p = self[0]
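
Editor's note on the change above: the only behavioral difference is PromptList.separator, so reduced prompts are now joined with a single space instead of being concatenated directly, as the updated doctest shows. A rough stand-in sketch using plain strings (the real reduce() operates on Prompt objects, so this is only an approximation of the joining behavior):

def reduce(prompts: list[str], separator: str) -> str:
    # Fold the list into one string, inserting the separator between consecutive prompts.
    combined = prompts[0]
    for prompt in prompts[1:]:
        combined = combined + separator + prompt
    return combined

parts = ["You are a happy-go lucky agent.", "Your traits: {'age': 22}"]
print(reduce(parts, ""))   # 0.1.39 behavior: no space at the join
print(reduce(parts, " "))  # 0.1.39.dev1 behavior: single space at the join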
edsl/auto/AutoStudy.py CHANGED
@@ -1,4 +1,4 @@
- from typing import Optional, TYPE_CHECKING
+ from typing import Optional

  from edsl import Model
  from edsl.auto.StageQuestions import StageQuestions
@@ -11,11 +11,9 @@ from edsl.auto.StagePersonaDimensionValueRanges import (
  from edsl.auto.StageLabelQuestions import StageLabelQuestions
  from edsl.auto.StageGenerateSurvey import StageGenerateSurvey

- from edsl.auto.utilities import agent_generator, create_agents, gen_pipeline
+ # from edsl.auto.StageBase import gen_pipeline

- if TYPE_CHECKING:
-     from edsl.surveys.Survey import Survey
-     from edsl.agents.AgentList import AgentList
+ from edsl.auto.utilities import agent_generator, create_agents, gen_pipeline


  class AutoStudy:
@@ -26,10 +24,8 @@ class AutoStudy:
          model: Optional["Model"] = None,
          survey: Optional["Survey"] = None,
          agent_list: Optional["AgentList"] = None,
-         default_num_agents: int = 11,
+         default_num_agents=11,
      ):
-         """AutoStudy class for generating surveys and agents."""
-
          self.overall_question = overall_question
          self.population = population
          self._survey = survey
@@ -40,15 +36,6 @@ class AutoStudy:
          self.default_num_agents = default_num_agents
          self.model = model or Model()

-     def to_dict(self):
-         return {
-             "overall_question": self.overall_question,
-             "population": self.population,
-             "survey": self.survey.to_dict(),
-             "persona_mapping": self.persona_mapping.to_dict(),
-             "results": self.results.to_dict(),
-         }
-
      @property
      def survey(self):
          if self._survey is None:
@@ -124,7 +111,7 @@ class AutoStudy:


  if __name__ == "__main__":
-     overall_question = "I have an open source Python library for working with LLMs. What are some ways we can market this to others?"
+     overall_question = "Should online platforms be regulated with respect to selling electric scooters?"
      auto_study = AutoStudy(overall_question, population="US Adults")

      results = auto_study.results