edsl-0.1.38.dev2-py3-none-any.whl → edsl-0.1.38.dev3-py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that public registry.
Files changed (248)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +858 -858
  7. edsl/agents/AgentList.py +362 -362
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -284
  10. edsl/agents/PromptConstructor.py +353 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +149 -149
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +961 -961
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +530 -530
  37. edsl/data/CacheEntry.py +228 -228
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +97 -97
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +173 -173
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -42
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +156 -156
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/TestService.py +89 -89
  72. edsl/inference_services/TogetherAIService.py +170 -170
  73. edsl/inference_services/models_available_cache.py +118 -118
  74. edsl/inference_services/rate_limits_cache.py +25 -25
  75. edsl/inference_services/registry.py +39 -39
  76. edsl/inference_services/write_available.py +10 -10
  77. edsl/jobs/Answers.py +56 -56
  78. edsl/jobs/Jobs.py +1358 -1358
  79. edsl/jobs/__init__.py +1 -1
  80. edsl/jobs/buckets/BucketCollection.py +63 -63
  81. edsl/jobs/buckets/ModelBuckets.py +65 -65
  82. edsl/jobs/buckets/TokenBucket.py +251 -251
  83. edsl/jobs/interviews/Interview.py +661 -661
  84. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  85. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  86. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  87. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  88. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  89. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  90. edsl/jobs/interviews/ReportErrors.py +66 -66
  91. edsl/jobs/interviews/interview_status_enum.py +9 -9
  92. edsl/jobs/runners/JobsRunnerAsyncio.py +361 -361
  93. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  94. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  95. edsl/jobs/tasks/TaskCreators.py +64 -64
  96. edsl/jobs/tasks/TaskHistory.py +451 -451
  97. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  98. edsl/jobs/tasks/task_status_enum.py +163 -163
  99. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  100. edsl/jobs/tokens/TokenUsage.py +34 -34
  101. edsl/language_models/KeyLookup.py +30 -30
  102. edsl/language_models/LanguageModel.py +708 -708
  103. edsl/language_models/ModelList.py +109 -109
  104. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  105. edsl/language_models/__init__.py +3 -3
  106. edsl/language_models/fake_openai_call.py +15 -15
  107. edsl/language_models/fake_openai_service.py +61 -61
  108. edsl/language_models/registry.py +137 -137
  109. edsl/language_models/repair.py +156 -156
  110. edsl/language_models/unused/ReplicateBase.py +83 -83
  111. edsl/language_models/utilities.py +64 -64
  112. edsl/notebooks/Notebook.py +258 -258
  113. edsl/notebooks/__init__.py +1 -1
  114. edsl/prompts/Prompt.py +357 -357
  115. edsl/prompts/__init__.py +2 -2
  116. edsl/questions/AnswerValidatorMixin.py +289 -289
  117. edsl/questions/QuestionBase.py +660 -660
  118. edsl/questions/QuestionBaseGenMixin.py +161 -161
  119. edsl/questions/QuestionBasePromptsMixin.py +217 -217
  120. edsl/questions/QuestionBudget.py +227 -227
  121. edsl/questions/QuestionCheckBox.py +359 -359
  122. edsl/questions/QuestionExtract.py +183 -183
  123. edsl/questions/QuestionFreeText.py +114 -114
  124. edsl/questions/QuestionFunctional.py +166 -166
  125. edsl/questions/QuestionList.py +231 -231
  126. edsl/questions/QuestionMultipleChoice.py +286 -286
  127. edsl/questions/QuestionNumerical.py +153 -153
  128. edsl/questions/QuestionRank.py +324 -324
  129. edsl/questions/Quick.py +41 -41
  130. edsl/questions/RegisterQuestionsMeta.py +71 -71
  131. edsl/questions/ResponseValidatorABC.py +174 -174
  132. edsl/questions/SimpleAskMixin.py +73 -73
  133. edsl/questions/__init__.py +26 -26
  134. edsl/questions/compose_questions.py +98 -98
  135. edsl/questions/decorators.py +21 -21
  136. edsl/questions/derived/QuestionLikertFive.py +76 -76
  137. edsl/questions/derived/QuestionLinearScale.py +87 -87
  138. edsl/questions/derived/QuestionTopK.py +93 -93
  139. edsl/questions/derived/QuestionYesNo.py +82 -82
  140. edsl/questions/descriptors.py +413 -413
  141. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  142. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  143. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  144. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  145. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  146. edsl/questions/prompt_templates/question_list.jinja +17 -17
  147. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  148. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  149. edsl/questions/question_registry.py +147 -147
  150. edsl/questions/settings.py +12 -12
  151. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  152. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  153. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  154. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  155. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  157. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  158. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  159. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  160. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  161. edsl/questions/templates/list/question_presentation.jinja +5 -5
  162. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  163. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  164. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  165. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  166. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  167. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  168. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  169. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  170. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  171. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  172. edsl/results/Dataset.py +293 -293
  173. edsl/results/DatasetExportMixin.py +717 -717
  174. edsl/results/DatasetTree.py +145 -145
  175. edsl/results/Result.py +456 -456
  176. edsl/results/Results.py +1071 -1071
  177. edsl/results/ResultsDBMixin.py +238 -238
  178. edsl/results/ResultsExportMixin.py +43 -43
  179. edsl/results/ResultsFetchMixin.py +33 -33
  180. edsl/results/ResultsGGMixin.py +121 -121
  181. edsl/results/ResultsToolsMixin.py +98 -98
  182. edsl/results/Selector.py +135 -135
  183. edsl/results/__init__.py +2 -2
  184. edsl/results/tree_explore.py +115 -115
  185. edsl/scenarios/FileStore.py +458 -458
  186. edsl/scenarios/Scenario.py +544 -544
  187. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  188. edsl/scenarios/ScenarioList.py +1112 -1112
  189. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  190. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  191. edsl/scenarios/__init__.py +4 -4
  192. edsl/shared.py +1 -1
  193. edsl/study/ObjectEntry.py +173 -173
  194. edsl/study/ProofOfWork.py +113 -113
  195. edsl/study/SnapShot.py +80 -80
  196. edsl/study/Study.py +528 -528
  197. edsl/study/__init__.py +4 -4
  198. edsl/surveys/DAG.py +148 -148
  199. edsl/surveys/Memory.py +31 -31
  200. edsl/surveys/MemoryPlan.py +244 -244
  201. edsl/surveys/Rule.py +326 -326
  202. edsl/surveys/RuleCollection.py +387 -387
  203. edsl/surveys/Survey.py +1787 -1787
  204. edsl/surveys/SurveyCSS.py +261 -261
  205. edsl/surveys/SurveyExportMixin.py +259 -259
  206. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  207. edsl/surveys/SurveyQualtricsImport.py +284 -284
  208. edsl/surveys/__init__.py +3 -3
  209. edsl/surveys/base.py +53 -53
  210. edsl/surveys/descriptors.py +56 -56
  211. edsl/surveys/instructions/ChangeInstruction.py +49 -49
  212. edsl/surveys/instructions/Instruction.py +53 -53
  213. edsl/surveys/instructions/InstructionCollection.py +77 -77
  214. edsl/templates/error_reporting/base.html +23 -23
  215. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  216. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  217. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  218. edsl/templates/error_reporting/interview_details.html +115 -115
  219. edsl/templates/error_reporting/interviews.html +9 -9
  220. edsl/templates/error_reporting/overview.html +4 -4
  221. edsl/templates/error_reporting/performance_plot.html +1 -1
  222. edsl/templates/error_reporting/report.css +73 -73
  223. edsl/templates/error_reporting/report.html +117 -117
  224. edsl/templates/error_reporting/report.js +25 -25
  225. edsl/tools/__init__.py +1 -1
  226. edsl/tools/clusters.py +192 -192
  227. edsl/tools/embeddings.py +27 -27
  228. edsl/tools/embeddings_plotting.py +118 -118
  229. edsl/tools/plotting.py +112 -112
  230. edsl/tools/summarize.py +18 -18
  231. edsl/utilities/SystemInfo.py +28 -28
  232. edsl/utilities/__init__.py +22 -22
  233. edsl/utilities/ast_utilities.py +25 -25
  234. edsl/utilities/data/Registry.py +6 -6
  235. edsl/utilities/data/__init__.py +1 -1
  236. edsl/utilities/data/scooter_results.json +1 -1
  237. edsl/utilities/decorators.py +77 -77
  238. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  239. edsl/utilities/interface.py +627 -627
  240. edsl/utilities/naming_utilities.py +263 -263
  241. edsl/utilities/repair_functions.py +28 -28
  242. edsl/utilities/restricted_python.py +70 -70
  243. edsl/utilities/utilities.py +409 -409
  244. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev3.dist-info}/LICENSE +21 -21
  245. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev3.dist-info}/METADATA +1 -1
  246. edsl-0.1.38.dev3.dist-info/RECORD +269 -0
  247. edsl-0.1.38.dev2.dist-info/RECORD +0 -269
  248. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev3.dist-info}/WHEEL +0 -0
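Apart from edsl/__version__.py, METADATA, and the two RECORD lists, every file above is reported with identical added and removed line counts, i.e. each module was rewritten wholesale without a net size change. A minimal sketch for confirming which version is installed after upgrading; it assumes the new wheel has been installed and that edsl/__version__.py (listed above) defines a __version__ string:

# Hypothetical post-install check; assumes `pip install edsl==0.1.38.dev3` has run
# and that edsl/__version__.py defines a __version__ string (a common convention).
from edsl.__version__ import __version__

assert __version__ == "0.1.38.dev3", f"unexpected edsl version: {__version__}"
print(f"edsl {__version__} installed")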
@@ -1,284 +1,284 @@
from abc import ABC, abstractmethod
import asyncio
from typing import Coroutine, Dict, Any, Optional

from edsl.prompts.Prompt import Prompt
from edsl.utilities.decorators import jupyter_nb_handler
from edsl.data_transfer_models import AgentResponseDict

from edsl.data.Cache import Cache

from edsl.questions.QuestionBase import QuestionBase
from edsl.scenarios.Scenario import Scenario
from edsl.surveys.MemoryPlan import MemoryPlan
from edsl.language_models.LanguageModel import LanguageModel

from edsl.data_transfer_models import EDSLResultObjectInput
from edsl.agents.PromptConstructor import PromptConstructor

from edsl.agents.prompt_helpers import PromptPlan


class InvigilatorBase(ABC):
    """An invigilator (someone who administers an exam) is a class that is responsible for administering a question to an agent.

    >>> InvigilatorBase.example().answer_question()
    {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}

    >>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
    'Failed to get response'

    This returns an empty prompt because there is no memory the agent needs to have at q0.


    """

    def __init__(
        self,
        agent: "Agent",
        question: QuestionBase,
        scenario: Scenario,
        model: LanguageModel,
        memory_plan: MemoryPlan,
        current_answers: dict,
        survey: Optional["Survey"],
        cache: Optional[Cache] = None,
        iteration: Optional[int] = 1,
        additional_prompt_data: Optional[dict] = None,
        sidecar_model: Optional[LanguageModel] = None,
        raise_validation_errors: Optional[bool] = True,
        prompt_plan: Optional["PromptPlan"] = None,
    ):
        """Initialize a new Invigilator."""
        self.agent = agent
        self.question = question
        self.scenario = scenario
        self.model = model
        self.memory_plan = memory_plan
        self.current_answers = current_answers or {}
        self.iteration = iteration
        self.additional_prompt_data = additional_prompt_data
        self.cache = cache
        self.sidecar_model = sidecar_model
        self.survey = survey
        self.raise_validation_errors = raise_validation_errors
        if prompt_plan is None:
            self.prompt_plan = PromptPlan()
        else:
            self.prompt_plan = prompt_plan

        self.raw_model_response = (
            None  # placeholder for the raw response from the model
        )

    @property
    def prompt_constructor(self) -> PromptConstructor:
        """Return the prompt constructor."""
        return PromptConstructor(self, prompt_plan=self.prompt_plan)

    def to_dict(self):
        attributes = [
            "agent",
            "question",
            "scenario",
            "model",
            "memory_plan",
            "current_answers",
            "iteration",
            "additional_prompt_data",
            "cache",
            "sidecar_model",
            "survey",
        ]

        def serialize_attribute(attr):
            value = getattr(self, attr)
            if value is None:
                return None
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, (int, float, str, bool, dict, list)):
                return value
            return str(value)

        return {attr: serialize_attribute(attr) for attr in attributes}

    @classmethod
    def from_dict(cls, data):
        from edsl.agents.Agent import Agent
        from edsl.questions import QuestionBase
        from edsl.scenarios.Scenario import Scenario
        from edsl.surveys.MemoryPlan import MemoryPlan
        from edsl.language_models.LanguageModel import LanguageModel
        from edsl.surveys.Survey import Survey

        agent = Agent.from_dict(data["agent"])
        question = QuestionBase.from_dict(data["question"])
        scenario = Scenario.from_dict(data["scenario"])
        model = LanguageModel.from_dict(data["model"])
        memory_plan = MemoryPlan.from_dict(data["memory_plan"])
        survey = Survey.from_dict(data["survey"])
        current_answers = data["current_answers"]
        iteration = data["iteration"]
        additional_prompt_data = data["additional_prompt_data"]
        cache = Cache.from_dict(data["cache"])

        if data["sidecar_model"] is None:
            sidecar_model = None
        else:
            sidecar_model = LanguageModel.from_dict(data["sidecar_model"])

        return cls(
            agent=agent,
            question=question,
            scenario=scenario,
            model=model,
            memory_plan=memory_plan,
            current_answers=current_answers,
            survey=survey,
            iteration=iteration,
            additional_prompt_data=additional_prompt_data,
            cache=cache,
            sidecar_model=sidecar_model,
        )

    def __repr__(self) -> str:
        """Return a string representation of the Invigilator.

        >>> InvigilatorBase.example().__repr__()
        'InvigilatorExample(...)'

        """
        return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scenario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration={repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)}, sidecar_model={repr(self.sidecar_model)})"

    def get_failed_task_result(self, failure_reason) -> EDSLResultObjectInput:
        """Return an AgentResponseDict used in case the question-asking fails.

        Possible reasons include:
        - Legitimately skipped because of skip logic
        - Failed to get response from the model

        """
        data = {
            "answer": None,
            "generated_tokens": None,
            "comment": failure_reason,
            "question_name": self.question.question_name,
            "prompts": self.get_prompts(),
            "cached_response": None,
            "raw_model_response": None,
            "cache_used": None,
            "cache_key": None,
        }
        return EDSLResultObjectInput(**data)

    # breakpoint()
    # if hasattr(self, "augmented_model_response"):
    # import json

    # generated_tokens = json.loads(self.augmented_model_response)["answer"][
    # "generated_tokens"
    # ]
    # else:
    # generated_tokens = "Filled in by InvigilatorBase.get_failed_task_result"
    # agent_response_dict = AgentResponseDict(
    # answer=None,
    # comment="Failed to get usable response",
    # generated_tokens=generated_tokens,
    # question_name=self.question.question_name,
    # prompts=self.get_prompts(),
    # )
    # # breakpoint()
    # return agent_response_dict

    def get_prompts(self) -> Dict[str, Prompt]:
        """Return the prompts used."""

        return {
            "user_prompt": Prompt("NA"),
            "system_prompt": Prompt("NA"),
        }

    @abstractmethod
    async def async_answer_question(self):
        """Answer a question."""
        pass

    @jupyter_nb_handler
    def answer_question(self) -> Coroutine:
        """Return a function that gets the answers to the question."""

        async def main():
            """Return the answer to the question."""
            results = await asyncio.gather(self.async_answer_question())
            return results[0]  # Since there's only one task, return its result

        return main()

    @classmethod
    def example(
        cls, throw_an_exception=False, question=None, scenario=None, survey=None
    ) -> "InvigilatorBase":
        """Return an example invigilator.

        >>> InvigilatorBase.example()
        InvigilatorExample(...)

        """
        from edsl.agents.Agent import Agent
        from edsl.questions import QuestionMultipleChoice
        from edsl.scenarios.Scenario import Scenario
        from edsl.language_models import LanguageModel
        from edsl.surveys.MemoryPlan import MemoryPlan

        from edsl.enums import InferenceServiceType

        from edsl import Model

        model = Model("test", canned_response="SPAM!")

        if throw_an_exception:
            model.throw_an_exception = True
        agent = Agent.example()
        # question = QuestionMultipleChoice.example()
        from edsl.surveys import Survey

        if not survey:
            survey = Survey.example()

        if question not in survey.questions and question is not None:
            survey.add_question(question)

        question = question or survey.questions[0]
        scenario = scenario or Scenario.example()
        # memory_plan = None #memory_plan = MemoryPlan()
        from edsl import Survey

        memory_plan = MemoryPlan(survey=survey)
        current_answers = None
        from edsl.agents.PromptConstructor import PromptConstructor

        class InvigilatorExample(InvigilatorBase):
            """An example invigilator."""

            async def async_answer_question(self):
                """Answer a question."""
                return await self.model.async_execute_model_call(
                    user_prompt="Hello", system_prompt="Hi"
                )

        return InvigilatorExample(
            agent=agent,
            question=question,
            scenario=scenario,
            survey=survey,
            model=model,
            memory_plan=memory_plan,
            current_answers=current_answers,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
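For context, a minimal usage sketch of the InvigilatorBase class shipped in this file, mirroring its own doctests; it assumes edsl 0.1.38.dev3 is installed and relies only on the calls shown above (example(), answer_question(), get_failed_task_result()):

# Minimal sketch based on the doctests in edsl/agents/InvigilatorBase.py (shown above).
from edsl.agents.InvigilatorBase import InvigilatorBase

# example() wires up a test model with canned_response="SPAM!" plus example
# agent, survey, scenario, and memory-plan objects.
invigilator = InvigilatorBase.example()

# answer_question() awaits async_answer_question() and returns the raw model call.
print(invigilator.answer_question())
# -> {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}

# get_failed_task_result() packages a failure reason into an EDSLResultObjectInput.
failed = invigilator.get_failed_task_result(failure_reason="Failed to get response")
print(failed.comment)  # -> 'Failed to get response'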