edsl 0.1.39.dev1__py3-none-any.whl → 0.1.39.dev3__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (256)
  1. edsl/Base.py +332 -332
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -867
  7. edsl/agents/AgentList.py +413 -413
  8. edsl/agents/Invigilator.py +233 -233
  9. edsl/agents/InvigilatorBase.py +270 -265
  10. edsl/agents/PromptConstructor.py +354 -354
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +157 -157
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -1028
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -555
  37. edsl/data/CacheEntry.py +233 -233
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +78 -78
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +175 -175
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -42
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -148
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/PerplexityService.py +163 -163
  72. edsl/inference_services/TestService.py +89 -89
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -41
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -56
  79. edsl/jobs/Jobs.py +898 -898
  80. edsl/jobs/JobsChecks.py +147 -147
  81. edsl/jobs/JobsPrompts.py +268 -268
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -239
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -63
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -251
  87. edsl/jobs/interviews/Interview.py +661 -661
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -466
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -330
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -450
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -163
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -30
  106. edsl/language_models/LanguageModel.py +668 -668
  107. edsl/language_models/ModelList.py +155 -155
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -3
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -190
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -83
  115. edsl/language_models/utilities.py +64 -64
  116. edsl/notebooks/Notebook.py +258 -258
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -362
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -289
  121. edsl/questions/QuestionBase.py +664 -664
  122. edsl/questions/QuestionBaseGenMixin.py +161 -161
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -217
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -182
  127. edsl/questions/QuestionFreeText.py +114 -114
  128. edsl/questions/QuestionFunctional.py +166 -166
  129. edsl/questions/QuestionList.py +231 -231
  130. edsl/questions/QuestionMultipleChoice.py +286 -286
  131. edsl/questions/QuestionNumerical.py +153 -153
  132. edsl/questions/QuestionRank.py +324 -324
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -174
  136. edsl/questions/SimpleAskMixin.py +73 -73
  137. edsl/questions/__init__.py +26 -26
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -87
  142. edsl/questions/derived/QuestionTopK.py +93 -93
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -413
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -177
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -108
  177. edsl/results/Dataset.py +424 -424
  178. edsl/results/DatasetExportMixin.py +731 -731
  179. edsl/results/DatasetTree.py +275 -275
  180. edsl/results/Result.py +465 -465
  181. edsl/results/Results.py +1165 -1165
  182. edsl/results/ResultsDBMixin.py +238 -238
  183. edsl/results/ResultsExportMixin.py +43 -43
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -135
  188. edsl/results/TableDisplay.py +198 -198
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +77 -77
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -632
  193. edsl/scenarios/Scenario.py +601 -601
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  195. edsl/scenarios/ScenarioJoin.py +127 -127
  196. edsl/scenarios/ScenarioList.py +1287 -1287
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  199. edsl/scenarios/__init__.py +4 -4
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -528
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -326
  210. edsl/surveys/RuleCollection.py +387 -387
  211. edsl/surveys/Survey.py +1801 -1801
  212. edsl/surveys/SurveyCSS.py +261 -261
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/SurveyFlowVisualizationMixin.py +179 -179
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -3
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -56
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -49
  220. edsl/surveys/instructions/Instruction.py +65 -65
  221. edsl/surveys/instructions/InstructionCollection.py +77 -77
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -19
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/utilities/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -424
  252. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/METADATA +1 -1
  254. edsl-0.1.39.dev3.dist-info/RECORD +277 -0
  255. edsl-0.1.39.dev1.dist-info/RECORD +0 -277
  256. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/WHEEL +0 -0
@@ -1,265 +1,270 @@
- from abc import ABC, abstractmethod
- import asyncio
- from typing import Coroutine, Dict, Any, Optional
-
- from edsl.prompts.Prompt import Prompt
- from edsl.utilities.decorators import jupyter_nb_handler
- from edsl.data_transfer_models import AgentResponseDict
-
- from edsl.data.Cache import Cache
-
- from edsl.questions.QuestionBase import QuestionBase
- from edsl.scenarios.Scenario import Scenario
- from edsl.surveys.MemoryPlan import MemoryPlan
- from edsl.language_models.LanguageModel import LanguageModel
-
- from edsl.data_transfer_models import EDSLResultObjectInput
- from edsl.agents.PromptConstructor import PromptConstructor
-
- from edsl.agents.prompt_helpers import PromptPlan
-
-
- class InvigilatorBase(ABC):
-     """An invigiator (someone who administers an exam) is a class that is responsible for administering a question to an agent.
-
-     >>> InvigilatorBase.example().answer_question()
-     {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}
-
-     >>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
-     'Failed to get response'
-
-     This returns an empty prompt because there is no memory the agent needs to have at q0.
-
-
-     """
-
-     def __init__(
-         self,
-         agent: "Agent",
-         question: QuestionBase,
-         scenario: Scenario,
-         model: LanguageModel,
-         memory_plan: MemoryPlan,
-         current_answers: dict,
-         survey: Optional["Survey"],
-         cache: Optional[Cache] = None,
-         iteration: Optional[int] = 1,
-         additional_prompt_data: Optional[dict] = None,
-         sidecar_model: Optional[LanguageModel] = None,
-         raise_validation_errors: Optional[bool] = True,
-         prompt_plan: Optional["PromptPlan"] = None,
-     ):
-         """Initialize a new Invigilator."""
-         self.agent = agent
-         self.question = question
-         self.scenario = scenario
-         self.model = model
-         self.memory_plan = memory_plan
-         self.current_answers = current_answers or {}
-         self.iteration = iteration
-         self.additional_prompt_data = additional_prompt_data
-         self.cache = cache
-         self.sidecar_model = sidecar_model
-         self.survey = survey
-         self.raise_validation_errors = raise_validation_errors
-         if prompt_plan is None:
-             self.prompt_plan = PromptPlan()
-         else:
-             self.prompt_plan = prompt_plan
-
-         self.raw_model_response = (
-             None # placeholder for the raw response from the model
-         )
-
-     @property
-     def prompt_constructor(self) -> PromptConstructor:
-         """Return the prompt constructor."""
-         return PromptConstructor(self, prompt_plan=self.prompt_plan)
-
-     def to_dict(self):
-         attributes = [
-             "agent",
-             "question",
-             "scenario",
-             "model",
-             "memory_plan",
-             "current_answers",
-             "iteration",
-             "additional_prompt_data",
-             "cache",
-             "sidecar_model",
-             "survey",
-         ]
-
-         def serialize_attribute(attr):
-             value = getattr(self, attr)
-             if value is None:
-                 return None
-             if hasattr(value, "to_dict"):
-                 return value.to_dict()
-             if isinstance(value, (int, float, str, bool, dict, list)):
-                 return value
-             return str(value)
-
-         return {attr: serialize_attribute(attr) for attr in attributes}
-
-     @classmethod
-     def from_dict(cls, data):
-         from edsl.agents.Agent import Agent
-         from edsl.questions import QuestionBase
-         from edsl.scenarios.Scenario import Scenario
-         from edsl.surveys.MemoryPlan import MemoryPlan
-         from edsl.language_models.LanguageModel import LanguageModel
-         from edsl.surveys.Survey import Survey
-
-         agent = Agent.from_dict(data["agent"])
-         question = QuestionBase.from_dict(data["question"])
-         scenario = Scenario.from_dict(data["scenario"])
-         model = LanguageModel.from_dict(data["model"])
-         memory_plan = MemoryPlan.from_dict(data["memory_plan"])
-         survey = Survey.from_dict(data["survey"])
-         current_answers = data["current_answers"]
-         iteration = data["iteration"]
-         additional_prompt_data = data["additional_prompt_data"]
-         cache = Cache.from_dict(data["cache"])
-
-         if data["sidecar_model"] is None:
-             sidecar_model = None
-         else:
-             sidecar_model = LanguageModel.from_dict(data["sidecar_model"])
-
-         return cls(
-             agent=agent,
-             question=question,
-             scenario=scenario,
-             model=model,
-             memory_plan=memory_plan,
-             current_answers=current_answers,
-             survey=survey,
-             iteration=iteration,
-             additional_prompt_data=additional_prompt_data,
-             cache=cache,
-             sidecar_model=sidecar_model,
-         )
-
-     def __repr__(self) -> str:
-         """Return a string representation of the Invigilator.
-
-         >>> InvigilatorBase.example().__repr__()
-         'InvigilatorExample(...)'
-
-         """
-         return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scneario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration{repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)}, sidecarmodel={repr(self.sidecar_model)})"
-
-     def get_failed_task_result(self, failure_reason) -> EDSLResultObjectInput:
-         """Return an AgentResponseDict used in case the question-asking fails.
-
-         Possible reasons include:
-         - Legimately skipped because of skip logic
-         - Failed to get response from the model
-
-         """
-         data = {
-             "answer": None,
-             "generated_tokens": None,
-             "comment": failure_reason,
-             "question_name": self.question.question_name,
-             "prompts": self.get_prompts(),
-             "cached_response": None,
-             "raw_model_response": None,
-             "cache_used": None,
-             "cache_key": None,
-         }
-         return EDSLResultObjectInput(**data)
-
-     def get_prompts(self) -> Dict[str, Prompt]:
-         """Return the prompt used."""
-
-         return {
-             "user_prompt": Prompt("NA"),
-             "system_prompt": Prompt("NA"),
-         }
-
-     @abstractmethod
-     async def async_answer_question(self):
-         """Asnwer a question."""
-         pass
-
-     @jupyter_nb_handler
-     def answer_question(self) -> Coroutine:
-         """Return a function that gets the answers to the question."""
-
-         async def main():
-             """Return the answer to the question."""
-             results = await asyncio.gather(self.async_answer_question())
-             return results[0] # Since there's only one task, return its result
-
-         return main()
-
-     @classmethod
-     def example(
-         cls, throw_an_exception=False, question=None, scenario=None, survey=None
-     ) -> "InvigilatorBase":
-         """Return an example invigilator.
-
-         >>> InvigilatorBase.example()
-         InvigilatorExample(...)
-
-         """
-         from edsl.agents.Agent import Agent
-         from edsl.questions import QuestionMultipleChoice
-         from edsl.scenarios.Scenario import Scenario
-         from edsl.language_models import LanguageModel
-         from edsl.surveys.MemoryPlan import MemoryPlan
-
-         from edsl.enums import InferenceServiceType
-
-         from edsl import Model
-
-         model = Model("test", canned_response="SPAM!")
-
-         if throw_an_exception:
-             model.throw_an_exception = True
-         agent = Agent.example()
-         # question = QuestionMultipleChoice.example()
-         from edsl.surveys import Survey
-
-         if not survey:
-             survey = Survey.example()
-
-         if question not in survey.questions and question is not None:
-             survey.add_question(question)
-
-         question = question or survey.questions[0]
-         scenario = scenario or Scenario.example()
-         # memory_plan = None #memory_plan = MemoryPlan()
-         from edsl import Survey
-
-         memory_plan = MemoryPlan(survey=survey)
-         current_answers = None
-         from edsl.agents.PromptConstructor import PromptConstructor
-
-         class InvigilatorExample(InvigilatorBase):
-             """An example invigilator."""
-
-             async def async_answer_question(self):
-                 """Answer a question."""
-                 return await self.model.async_execute_model_call(
-                     user_prompt="Hello", system_prompt="Hi"
-                 )
-
-         return InvigilatorExample(
-             agent=agent,
-             question=question,
-             scenario=scenario,
-             survey=survey,
-             model=model,
-             memory_plan=memory_plan,
-             current_answers=current_answers,
-         )
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
+ from abc import ABC, abstractmethod
+ import asyncio
+ from typing import Coroutine, Dict, Any, Optional
+
+ from edsl.prompts.Prompt import Prompt
+ from edsl.utilities.decorators import jupyter_nb_handler
+ from edsl.data_transfer_models import AgentResponseDict
+
+ from edsl.data.Cache import Cache
+
+ from edsl.questions.QuestionBase import QuestionBase
+ from edsl.scenarios.Scenario import Scenario
+ from edsl.surveys.MemoryPlan import MemoryPlan
+ from edsl.language_models.LanguageModel import LanguageModel
+
+ from edsl.data_transfer_models import EDSLResultObjectInput
+ from edsl.agents.PromptConstructor import PromptConstructor
+
+ from edsl.agents.prompt_helpers import PromptPlan
+
+
+ class InvigilatorBase(ABC):
+     """An invigiator (someone who administers an exam) is a class that is responsible for administering a question to an agent.
+
+     >>> InvigilatorBase.example().answer_question()
+     {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}
+
+     >>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
+     'Failed to get response'
+
+     This returns an empty prompt because there is no memory the agent needs to have at q0.
+
+
+     """
+
+     def __init__(
+         self,
+         agent: "Agent",
+         question: QuestionBase,
+         scenario: Scenario,
+         model: LanguageModel,
+         memory_plan: MemoryPlan,
+         current_answers: dict,
+         survey: Optional["Survey"],
+         cache: Optional[Cache] = None,
+         iteration: Optional[int] = 1,
+         additional_prompt_data: Optional[dict] = None,
+         sidecar_model: Optional[LanguageModel] = None,
+         raise_validation_errors: Optional[bool] = True,
+         prompt_plan: Optional["PromptPlan"] = None,
+     ):
+         """Initialize a new Invigilator."""
+         self.agent = agent
+         self.question = question
+         self.scenario = scenario
+         self.model = model
+         self.memory_plan = memory_plan
+         self.current_answers = current_answers or {}
+         self.iteration = iteration
+         self.additional_prompt_data = additional_prompt_data
+         self.cache = cache
+         self.sidecar_model = sidecar_model
+         self.survey = survey
+         self.raise_validation_errors = raise_validation_errors
+         if prompt_plan is None:
+             self.prompt_plan = PromptPlan()
+         else:
+             self.prompt_plan = prompt_plan
+
+         self.raw_model_response = (
+             None # placeholder for the raw response from the model
+         )
+
+     @property
+     def prompt_constructor(self) -> PromptConstructor:
+         """Return the prompt constructor."""
+         return PromptConstructor(self, prompt_plan=self.prompt_plan)
+
+     def to_dict(self, include_cache=False):
+         attributes = [
+             "agent",
+             "question",
+             "scenario",
+             "model",
+             "memory_plan",
+             "current_answers",
+             "iteration",
+             "additional_prompt_data",
+             "sidecar_model",
+             "survey",
+         ]
+         if include_cache:
+             attributes.append("cache")
+
+         def serialize_attribute(attr):
+             value = getattr(self, attr)
+             if value is None:
+                 return None
+             if hasattr(value, "to_dict"):
+                 return value.to_dict()
+             if isinstance(value, (int, float, str, bool, dict, list)):
+                 return value
+             return str(value)
+
+         return {attr: serialize_attribute(attr) for attr in attributes}
+
+     @classmethod
+     def from_dict(cls, data):
+         from edsl.agents.Agent import Agent
+         from edsl.questions import QuestionBase
+         from edsl.scenarios.Scenario import Scenario
+         from edsl.surveys.MemoryPlan import MemoryPlan
+         from edsl.language_models.LanguageModel import LanguageModel
+         from edsl.surveys.Survey import Survey
+         from edsl.data.Cache import Cache
+
+         agent = Agent.from_dict(data["agent"])
+         question = QuestionBase.from_dict(data["question"])
+         scenario = Scenario.from_dict(data["scenario"])
+         model = LanguageModel.from_dict(data["model"])
+         memory_plan = MemoryPlan.from_dict(data["memory_plan"])
+         survey = Survey.from_dict(data["survey"])
+         current_answers = data["current_answers"]
+         iteration = data["iteration"]
+         additional_prompt_data = data["additional_prompt_data"]
+         if "cache" not in data:
+             cache = {}
+         else:
+             cache = Cache.from_dict(data["cache"])
+
+         if data["sidecar_model"] is None:
+             sidecar_model = None
+         else:
+             sidecar_model = LanguageModel.from_dict(data["sidecar_model"])
+
+         return cls(
+             agent=agent,
+             question=question,
+             scenario=scenario,
+             model=model,
+             memory_plan=memory_plan,
+             current_answers=current_answers,
+             survey=survey,
+             iteration=iteration,
+             additional_prompt_data=additional_prompt_data,
+             cache=cache,
+             sidecar_model=sidecar_model,
+         )
+
+     def __repr__(self) -> str:
+         """Return a string representation of the Invigilator.
+
+         >>> InvigilatorBase.example().__repr__()
+         'InvigilatorExample(...)'
+
+         """
+         return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scneario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration{repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)}, sidecarmodel={repr(self.sidecar_model)})"
+
+     def get_failed_task_result(self, failure_reason) -> EDSLResultObjectInput:
+         """Return an AgentResponseDict used in case the question-asking fails.
+
+         Possible reasons include:
+         - Legimately skipped because of skip logic
+         - Failed to get response from the model
+
+         """
+         data = {
+             "answer": None,
+             "generated_tokens": None,
+             "comment": failure_reason,
+             "question_name": self.question.question_name,
+             "prompts": self.get_prompts(),
+             "cached_response": None,
+             "raw_model_response": None,
+             "cache_used": None,
+             "cache_key": None,
+         }
+         return EDSLResultObjectInput(**data)
+
+     def get_prompts(self) -> Dict[str, Prompt]:
+         """Return the prompt used."""
+
+         return {
+             "user_prompt": Prompt("NA"),
+             "system_prompt": Prompt("NA"),
+         }
+
+     @abstractmethod
+     async def async_answer_question(self):
+         """Asnwer a question."""
+         pass
+
+     @jupyter_nb_handler
+     def answer_question(self) -> Coroutine:
+         """Return a function that gets the answers to the question."""
+
+         async def main():
+             """Return the answer to the question."""
+             results = await asyncio.gather(self.async_answer_question())
+             return results[0] # Since there's only one task, return its result
+
+         return main()
+
+     @classmethod
+     def example(
+         cls, throw_an_exception=False, question=None, scenario=None, survey=None
+     ) -> "InvigilatorBase":
+         """Return an example invigilator.
+
+         >>> InvigilatorBase.example()
+         InvigilatorExample(...)
+
+         """
+         from edsl.agents.Agent import Agent
+         from edsl.questions import QuestionMultipleChoice
+         from edsl.scenarios.Scenario import Scenario
+         from edsl.language_models import LanguageModel
+         from edsl.surveys.MemoryPlan import MemoryPlan
+
+         from edsl.enums import InferenceServiceType
+
+         from edsl import Model
+
+         model = Model("test", canned_response="SPAM!")
+
+         if throw_an_exception:
+             model.throw_an_exception = True
+         agent = Agent.example()
+         # question = QuestionMultipleChoice.example()
+         from edsl.surveys import Survey
+
+         if not survey:
+             survey = Survey.example()
+
+         if question not in survey.questions and question is not None:
+             survey.add_question(question)
+
+         question = question or survey.questions[0]
+         scenario = scenario or Scenario.example()
+         # memory_plan = None #memory_plan = MemoryPlan()
+         from edsl import Survey
+
+         memory_plan = MemoryPlan(survey=survey)
+         current_answers = None
+         from edsl.agents.PromptConstructor import PromptConstructor
+
+         class InvigilatorExample(InvigilatorBase):
+             """An example invigilator."""
+
+             async def async_answer_question(self):
+                 """Answer a question."""
+                 return await self.model.async_execute_model_call(
+                     user_prompt="Hello", system_prompt="Hi"
+                 )
+
+         return InvigilatorExample(
+             agent=agent,
+             question=question,
+             scenario=scenario,
+             survey=survey,
+             model=model,
+             memory_plan=memory_plan,
+             current_answers=current_answers,
+         )
+
+
+ if __name__ == "__main__":
+     import doctest
+
+     doctest.testmod(optionflags=doctest.ELLIPSIS)
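
The diff rendered in detail above is edsl/agents/InvigilatorBase.py (+270 -265), the only module in the file list whose line count changed apart from edsl/__version__.py and the dist-info metadata. The change adds an include_cache flag to to_dict (defaulting to False, so the cache is no longer serialized unless requested) and makes from_dict tolerate payloads that omit the "cache" key. A minimal sketch of the new behaviour, assuming an environment with edsl 0.1.39.dev3 installed and reusing the example invigilator from the doctests above:

# Minimal sketch (not from the package docs): exercises the new serialization
# behaviour of InvigilatorBase in 0.1.39.dev3.
from edsl.agents.InvigilatorBase import InvigilatorBase

invigilator = InvigilatorBase.example()

# By default the cache is now left out of the serialized dictionary.
data = invigilator.to_dict()
assert "cache" not in data

# Passing include_cache=True keeps the old behaviour of embedding the cache key.
data_with_cache = invigilator.to_dict(include_cache=True)
assert "cache" in data_with_cache

# from_dict now falls back to an empty cache when the key is absent, so
# dictionaries produced by the new default to_dict() remain loadable.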