edsl 0.1.37.dev3__py3-none-any.whl → 0.1.37.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +855 -804
  7. edsl/agents/AgentList.py +350 -345
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -305
  10. edsl/agents/PromptConstructor.py +353 -312
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -86
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +160 -152
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +290 -238
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/chips.py +95 -0
  45. edsl/conversation/mug_negotiation.py +81 -81
  46. edsl/conversation/next_speaker_utilities.py +93 -93
  47. edsl/coop/PriceFetcher.py +54 -54
  48. edsl/coop/__init__.py +2 -2
  49. edsl/coop/coop.py +958 -824
  50. edsl/coop/utils.py +131 -131
  51. edsl/data/Cache.py +527 -527
  52. edsl/data/CacheEntry.py +228 -228
  53. edsl/data/CacheHandler.py +149 -149
  54. edsl/data/RemoteCacheSync.py +97 -97
  55. edsl/data/SQLiteDict.py +292 -292
  56. edsl/data/__init__.py +4 -4
  57. edsl/data/orm.py +10 -10
  58. edsl/data_transfer_models.py +73 -73
  59. edsl/enums.py +173 -173
  60. edsl/exceptions/BaseException.py +21 -0
  61. edsl/exceptions/__init__.py +54 -50
  62. edsl/exceptions/agents.py +38 -40
  63. edsl/exceptions/configuration.py +16 -16
  64. edsl/exceptions/coop.py +10 -10
  65. edsl/exceptions/data.py +14 -14
  66. edsl/exceptions/general.py +34 -34
  67. edsl/exceptions/jobs.py +33 -33
  68. edsl/exceptions/language_models.py +63 -63
  69. edsl/exceptions/prompts.py +15 -15
  70. edsl/exceptions/questions.py +91 -91
  71. edsl/exceptions/results.py +29 -26
  72. edsl/exceptions/scenarios.py +22 -0
  73. edsl/exceptions/surveys.py +37 -34
  74. edsl/inference_services/AnthropicService.py +87 -87
  75. edsl/inference_services/AwsBedrock.py +120 -115
  76. edsl/inference_services/AzureAI.py +217 -217
  77. edsl/inference_services/DeepInfraService.py +18 -18
  78. edsl/inference_services/GoogleService.py +156 -156
  79. edsl/inference_services/GroqService.py +20 -20
  80. edsl/inference_services/InferenceServiceABC.py +147 -147
  81. edsl/inference_services/InferenceServicesCollection.py +97 -74
  82. edsl/inference_services/MistralAIService.py +123 -123
  83. edsl/inference_services/OllamaService.py +18 -18
  84. edsl/inference_services/OpenAIService.py +224 -224
  85. edsl/inference_services/TestService.py +89 -89
  86. edsl/inference_services/TogetherAIService.py +170 -170
  87. edsl/inference_services/models_available_cache.py +118 -118
  88. edsl/inference_services/rate_limits_cache.py +25 -25
  89. edsl/inference_services/registry.py +39 -39
  90. edsl/inference_services/write_available.py +10 -10
  91. edsl/jobs/Answers.py +56 -56
  92. edsl/jobs/Jobs.py +1347 -1121
  93. edsl/jobs/__init__.py +1 -1
  94. edsl/jobs/buckets/BucketCollection.py +63 -63
  95. edsl/jobs/buckets/ModelBuckets.py +65 -65
  96. edsl/jobs/buckets/TokenBucket.py +248 -248
  97. edsl/jobs/interviews/Interview.py +661 -661
  98. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  99. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -182
  100. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  101. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  102. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  103. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  104. edsl/jobs/interviews/ReportErrors.py +66 -66
  105. edsl/jobs/interviews/interview_status_enum.py +9 -9
  106. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  107. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  108. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  109. edsl/jobs/tasks/TaskCreators.py +64 -64
  110. edsl/jobs/tasks/TaskHistory.py +442 -441
  111. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  112. edsl/jobs/tasks/task_status_enum.py +163 -163
  113. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  114. edsl/jobs/tokens/TokenUsage.py +34 -34
  115. edsl/language_models/KeyLookup.py +30 -0
  116. edsl/language_models/LanguageModel.py +706 -718
  117. edsl/language_models/ModelList.py +102 -102
  118. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  119. edsl/language_models/__init__.py +3 -2
  120. edsl/language_models/fake_openai_call.py +15 -15
  121. edsl/language_models/fake_openai_service.py +61 -61
  122. edsl/language_models/registry.py +137 -137
  123. edsl/language_models/repair.py +156 -156
  124. edsl/language_models/unused/ReplicateBase.py +83 -83
  125. edsl/language_models/utilities.py +64 -64
  126. edsl/notebooks/Notebook.py +259 -259
  127. edsl/notebooks/__init__.py +1 -1
  128. edsl/prompts/Prompt.py +357 -353
  129. edsl/prompts/__init__.py +2 -2
  130. edsl/questions/AnswerValidatorMixin.py +289 -289
  131. edsl/questions/QuestionBase.py +656 -616
  132. edsl/questions/QuestionBaseGenMixin.py +161 -161
  133. edsl/questions/QuestionBasePromptsMixin.py +234 -266
  134. edsl/questions/QuestionBudget.py +227 -227
  135. edsl/questions/QuestionCheckBox.py +359 -359
  136. edsl/questions/QuestionExtract.py +183 -183
  137. edsl/questions/QuestionFreeText.py +114 -114
  138. edsl/questions/QuestionFunctional.py +159 -159
  139. edsl/questions/QuestionList.py +231 -231
  140. edsl/questions/QuestionMultipleChoice.py +286 -286
  141. edsl/questions/QuestionNumerical.py +153 -153
  142. edsl/questions/QuestionRank.py +324 -324
  143. edsl/questions/Quick.py +41 -41
  144. edsl/questions/RegisterQuestionsMeta.py +71 -71
  145. edsl/questions/ResponseValidatorABC.py +174 -174
  146. edsl/questions/SimpleAskMixin.py +73 -73
  147. edsl/questions/__init__.py +26 -26
  148. edsl/questions/compose_questions.py +98 -98
  149. edsl/questions/decorators.py +21 -21
  150. edsl/questions/derived/QuestionLikertFive.py +76 -76
  151. edsl/questions/derived/QuestionLinearScale.py +87 -87
  152. edsl/questions/derived/QuestionTopK.py +91 -91
  153. edsl/questions/derived/QuestionYesNo.py +82 -82
  154. edsl/questions/descriptors.py +413 -418
  155. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  156. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  157. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  158. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  159. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  160. edsl/questions/prompt_templates/question_list.jinja +17 -17
  161. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  162. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  163. edsl/questions/question_registry.py +147 -147
  164. edsl/questions/settings.py +12 -12
  165. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  167. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  168. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  169. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  170. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  171. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  172. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  173. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  174. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  175. edsl/questions/templates/list/question_presentation.jinja +5 -5
  176. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  177. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  178. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  179. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  180. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  181. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  182. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  183. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  184. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  185. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  186. edsl/results/Dataset.py +293 -293
  187. edsl/results/DatasetExportMixin.py +717 -693
  188. edsl/results/DatasetTree.py +145 -145
  189. edsl/results/Result.py +450 -435
  190. edsl/results/Results.py +1071 -1160
  191. edsl/results/ResultsDBMixin.py +238 -238
  192. edsl/results/ResultsExportMixin.py +43 -43
  193. edsl/results/ResultsFetchMixin.py +33 -33
  194. edsl/results/ResultsGGMixin.py +121 -121
  195. edsl/results/ResultsToolsMixin.py +98 -98
  196. edsl/results/Selector.py +135 -118
  197. edsl/results/__init__.py +2 -2
  198. edsl/results/tree_explore.py +115 -115
  199. edsl/scenarios/FileStore.py +458 -458
  200. edsl/scenarios/Scenario.py +546 -510
  201. edsl/scenarios/ScenarioHtmlMixin.py +64 -59
  202. edsl/scenarios/ScenarioList.py +1112 -1101
  203. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  204. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  205. edsl/scenarios/__init__.py +4 -4
  206. edsl/shared.py +1 -1
  207. edsl/study/ObjectEntry.py +173 -173
  208. edsl/study/ProofOfWork.py +113 -113
  209. edsl/study/SnapShot.py +80 -80
  210. edsl/study/Study.py +528 -528
  211. edsl/study/__init__.py +4 -4
  212. edsl/surveys/DAG.py +148 -148
  213. edsl/surveys/Memory.py +31 -31
  214. edsl/surveys/MemoryPlan.py +244 -244
  215. edsl/surveys/Rule.py +330 -324
  216. edsl/surveys/RuleCollection.py +387 -387
  217. edsl/surveys/Survey.py +1795 -1772
  218. edsl/surveys/SurveyCSS.py +261 -261
  219. edsl/surveys/SurveyExportMixin.py +259 -259
  220. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  221. edsl/surveys/SurveyQualtricsImport.py +284 -284
  222. edsl/surveys/__init__.py +3 -3
  223. edsl/surveys/base.py +53 -53
  224. edsl/surveys/descriptors.py +56 -56
  225. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  226. edsl/surveys/instructions/Instruction.py +51 -51
  227. edsl/surveys/instructions/InstructionCollection.py +77 -77
  228. edsl/templates/error_reporting/base.html +23 -23
  229. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  230. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  231. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  232. edsl/templates/error_reporting/interview_details.html +115 -115
  233. edsl/templates/error_reporting/interviews.html +9 -9
  234. edsl/templates/error_reporting/overview.html +4 -4
  235. edsl/templates/error_reporting/performance_plot.html +1 -1
  236. edsl/templates/error_reporting/report.css +73 -73
  237. edsl/templates/error_reporting/report.html +117 -117
  238. edsl/templates/error_reporting/report.js +25 -25
  239. edsl/tools/__init__.py +1 -1
  240. edsl/tools/clusters.py +192 -192
  241. edsl/tools/embeddings.py +27 -27
  242. edsl/tools/embeddings_plotting.py +118 -118
  243. edsl/tools/plotting.py +112 -112
  244. edsl/tools/summarize.py +18 -18
  245. edsl/utilities/SystemInfo.py +28 -28
  246. edsl/utilities/__init__.py +22 -22
  247. edsl/utilities/ast_utilities.py +25 -25
  248. edsl/utilities/data/Registry.py +6 -6
  249. edsl/utilities/data/__init__.py +1 -1
  250. edsl/utilities/data/scooter_results.json +1 -1
  251. edsl/utilities/decorators.py +77 -77
  252. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  253. edsl/utilities/interface.py +627 -627
  254. edsl/utilities/repair_functions.py +28 -28
  255. edsl/utilities/restricted_python.py +70 -70
  256. edsl/utilities/utilities.py +409 -391
  257. {edsl-0.1.37.dev3.dist-info → edsl-0.1.37.dev5.dist-info}/LICENSE +21 -21
  258. {edsl-0.1.37.dev3.dist-info → edsl-0.1.37.dev5.dist-info}/METADATA +1 -1
  259. edsl-0.1.37.dev5.dist-info/RECORD +283 -0
  260. edsl-0.1.37.dev3.dist-info/RECORD +0 -279
  261. {edsl-0.1.37.dev3.dist-info → edsl-0.1.37.dev5.dist-info}/WHEEL +0 -0
@@ -1,305 +1,284 @@
1
- from abc import ABC, abstractmethod
2
- import asyncio
3
- from typing import Coroutine, Dict, Any, Optional
4
-
5
- from edsl.prompts.Prompt import Prompt
6
- from edsl.utilities.decorators import jupyter_nb_handler
7
- from edsl.data_transfer_models import AgentResponseDict
8
-
9
- from edsl.data.Cache import Cache
10
-
11
- from edsl.questions.QuestionBase import QuestionBase
12
- from edsl.scenarios.Scenario import Scenario
13
- from edsl.surveys.MemoryPlan import MemoryPlan
14
- from edsl.language_models.LanguageModel import LanguageModel
15
-
16
- from edsl.data_transfer_models import EDSLResultObjectInput
17
- from edsl.agents.PromptConstructor import PromptConstructor
18
-
19
- from edsl.agents.prompt_helpers import PromptPlan
20
-
21
-
22
- class InvigilatorBase(ABC):
23
- """An invigiator (someone who administers an exam) is a class that is responsible for administering a question to an agent.
24
-
25
- >>> InvigilatorBase.example().answer_question()
26
- {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}
27
-
28
- >>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
29
- 'Failed to get response'
30
-
31
- This returns an empty prompt because there is no memory the agent needs to have at q0.
32
-
33
-
34
- """
35
-
36
- def __init__(
37
- self,
38
- agent: "Agent",
39
- question: QuestionBase,
40
- scenario: Scenario,
41
- model: LanguageModel,
42
- memory_plan: MemoryPlan,
43
- current_answers: dict,
44
- survey: Optional["Survey"],
45
- cache: Optional[Cache] = None,
46
- iteration: Optional[int] = 1,
47
- additional_prompt_data: Optional[dict] = None,
48
- sidecar_model: Optional[LanguageModel] = None,
49
- raise_validation_errors: Optional[bool] = True,
50
- prompt_plan: Optional["PromptPlan"] = None,
51
- ):
52
- """Initialize a new Invigilator."""
53
- self.agent = agent
54
- self.question = question
55
- self.scenario = scenario
56
- self.model = model
57
- self.memory_plan = memory_plan
58
- self.current_answers = current_answers or {}
59
- self.iteration = iteration
60
- self.additional_prompt_data = additional_prompt_data
61
- self.cache = cache
62
- self.sidecar_model = sidecar_model
63
- self.survey = survey
64
- self.raise_validation_errors = raise_validation_errors
65
- if prompt_plan is None:
66
- self.prompt_plan = PromptPlan()
67
- else:
68
- self.prompt_plan = prompt_plan
69
-
70
- self.raw_model_response = (
71
- None # placeholder for the raw response from the model
72
- )
73
-
74
- @property
75
- def prompt_constructor(self) -> PromptConstructor:
76
- """Return the prompt constructor."""
77
- return PromptConstructor(self, prompt_plan=self.prompt_plan)
78
-
79
- def to_dict(self):
80
- attributes = [
81
- "agent",
82
- "question",
83
- "scenario",
84
- "model",
85
- "memory_plan",
86
- "current_answers",
87
- "iteration",
88
- "additional_prompt_data",
89
- "cache",
90
- "sidecar_model",
91
- "survey",
92
- ]
93
-
94
- def serialize_attribute(attr):
95
- value = getattr(self, attr)
96
- if value is None:
97
- return None
98
- if hasattr(value, "to_dict"):
99
- return value.to_dict()
100
- if isinstance(value, (int, float, str, bool, dict, list)):
101
- return value
102
- return str(value)
103
-
104
- return {attr: serialize_attribute(attr) for attr in attributes}
105
-
106
- @classmethod
107
- def from_dict(cls, data):
108
- from edsl.agents.Agent import Agent
109
- from edsl.questions import QuestionBase
110
- from edsl.scenarios.Scenario import Scenario
111
- from edsl.surveys.MemoryPlan import MemoryPlan
112
- from edsl.language_models.LanguageModel import LanguageModel
113
- from edsl.surveys.Survey import Survey
114
-
115
- agent = Agent.from_dict(data["agent"])
116
- question = QuestionBase.from_dict(data["question"])
117
- scenario = Scenario.from_dict(data["scenario"])
118
- model = LanguageModel.from_dict(data["model"])
119
- memory_plan = MemoryPlan.from_dict(data["memory_plan"])
120
- survey = Survey.from_dict(data["survey"])
121
- current_answers = data["current_answers"]
122
- iteration = data["iteration"]
123
- additional_prompt_data = data["additional_prompt_data"]
124
- cache = Cache.from_dict(data["cache"])
125
-
126
- if data["sidecar_model"] is None:
127
- sidecar_model = None
128
- else:
129
- sidecar_model = LanguageModel.from_dict(data["sidecar_model"])
130
-
131
- return cls(
132
- agent=agent,
133
- question=question,
134
- scenario=scenario,
135
- model=model,
136
- memory_plan=memory_plan,
137
- current_answers=current_answers,
138
- survey=survey,
139
- iteration=iteration,
140
- additional_prompt_data=additional_prompt_data,
141
- cache=cache,
142
- sidecar_model=sidecar_model,
143
- )
144
-
145
- def __repr__(self) -> str:
146
- """Return a string representation of the Invigilator.
147
-
148
- >>> InvigilatorBase.example().__repr__()
149
- 'InvigilatorExample(...)'
150
-
151
- """
152
- return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scneario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration{repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)}, sidecarmodel={repr(self.sidecar_model)})"
153
-
154
- def get_failed_task_result(self, failure_reason) -> EDSLResultObjectInput:
155
- """Return an AgentResponseDict used in case the question-asking fails.
156
-
157
- Possible reasons include:
158
- - Legimately skipped because of skip logic
159
- - Failed to get response from the model
160
-
161
- """
162
- data = {
163
- "answer": None,
164
- "generated_tokens": None,
165
- "comment": failure_reason,
166
- "question_name": self.question.question_name,
167
- "prompts": self.get_prompts(),
168
- "cached_response": None,
169
- "raw_model_response": None,
170
- "cache_used": None,
171
- "cache_key": None,
172
- }
173
- return EDSLResultObjectInput(**data)
174
-
175
- # breakpoint()
176
- # if hasattr(self, "augmented_model_response"):
177
- # import json
178
-
179
- # generated_tokens = json.loads(self.augmented_model_response)["answer"][
180
- # "generated_tokens"
181
- # ]
182
- # else:
183
- # generated_tokens = "Filled in by InvigilatorBase.get_failed_task_result"
184
- # agent_response_dict = AgentResponseDict(
185
- # answer=None,
186
- # comment="Failed to get usable response",
187
- # generated_tokens=generated_tokens,
188
- # question_name=self.question.question_name,
189
- # prompts=self.get_prompts(),
190
- # )
191
- # # breakpoint()
192
- # return agent_response_dict
193
-
194
- def get_prompts(self) -> Dict[str, Prompt]:
195
- """Return the prompt used."""
196
-
197
- return {
198
- "user_prompt": Prompt("NA"),
199
- "system_prompt": Prompt("NA"),
200
- }
201
-
202
- @abstractmethod
203
- async def async_answer_question(self):
204
- """Asnwer a question."""
205
- pass
206
-
207
- @jupyter_nb_handler
208
- def answer_question(self) -> Coroutine:
209
- """Return a function that gets the answers to the question."""
210
-
211
- async def main():
212
- """Return the answer to the question."""
213
- results = await asyncio.gather(self.async_answer_question())
214
- return results[0] # Since there's only one task, return its result
215
-
216
- return main()
217
-
218
- @classmethod
219
- def example(
220
- cls, throw_an_exception=False, question=None, scenario=None, survey=None
221
- ) -> "InvigilatorBase":
222
- """Return an example invigilator.
223
-
224
- >>> InvigilatorBase.example()
225
- InvigilatorExample(...)
226
-
227
- """
228
- from edsl.agents.Agent import Agent
229
- from edsl.questions import QuestionMultipleChoice
230
- from edsl.scenarios.Scenario import Scenario
231
- from edsl.language_models import LanguageModel
232
- from edsl.surveys.MemoryPlan import MemoryPlan
233
-
234
- from edsl.enums import InferenceServiceType
235
-
236
- from edsl import Model
237
-
238
- model = Model("test", canned_response="SPAM!")
239
- # class TestLanguageModelGood(LanguageModel):
240
- # """A test language model."""
241
-
242
- # _model_ = "test"
243
- # _parameters_ = {"temperature": 0.5}
244
- # _inference_service_ = InferenceServiceType.TEST.value
245
-
246
- # async def async_execute_model_call(
247
- # self, user_prompt: str, system_prompt: str
248
- # ) -> dict[str, Any]:
249
- # await asyncio.sleep(0.1)
250
- # if hasattr(self, "throw_an_exception"):
251
- # raise Exception("Error!")
252
- # return {"message": """{"answer": "SPAM!"}"""}
253
-
254
- # def parse_response(self, raw_response: dict[str, Any]) -> str:
255
- # """Parse the response from the model."""
256
- # return raw_response["message"]
257
-
258
- if throw_an_exception:
259
- model.throw_an_exception = True
260
- agent = Agent.example()
261
- # question = QuestionMultipleChoice.example()
262
- from edsl.surveys import Survey
263
-
264
- if not survey:
265
- survey = Survey.example()
266
- # if question:
267
- # need to have the focal question name in the list of names
268
- # survey._questions[0].question_name = question.question_name
269
- # survey.add_question(question)
270
- if question:
271
- survey.add_question(question)
272
-
273
- question = question or survey.questions[0]
274
- scenario = scenario or Scenario.example()
275
- # memory_plan = None #memory_plan = MemoryPlan()
276
- from edsl import Survey
277
-
278
- memory_plan = MemoryPlan(survey=survey)
279
- current_answers = None
280
- from edsl.agents.PromptConstructor import PromptConstructor
281
-
282
- class InvigilatorExample(InvigilatorBase):
283
- """An example invigilator."""
284
-
285
- async def async_answer_question(self):
286
- """Answer a question."""
287
- return await self.model.async_execute_model_call(
288
- user_prompt="Hello", system_prompt="Hi"
289
- )
290
-
291
- return InvigilatorExample(
292
- agent=agent,
293
- question=question,
294
- scenario=scenario,
295
- survey=survey,
296
- model=model,
297
- memory_plan=memory_plan,
298
- current_answers=current_answers,
299
- )
300
-
301
-
302
- if __name__ == "__main__":
303
- import doctest
304
-
305
- doctest.testmod(optionflags=doctest.ELLIPSIS)
1
+ from abc import ABC, abstractmethod
2
+ import asyncio
3
+ from typing import Coroutine, Dict, Any, Optional
4
+
5
+ from edsl.prompts.Prompt import Prompt
6
+ from edsl.utilities.decorators import jupyter_nb_handler
7
+ from edsl.data_transfer_models import AgentResponseDict
8
+
9
+ from edsl.data.Cache import Cache
10
+
11
+ from edsl.questions.QuestionBase import QuestionBase
12
+ from edsl.scenarios.Scenario import Scenario
13
+ from edsl.surveys.MemoryPlan import MemoryPlan
14
+ from edsl.language_models.LanguageModel import LanguageModel
15
+
16
+ from edsl.data_transfer_models import EDSLResultObjectInput
17
+ from edsl.agents.PromptConstructor import PromptConstructor
18
+
19
+ from edsl.agents.prompt_helpers import PromptPlan
20
+
21
+
22
+ class InvigilatorBase(ABC):
23
+ """An invigiator (someone who administers an exam) is a class that is responsible for administering a question to an agent.
24
+
25
+ >>> InvigilatorBase.example().answer_question()
26
+ {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}
27
+
28
+ >>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
29
+ 'Failed to get response'
30
+
31
+ This returns an empty prompt because there is no memory the agent needs to have at q0.
32
+
33
+
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ agent: "Agent",
39
+ question: QuestionBase,
40
+ scenario: Scenario,
41
+ model: LanguageModel,
42
+ memory_plan: MemoryPlan,
43
+ current_answers: dict,
44
+ survey: Optional["Survey"],
45
+ cache: Optional[Cache] = None,
46
+ iteration: Optional[int] = 1,
47
+ additional_prompt_data: Optional[dict] = None,
48
+ sidecar_model: Optional[LanguageModel] = None,
49
+ raise_validation_errors: Optional[bool] = True,
50
+ prompt_plan: Optional["PromptPlan"] = None,
51
+ ):
52
+ """Initialize a new Invigilator."""
53
+ self.agent = agent
54
+ self.question = question
55
+ self.scenario = scenario
56
+ self.model = model
57
+ self.memory_plan = memory_plan
58
+ self.current_answers = current_answers or {}
59
+ self.iteration = iteration
60
+ self.additional_prompt_data = additional_prompt_data
61
+ self.cache = cache
62
+ self.sidecar_model = sidecar_model
63
+ self.survey = survey
64
+ self.raise_validation_errors = raise_validation_errors
65
+ if prompt_plan is None:
66
+ self.prompt_plan = PromptPlan()
67
+ else:
68
+ self.prompt_plan = prompt_plan
69
+
70
+ self.raw_model_response = (
71
+ None # placeholder for the raw response from the model
72
+ )
73
+
74
+ @property
75
+ def prompt_constructor(self) -> PromptConstructor:
76
+ """Return the prompt constructor."""
77
+ return PromptConstructor(self, prompt_plan=self.prompt_plan)
78
+
79
+ def to_dict(self):
80
+ attributes = [
81
+ "agent",
82
+ "question",
83
+ "scenario",
84
+ "model",
85
+ "memory_plan",
86
+ "current_answers",
87
+ "iteration",
88
+ "additional_prompt_data",
89
+ "cache",
90
+ "sidecar_model",
91
+ "survey",
92
+ ]
93
+
94
+ def serialize_attribute(attr):
95
+ value = getattr(self, attr)
96
+ if value is None:
97
+ return None
98
+ if hasattr(value, "to_dict"):
99
+ return value.to_dict()
100
+ if isinstance(value, (int, float, str, bool, dict, list)):
101
+ return value
102
+ return str(value)
103
+
104
+ return {attr: serialize_attribute(attr) for attr in attributes}
105
+
106
+ @classmethod
107
+ def from_dict(cls, data):
108
+ from edsl.agents.Agent import Agent
109
+ from edsl.questions import QuestionBase
110
+ from edsl.scenarios.Scenario import Scenario
111
+ from edsl.surveys.MemoryPlan import MemoryPlan
112
+ from edsl.language_models.LanguageModel import LanguageModel
113
+ from edsl.surveys.Survey import Survey
114
+
115
+ agent = Agent.from_dict(data["agent"])
116
+ question = QuestionBase.from_dict(data["question"])
117
+ scenario = Scenario.from_dict(data["scenario"])
118
+ model = LanguageModel.from_dict(data["model"])
119
+ memory_plan = MemoryPlan.from_dict(data["memory_plan"])
120
+ survey = Survey.from_dict(data["survey"])
121
+ current_answers = data["current_answers"]
122
+ iteration = data["iteration"]
123
+ additional_prompt_data = data["additional_prompt_data"]
124
+ cache = Cache.from_dict(data["cache"])
125
+
126
+ if data["sidecar_model"] is None:
127
+ sidecar_model = None
128
+ else:
129
+ sidecar_model = LanguageModel.from_dict(data["sidecar_model"])
130
+
131
+ return cls(
132
+ agent=agent,
133
+ question=question,
134
+ scenario=scenario,
135
+ model=model,
136
+ memory_plan=memory_plan,
137
+ current_answers=current_answers,
138
+ survey=survey,
139
+ iteration=iteration,
140
+ additional_prompt_data=additional_prompt_data,
141
+ cache=cache,
142
+ sidecar_model=sidecar_model,
143
+ )
144
+
145
+ def __repr__(self) -> str:
146
+ """Return a string representation of the Invigilator.
147
+
148
+ >>> InvigilatorBase.example().__repr__()
149
+ 'InvigilatorExample(...)'
150
+
151
+ """
152
+ return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scneario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration{repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)}, sidecarmodel={repr(self.sidecar_model)})"
153
+
154
+ def get_failed_task_result(self, failure_reason) -> EDSLResultObjectInput:
155
+ """Return an AgentResponseDict used in case the question-asking fails.
156
+
157
+ Possible reasons include:
158
+ - Legimately skipped because of skip logic
159
+ - Failed to get response from the model
160
+
161
+ """
162
+ data = {
163
+ "answer": None,
164
+ "generated_tokens": None,
165
+ "comment": failure_reason,
166
+ "question_name": self.question.question_name,
167
+ "prompts": self.get_prompts(),
168
+ "cached_response": None,
169
+ "raw_model_response": None,
170
+ "cache_used": None,
171
+ "cache_key": None,
172
+ }
173
+ return EDSLResultObjectInput(**data)
174
+
175
+ # breakpoint()
176
+ # if hasattr(self, "augmented_model_response"):
177
+ # import json
178
+
179
+ # generated_tokens = json.loads(self.augmented_model_response)["answer"][
180
+ # "generated_tokens"
181
+ # ]
182
+ # else:
183
+ # generated_tokens = "Filled in by InvigilatorBase.get_failed_task_result"
184
+ # agent_response_dict = AgentResponseDict(
185
+ # answer=None,
186
+ # comment="Failed to get usable response",
187
+ # generated_tokens=generated_tokens,
188
+ # question_name=self.question.question_name,
189
+ # prompts=self.get_prompts(),
190
+ # )
191
+ # # breakpoint()
192
+ # return agent_response_dict
193
+
194
+ def get_prompts(self) -> Dict[str, Prompt]:
195
+ """Return the prompt used."""
196
+
197
+ return {
198
+ "user_prompt": Prompt("NA"),
199
+ "system_prompt": Prompt("NA"),
200
+ }
201
+
202
+ @abstractmethod
203
+ async def async_answer_question(self):
204
+ """Asnwer a question."""
205
+ pass
206
+
207
+ @jupyter_nb_handler
208
+ def answer_question(self) -> Coroutine:
209
+ """Return a function that gets the answers to the question."""
210
+
211
+ async def main():
212
+ """Return the answer to the question."""
213
+ results = await asyncio.gather(self.async_answer_question())
214
+ return results[0] # Since there's only one task, return its result
215
+
216
+ return main()
217
+
218
+ @classmethod
219
+ def example(
220
+ cls, throw_an_exception=False, question=None, scenario=None, survey=None
221
+ ) -> "InvigilatorBase":
222
+ """Return an example invigilator.
223
+
224
+ >>> InvigilatorBase.example()
225
+ InvigilatorExample(...)
226
+
227
+ """
228
+ from edsl.agents.Agent import Agent
229
+ from edsl.questions import QuestionMultipleChoice
230
+ from edsl.scenarios.Scenario import Scenario
231
+ from edsl.language_models import LanguageModel
232
+ from edsl.surveys.MemoryPlan import MemoryPlan
233
+
234
+ from edsl.enums import InferenceServiceType
235
+
236
+ from edsl import Model
237
+
238
+ model = Model("test", canned_response="SPAM!")
239
+
240
+ if throw_an_exception:
241
+ model.throw_an_exception = True
242
+ agent = Agent.example()
243
+ # question = QuestionMultipleChoice.example()
244
+ from edsl.surveys import Survey
245
+
246
+ if not survey:
247
+ survey = Survey.example()
248
+
249
+ if question not in survey.questions and question is not None:
250
+ survey.add_question(question)
251
+
252
+ question = question or survey.questions[0]
253
+ scenario = scenario or Scenario.example()
254
+ # memory_plan = None #memory_plan = MemoryPlan()
255
+ from edsl import Survey
256
+
257
+ memory_plan = MemoryPlan(survey=survey)
258
+ current_answers = None
259
+ from edsl.agents.PromptConstructor import PromptConstructor
260
+
261
+ class InvigilatorExample(InvigilatorBase):
262
+ """An example invigilator."""
263
+
264
+ async def async_answer_question(self):
265
+ """Answer a question."""
266
+ return await self.model.async_execute_model_call(
267
+ user_prompt="Hello", system_prompt="Hi"
268
+ )
269
+
270
+ return InvigilatorExample(
271
+ agent=agent,
272
+ question=question,
273
+ scenario=scenario,
274
+ survey=survey,
275
+ model=model,
276
+ memory_plan=memory_plan,
277
+ current_answers=current_answers,
278
+ )
279
+
280
+
281
+ if __name__ == "__main__":
282
+ import doctest
283
+
284
+ doctest.testmod(optionflags=doctest.ELLIPSIS)