edsl 0.1.37.dev5__py3-none-any.whl → 0.1.37.dev6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261) hide show
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +855 -855
  7. edsl/agents/AgentList.py +350 -350
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -284
  10. edsl/agents/PromptConstructor.py +353 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +160 -160
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +290 -290
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/chips.py +95 -95
  45. edsl/conversation/mug_negotiation.py +81 -81
  46. edsl/conversation/next_speaker_utilities.py +93 -93
  47. edsl/coop/PriceFetcher.py +54 -54
  48. edsl/coop/__init__.py +2 -2
  49. edsl/coop/coop.py +958 -958
  50. edsl/coop/utils.py +131 -131
  51. edsl/data/Cache.py +527 -527
  52. edsl/data/CacheEntry.py +228 -228
  53. edsl/data/CacheHandler.py +149 -149
  54. edsl/data/RemoteCacheSync.py +97 -97
  55. edsl/data/SQLiteDict.py +292 -292
  56. edsl/data/__init__.py +4 -4
  57. edsl/data/orm.py +10 -10
  58. edsl/data_transfer_models.py +73 -73
  59. edsl/enums.py +173 -173
  60. edsl/exceptions/BaseException.py +21 -21
  61. edsl/exceptions/__init__.py +54 -54
  62. edsl/exceptions/agents.py +38 -38
  63. edsl/exceptions/configuration.py +16 -16
  64. edsl/exceptions/coop.py +10 -10
  65. edsl/exceptions/data.py +14 -14
  66. edsl/exceptions/general.py +34 -34
  67. edsl/exceptions/jobs.py +33 -33
  68. edsl/exceptions/language_models.py +63 -63
  69. edsl/exceptions/prompts.py +15 -15
  70. edsl/exceptions/questions.py +91 -91
  71. edsl/exceptions/results.py +29 -29
  72. edsl/exceptions/scenarios.py +22 -22
  73. edsl/exceptions/surveys.py +37 -37
  74. edsl/inference_services/AnthropicService.py +87 -87
  75. edsl/inference_services/AwsBedrock.py +120 -120
  76. edsl/inference_services/AzureAI.py +217 -217
  77. edsl/inference_services/DeepInfraService.py +18 -18
  78. edsl/inference_services/GoogleService.py +156 -156
  79. edsl/inference_services/GroqService.py +20 -20
  80. edsl/inference_services/InferenceServiceABC.py +147 -147
  81. edsl/inference_services/InferenceServicesCollection.py +97 -97
  82. edsl/inference_services/MistralAIService.py +123 -123
  83. edsl/inference_services/OllamaService.py +18 -18
  84. edsl/inference_services/OpenAIService.py +224 -224
  85. edsl/inference_services/TestService.py +89 -89
  86. edsl/inference_services/TogetherAIService.py +170 -170
  87. edsl/inference_services/models_available_cache.py +118 -118
  88. edsl/inference_services/rate_limits_cache.py +25 -25
  89. edsl/inference_services/registry.py +39 -39
  90. edsl/inference_services/write_available.py +10 -10
  91. edsl/jobs/Answers.py +56 -56
  92. edsl/jobs/Jobs.py +1347 -1347
  93. edsl/jobs/__init__.py +1 -1
  94. edsl/jobs/buckets/BucketCollection.py +63 -63
  95. edsl/jobs/buckets/ModelBuckets.py +65 -65
  96. edsl/jobs/buckets/TokenBucket.py +248 -248
  97. edsl/jobs/interviews/Interview.py +661 -661
  98. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  99. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  100. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  101. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  102. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  103. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  104. edsl/jobs/interviews/ReportErrors.py +66 -66
  105. edsl/jobs/interviews/interview_status_enum.py +9 -9
  106. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  107. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  108. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  109. edsl/jobs/tasks/TaskCreators.py +64 -64
  110. edsl/jobs/tasks/TaskHistory.py +442 -442
  111. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  112. edsl/jobs/tasks/task_status_enum.py +163 -163
  113. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  114. edsl/jobs/tokens/TokenUsage.py +34 -34
  115. edsl/language_models/KeyLookup.py +30 -30
  116. edsl/language_models/LanguageModel.py +706 -706
  117. edsl/language_models/ModelList.py +102 -102
  118. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  119. edsl/language_models/__init__.py +3 -3
  120. edsl/language_models/fake_openai_call.py +15 -15
  121. edsl/language_models/fake_openai_service.py +61 -61
  122. edsl/language_models/registry.py +137 -137
  123. edsl/language_models/repair.py +156 -156
  124. edsl/language_models/unused/ReplicateBase.py +83 -83
  125. edsl/language_models/utilities.py +64 -64
  126. edsl/notebooks/Notebook.py +259 -259
  127. edsl/notebooks/__init__.py +1 -1
  128. edsl/prompts/Prompt.py +357 -357
  129. edsl/prompts/__init__.py +2 -2
  130. edsl/questions/AnswerValidatorMixin.py +289 -289
  131. edsl/questions/QuestionBase.py +656 -656
  132. edsl/questions/QuestionBaseGenMixin.py +161 -161
  133. edsl/questions/QuestionBasePromptsMixin.py +234 -234
  134. edsl/questions/QuestionBudget.py +227 -227
  135. edsl/questions/QuestionCheckBox.py +359 -359
  136. edsl/questions/QuestionExtract.py +183 -183
  137. edsl/questions/QuestionFreeText.py +114 -114
  138. edsl/questions/QuestionFunctional.py +159 -159
  139. edsl/questions/QuestionList.py +231 -231
  140. edsl/questions/QuestionMultipleChoice.py +286 -286
  141. edsl/questions/QuestionNumerical.py +153 -153
  142. edsl/questions/QuestionRank.py +324 -324
  143. edsl/questions/Quick.py +41 -41
  144. edsl/questions/RegisterQuestionsMeta.py +71 -71
  145. edsl/questions/ResponseValidatorABC.py +174 -174
  146. edsl/questions/SimpleAskMixin.py +73 -73
  147. edsl/questions/__init__.py +26 -26
  148. edsl/questions/compose_questions.py +98 -98
  149. edsl/questions/decorators.py +21 -21
  150. edsl/questions/derived/QuestionLikertFive.py +76 -76
  151. edsl/questions/derived/QuestionLinearScale.py +87 -87
  152. edsl/questions/derived/QuestionTopK.py +91 -91
  153. edsl/questions/derived/QuestionYesNo.py +82 -82
  154. edsl/questions/descriptors.py +413 -413
  155. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  156. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  157. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  158. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  159. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  160. edsl/questions/prompt_templates/question_list.jinja +17 -17
  161. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  162. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  163. edsl/questions/question_registry.py +147 -147
  164. edsl/questions/settings.py +12 -12
  165. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  167. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  168. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  169. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  170. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  171. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  172. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  173. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  174. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  175. edsl/questions/templates/list/question_presentation.jinja +5 -5
  176. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  177. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  178. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  179. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  180. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  181. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  182. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  183. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  184. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  185. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  186. edsl/results/Dataset.py +293 -293
  187. edsl/results/DatasetExportMixin.py +717 -717
  188. edsl/results/DatasetTree.py +145 -145
  189. edsl/results/Result.py +450 -450
  190. edsl/results/Results.py +1071 -1071
  191. edsl/results/ResultsDBMixin.py +238 -238
  192. edsl/results/ResultsExportMixin.py +43 -43
  193. edsl/results/ResultsFetchMixin.py +33 -33
  194. edsl/results/ResultsGGMixin.py +121 -121
  195. edsl/results/ResultsToolsMixin.py +98 -98
  196. edsl/results/Selector.py +135 -135
  197. edsl/results/__init__.py +2 -2
  198. edsl/results/tree_explore.py +115 -115
  199. edsl/scenarios/FileStore.py +458 -458
  200. edsl/scenarios/Scenario.py +546 -546
  201. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  202. edsl/scenarios/ScenarioList.py +1112 -1112
  203. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  204. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  205. edsl/scenarios/__init__.py +4 -4
  206. edsl/shared.py +1 -1
  207. edsl/study/ObjectEntry.py +173 -173
  208. edsl/study/ProofOfWork.py +113 -113
  209. edsl/study/SnapShot.py +80 -80
  210. edsl/study/Study.py +528 -528
  211. edsl/study/__init__.py +4 -4
  212. edsl/surveys/DAG.py +148 -148
  213. edsl/surveys/Memory.py +31 -31
  214. edsl/surveys/MemoryPlan.py +244 -244
  215. edsl/surveys/Rule.py +330 -330
  216. edsl/surveys/RuleCollection.py +387 -387
  217. edsl/surveys/Survey.py +1795 -1795
  218. edsl/surveys/SurveyCSS.py +261 -261
  219. edsl/surveys/SurveyExportMixin.py +259 -259
  220. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  221. edsl/surveys/SurveyQualtricsImport.py +284 -284
  222. edsl/surveys/__init__.py +3 -3
  223. edsl/surveys/base.py +53 -53
  224. edsl/surveys/descriptors.py +56 -56
  225. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  226. edsl/surveys/instructions/Instruction.py +51 -51
  227. edsl/surveys/instructions/InstructionCollection.py +77 -77
  228. edsl/templates/error_reporting/base.html +23 -23
  229. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  230. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  231. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  232. edsl/templates/error_reporting/interview_details.html +115 -115
  233. edsl/templates/error_reporting/interviews.html +9 -9
  234. edsl/templates/error_reporting/overview.html +4 -4
  235. edsl/templates/error_reporting/performance_plot.html +1 -1
  236. edsl/templates/error_reporting/report.css +73 -73
  237. edsl/templates/error_reporting/report.html +117 -117
  238. edsl/templates/error_reporting/report.js +25 -25
  239. edsl/tools/__init__.py +1 -1
  240. edsl/tools/clusters.py +192 -192
  241. edsl/tools/embeddings.py +27 -27
  242. edsl/tools/embeddings_plotting.py +118 -118
  243. edsl/tools/plotting.py +112 -112
  244. edsl/tools/summarize.py +18 -18
  245. edsl/utilities/SystemInfo.py +28 -28
  246. edsl/utilities/__init__.py +22 -22
  247. edsl/utilities/ast_utilities.py +25 -25
  248. edsl/utilities/data/Registry.py +6 -6
  249. edsl/utilities/data/__init__.py +1 -1
  250. edsl/utilities/data/scooter_results.json +1 -1
  251. edsl/utilities/decorators.py +77 -77
  252. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  253. edsl/utilities/interface.py +627 -627
  254. edsl/utilities/repair_functions.py +28 -28
  255. edsl/utilities/restricted_python.py +70 -70
  256. edsl/utilities/utilities.py +409 -409
  257. {edsl-0.1.37.dev5.dist-info → edsl-0.1.37.dev6.dist-info}/LICENSE +21 -21
  258. {edsl-0.1.37.dev5.dist-info → edsl-0.1.37.dev6.dist-info}/METADATA +1 -1
  259. edsl-0.1.37.dev6.dist-info/RECORD +283 -0
  260. edsl-0.1.37.dev5.dist-info/RECORD +0 -283
  261. {edsl-0.1.37.dev5.dist-info → edsl-0.1.37.dev6.dist-info}/WHEEL +0 -0
@@ -1,338 +1,338 @@
1
- from __future__ import annotations
2
- import time
3
- import asyncio
4
- import threading
5
- from typing import Coroutine, List, AsyncGenerator, Optional, Union, Generator
6
- from contextlib import contextmanager
7
- from collections import UserList
8
-
9
- from edsl.results.Results import Results
10
- from edsl.jobs.interviews.Interview import Interview
11
- from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
12
-
13
- from edsl.jobs.tasks.TaskHistory import TaskHistory
14
- from edsl.jobs.buckets.BucketCollection import BucketCollection
15
- from edsl.utilities.decorators import jupyter_nb_handler
16
- from edsl.data.Cache import Cache
17
- from edsl.results.Result import Result
18
- from edsl.results.Results import Results
19
- from edsl.language_models.LanguageModel import LanguageModel
20
- from edsl.data.Cache import Cache
21
-
22
-
23
- class StatusTracker(UserList):
24
- def __init__(self, total_tasks: int):
25
- self.total_tasks = total_tasks
26
- super().__init__()
27
-
28
- def current_status(self):
29
- return print(f"Completed: {len(self.data)} of {self.total_tasks}", end="\r")
30
-
31
-
32
- class JobsRunnerAsyncio:
33
- """A class for running a collection of interviews asynchronously.
34
-
35
- It gets instaniated from a Jobs object.
36
- The Jobs object is a collection of interviews that are to be run.
37
- """
38
-
39
- def __init__(self, jobs: "Jobs"):
40
- self.jobs = jobs
41
- self.interviews: List["Interview"] = jobs.interviews()
42
- self.bucket_collection: "BucketCollection" = jobs.bucket_collection
43
- self.total_interviews: List["Interview"] = []
44
-
45
- async def run_async_generator(
46
- self,
47
- cache: Cache,
48
- n: int = 1,
49
- stop_on_exception: bool = False,
50
- sidecar_model: Optional[LanguageModel] = None,
51
- total_interviews: Optional[List["Interview"]] = None,
52
- raise_validation_errors: bool = False,
53
- ) -> AsyncGenerator["Result", None]:
54
- """Creates the tasks, runs them asynchronously, and returns the results as a Results object.
55
-
56
- Completed tasks are yielded as they are completed.
57
-
58
- :param n: how many times to run each interview
59
- :param stop_on_exception: Whether to stop the interview if an exception is raised
60
- :param sidecar_model: a language model to use in addition to the interview's model
61
- :param total_interviews: A list of interviews to run can be provided instead.
62
- :param raise_validation_errors: Whether to raise validation errors
63
- """
64
- tasks = []
65
- if total_interviews: # was already passed in total interviews
66
- self.total_interviews = total_interviews
67
- else:
68
- self.total_interviews = list(
69
- self._populate_total_interviews(n=n)
70
- ) # Populate self.total_interviews before creating tasks
71
-
72
- for interview in self.total_interviews:
73
- interviewing_task = self._build_interview_task(
74
- interview=interview,
75
- stop_on_exception=stop_on_exception,
76
- sidecar_model=sidecar_model,
77
- raise_validation_errors=raise_validation_errors,
78
- )
79
- tasks.append(asyncio.create_task(interviewing_task))
80
-
81
- for task in asyncio.as_completed(tasks):
82
- result = await task
83
- self.jobs_runner_status.add_completed_interview(result)
84
- yield result
85
-
86
- def _populate_total_interviews(
87
- self, n: int = 1
88
- ) -> Generator["Interview", None, None]:
89
- """Populates self.total_interviews with n copies of each interview.
90
-
91
- :param n: how many times to run each interview.
92
- """
93
- for interview in self.interviews:
94
- for iteration in range(n):
95
- if iteration > 0:
96
- yield interview.duplicate(iteration=iteration, cache=self.cache)
97
- else:
98
- interview.cache = self.cache
99
- yield interview
100
-
101
- async def run_async(self, cache: Optional[Cache] = None, n: int = 1) -> Results:
102
- """Used for some other modules that have a non-standard way of running interviews."""
103
- self.jobs_runner_status = JobsRunnerStatus(self, n=n)
104
- self.cache = Cache() if cache is None else cache
105
- data = []
106
- async for result in self.run_async_generator(cache=self.cache, n=n):
107
- data.append(result)
108
- return Results(survey=self.jobs.survey, data=data)
109
-
110
- def simple_run(self):
111
- data = asyncio.run(self.run_async())
112
- return Results(survey=self.jobs.survey, data=data)
113
-
114
- async def _build_interview_task(
115
- self,
116
- *,
117
- interview: Interview,
118
- stop_on_exception: bool = False,
119
- sidecar_model: Optional["LanguageModel"] = None,
120
- raise_validation_errors: bool = False,
121
- ) -> "Result":
122
- """Conducts an interview and returns the result.
123
-
124
- :param interview: the interview to conduct
125
- :param stop_on_exception: stops the interview if an exception is raised
126
- :param sidecar_model: a language model to use in addition to the interview's model
127
- """
128
- # the model buckets are used to track usage rates
129
- model_buckets = self.bucket_collection[interview.model]
130
-
131
- # get the results of the interview
132
- answer, valid_results = await interview.async_conduct_interview(
133
- model_buckets=model_buckets,
134
- stop_on_exception=stop_on_exception,
135
- sidecar_model=sidecar_model,
136
- raise_validation_errors=raise_validation_errors,
137
- )
138
-
139
- question_results = {}
140
- for result in valid_results:
141
- question_results[result.question_name] = result
142
-
143
- answer_key_names = list(question_results.keys())
144
-
145
- generated_tokens_dict = {
146
- k + "_generated_tokens": question_results[k].generated_tokens
147
- for k in answer_key_names
148
- }
149
- comments_dict = {
150
- k + "_comment": question_results[k].comment for k in answer_key_names
151
- }
152
-
153
- # we should have a valid result for each question
154
- answer_dict = {k: answer[k] for k in answer_key_names}
155
- assert len(valid_results) == len(answer_key_names)
156
-
157
- # TODO: move this down into Interview
158
- question_name_to_prompts = dict({})
159
- for result in valid_results:
160
- question_name = result.question_name
161
- question_name_to_prompts[question_name] = {
162
- "user_prompt": result.prompts["user_prompt"],
163
- "system_prompt": result.prompts["system_prompt"],
164
- }
165
-
166
- prompt_dictionary = {}
167
- for answer_key_name in answer_key_names:
168
- prompt_dictionary[
169
- answer_key_name + "_user_prompt"
170
- ] = question_name_to_prompts[answer_key_name]["user_prompt"]
171
- prompt_dictionary[
172
- answer_key_name + "_system_prompt"
173
- ] = question_name_to_prompts[answer_key_name]["system_prompt"]
174
-
175
- raw_model_results_dictionary = {}
176
- cache_used_dictionary = {}
177
- for result in valid_results:
178
- question_name = result.question_name
179
- raw_model_results_dictionary[
180
- question_name + "_raw_model_response"
181
- ] = result.raw_model_response
182
- raw_model_results_dictionary[question_name + "_cost"] = result.cost
183
- one_use_buys = (
184
- "NA"
185
- if isinstance(result.cost, str)
186
- or result.cost == 0
187
- or result.cost is None
188
- else 1.0 / result.cost
189
- )
190
- raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
191
- cache_used_dictionary[question_name] = result.cache_used
192
-
193
- result = Result(
194
- agent=interview.agent,
195
- scenario=interview.scenario,
196
- model=interview.model,
197
- iteration=interview.iteration,
198
- answer=answer_dict,
199
- prompt=prompt_dictionary,
200
- raw_model_response=raw_model_results_dictionary,
201
- survey=interview.survey,
202
- generated_tokens=generated_tokens_dict,
203
- comments_dict=comments_dict,
204
- cache_used_dict=cache_used_dictionary,
205
- )
206
- result.interview_hash = hash(interview)
207
-
208
- return result
209
-
210
- @property
211
- def elapsed_time(self):
212
- return time.monotonic() - self.start_time
213
-
214
- def process_results(
215
- self, raw_results: Results, cache: Cache, print_exceptions: bool
216
- ):
217
- interview_lookup = {
218
- hash(interview): index
219
- for index, interview in enumerate(self.total_interviews)
220
- }
221
- interview_hashes = list(interview_lookup.keys())
222
-
223
- task_history = TaskHistory(self.total_interviews, include_traceback=False)
224
-
225
- results = Results(
226
- survey=self.jobs.survey,
227
- data=sorted(
228
- raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
229
- ),
230
- task_history=task_history,
231
- cache=cache,
232
- )
233
- results.bucket_collection = self.bucket_collection
234
-
235
- if results.has_unfixed_exceptions and print_exceptions:
236
- from edsl.scenarios.FileStore import HTMLFileStore
237
- from edsl.config import CONFIG
238
- from edsl.coop.coop import Coop
239
-
240
- msg = f"Exceptions were raised in {len(results.task_history.indices)} out of {len(self.total_interviews)} interviews.\n"
241
-
242
- if len(results.task_history.indices) > 5:
243
- msg += f"Exceptions were raised in the following interviews: {results.task_history.indices}.\n"
244
-
245
- print(msg)
246
- # this is where exceptions are opening up
247
- filepath = results.task_history.html(
248
- cta="Open report to see details.",
249
- open_in_browser=True,
250
- return_link=True,
251
- )
252
-
253
- try:
254
- coop = Coop()
255
- user_edsl_settings = coop.edsl_settings
256
- remote_logging = user_edsl_settings["remote_logging"]
257
- except Exception as e:
258
- print(e)
259
- remote_logging = False
260
-
261
- if remote_logging:
262
- filestore = HTMLFileStore(filepath)
263
- coop_details = filestore.push(description="Error report")
264
- print(coop_details)
265
-
266
- print("Also see: https://docs.expectedparrot.com/en/latest/exceptions.html")
267
-
268
- return results
269
-
270
- @jupyter_nb_handler
271
- async def run(
272
- self,
273
- cache: Union[Cache, False, None],
274
- n: int = 1,
275
- stop_on_exception: bool = False,
276
- progress_bar: bool = False,
277
- sidecar_model: Optional[LanguageModel] = None,
278
- print_exceptions: bool = True,
279
- raise_validation_errors: bool = False,
280
- ) -> "Coroutine":
281
- """Runs a collection of interviews, handling both async and sync contexts."""
282
-
283
- self.results = []
284
- self.start_time = time.monotonic()
285
- self.completed = False
286
- self.cache = cache
287
- self.sidecar_model = sidecar_model
288
-
289
- self.jobs_runner_status = JobsRunnerStatus(self, n=n)
290
-
291
- stop_event = threading.Event()
292
-
293
- async def process_results(cache):
294
- """Processes results from interviews."""
295
- async for result in self.run_async_generator(
296
- n=n,
297
- stop_on_exception=stop_on_exception,
298
- cache=cache,
299
- sidecar_model=sidecar_model,
300
- raise_validation_errors=raise_validation_errors,
301
- ):
302
- self.results.append(result)
303
- self.completed = True
304
-
305
- def run_progress_bar(stop_event):
306
- """Runs the progress bar in a separate thread."""
307
- self.jobs_runner_status.update_progress(stop_event)
308
-
309
- if progress_bar:
310
- progress_thread = threading.Thread(
311
- target=run_progress_bar, args=(stop_event,)
312
- )
313
- progress_thread.start()
314
-
315
- exception_to_raise = None
316
- try:
317
- with cache as c:
318
- await process_results(cache=c)
319
- except KeyboardInterrupt:
320
- print("Keyboard interrupt received. Stopping gracefully...")
321
- stop_event.set()
322
- except Exception as e:
323
- if stop_on_exception:
324
- exception_to_raise = e
325
- stop_event.set()
326
- finally:
327
- stop_event.set()
328
- if progress_bar:
329
- # self.jobs_runner_status.stop_event.set()
330
- if progress_thread:
331
- progress_thread.join()
332
-
333
- if exception_to_raise:
334
- raise exception_to_raise
335
-
336
- return self.process_results(
337
- raw_results=self.results, cache=cache, print_exceptions=print_exceptions
338
- )
1
+ from __future__ import annotations
2
+ import time
3
+ import asyncio
4
+ import threading
5
+ from typing import Coroutine, List, AsyncGenerator, Optional, Union, Generator
6
+ from contextlib import contextmanager
7
+ from collections import UserList
8
+
9
+ from edsl.results.Results import Results
10
+ from edsl.jobs.interviews.Interview import Interview
11
+ from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
12
+
13
+ from edsl.jobs.tasks.TaskHistory import TaskHistory
14
+ from edsl.jobs.buckets.BucketCollection import BucketCollection
15
+ from edsl.utilities.decorators import jupyter_nb_handler
16
+ from edsl.data.Cache import Cache
17
+ from edsl.results.Result import Result
18
+ from edsl.results.Results import Results
19
+ from edsl.language_models.LanguageModel import LanguageModel
20
+ from edsl.data.Cache import Cache
21
+
22
+
23
+ class StatusTracker(UserList):
24
+ def __init__(self, total_tasks: int):
25
+ self.total_tasks = total_tasks
26
+ super().__init__()
27
+
28
+ def current_status(self):
29
+ return print(f"Completed: {len(self.data)} of {self.total_tasks}", end="\r")
30
+
31
+
32
+ class JobsRunnerAsyncio:
33
+ """A class for running a collection of interviews asynchronously.
34
+
35
+ It gets instaniated from a Jobs object.
36
+ The Jobs object is a collection of interviews that are to be run.
37
+ """
38
+
39
+ def __init__(self, jobs: "Jobs"):
40
+ self.jobs = jobs
41
+ self.interviews: List["Interview"] = jobs.interviews()
42
+ self.bucket_collection: "BucketCollection" = jobs.bucket_collection
43
+ self.total_interviews: List["Interview"] = []
44
+
45
+ async def run_async_generator(
46
+ self,
47
+ cache: Cache,
48
+ n: int = 1,
49
+ stop_on_exception: bool = False,
50
+ sidecar_model: Optional[LanguageModel] = None,
51
+ total_interviews: Optional[List["Interview"]] = None,
52
+ raise_validation_errors: bool = False,
53
+ ) -> AsyncGenerator["Result", None]:
54
+ """Creates the tasks, runs them asynchronously, and returns the results as a Results object.
55
+
56
+ Completed tasks are yielded as they are completed.
57
+
58
+ :param n: how many times to run each interview
59
+ :param stop_on_exception: Whether to stop the interview if an exception is raised
60
+ :param sidecar_model: a language model to use in addition to the interview's model
61
+ :param total_interviews: A list of interviews to run can be provided instead.
62
+ :param raise_validation_errors: Whether to raise validation errors
63
+ """
64
+ tasks = []
65
+ if total_interviews: # was already passed in total interviews
66
+ self.total_interviews = total_interviews
67
+ else:
68
+ self.total_interviews = list(
69
+ self._populate_total_interviews(n=n)
70
+ ) # Populate self.total_interviews before creating tasks
71
+
72
+ for interview in self.total_interviews:
73
+ interviewing_task = self._build_interview_task(
74
+ interview=interview,
75
+ stop_on_exception=stop_on_exception,
76
+ sidecar_model=sidecar_model,
77
+ raise_validation_errors=raise_validation_errors,
78
+ )
79
+ tasks.append(asyncio.create_task(interviewing_task))
80
+
81
+ for task in asyncio.as_completed(tasks):
82
+ result = await task
83
+ self.jobs_runner_status.add_completed_interview(result)
84
+ yield result
85
+
86
+ def _populate_total_interviews(
87
+ self, n: int = 1
88
+ ) -> Generator["Interview", None, None]:
89
+ """Populates self.total_interviews with n copies of each interview.
90
+
91
+ :param n: how many times to run each interview.
92
+ """
93
+ for interview in self.interviews:
94
+ for iteration in range(n):
95
+ if iteration > 0:
96
+ yield interview.duplicate(iteration=iteration, cache=self.cache)
97
+ else:
98
+ interview.cache = self.cache
99
+ yield interview
100
+
101
+ async def run_async(self, cache: Optional[Cache] = None, n: int = 1) -> Results:
102
+ """Used for some other modules that have a non-standard way of running interviews."""
103
+ self.jobs_runner_status = JobsRunnerStatus(self, n=n)
104
+ self.cache = Cache() if cache is None else cache
105
+ data = []
106
+ async for result in self.run_async_generator(cache=self.cache, n=n):
107
+ data.append(result)
108
+ return Results(survey=self.jobs.survey, data=data)
109
+
110
+ def simple_run(self):
111
+ data = asyncio.run(self.run_async())
112
+ return Results(survey=self.jobs.survey, data=data)
113
+
114
+ async def _build_interview_task(
115
+ self,
116
+ *,
117
+ interview: Interview,
118
+ stop_on_exception: bool = False,
119
+ sidecar_model: Optional["LanguageModel"] = None,
120
+ raise_validation_errors: bool = False,
121
+ ) -> "Result":
122
+ """Conducts an interview and returns the result.
123
+
124
+ :param interview: the interview to conduct
125
+ :param stop_on_exception: stops the interview if an exception is raised
126
+ :param sidecar_model: a language model to use in addition to the interview's model
127
+ """
128
+ # the model buckets are used to track usage rates
129
+ model_buckets = self.bucket_collection[interview.model]
130
+
131
+ # get the results of the interview
132
+ answer, valid_results = await interview.async_conduct_interview(
133
+ model_buckets=model_buckets,
134
+ stop_on_exception=stop_on_exception,
135
+ sidecar_model=sidecar_model,
136
+ raise_validation_errors=raise_validation_errors,
137
+ )
138
+
139
+ question_results = {}
140
+ for result in valid_results:
141
+ question_results[result.question_name] = result
142
+
143
+ answer_key_names = list(question_results.keys())
144
+
145
+ generated_tokens_dict = {
146
+ k + "_generated_tokens": question_results[k].generated_tokens
147
+ for k in answer_key_names
148
+ }
149
+ comments_dict = {
150
+ k + "_comment": question_results[k].comment for k in answer_key_names
151
+ }
152
+
153
+ # we should have a valid result for each question
154
+ answer_dict = {k: answer[k] for k in answer_key_names}
155
+ assert len(valid_results) == len(answer_key_names)
156
+
157
+ # TODO: move this down into Interview
158
+ question_name_to_prompts = dict({})
159
+ for result in valid_results:
160
+ question_name = result.question_name
161
+ question_name_to_prompts[question_name] = {
162
+ "user_prompt": result.prompts["user_prompt"],
163
+ "system_prompt": result.prompts["system_prompt"],
164
+ }
165
+
166
+ prompt_dictionary = {}
167
+ for answer_key_name in answer_key_names:
168
+ prompt_dictionary[
169
+ answer_key_name + "_user_prompt"
170
+ ] = question_name_to_prompts[answer_key_name]["user_prompt"]
171
+ prompt_dictionary[
172
+ answer_key_name + "_system_prompt"
173
+ ] = question_name_to_prompts[answer_key_name]["system_prompt"]
174
+
175
+ raw_model_results_dictionary = {}
176
+ cache_used_dictionary = {}
177
+ for result in valid_results:
178
+ question_name = result.question_name
179
+ raw_model_results_dictionary[
180
+ question_name + "_raw_model_response"
181
+ ] = result.raw_model_response
182
+ raw_model_results_dictionary[question_name + "_cost"] = result.cost
183
+ one_use_buys = (
184
+ "NA"
185
+ if isinstance(result.cost, str)
186
+ or result.cost == 0
187
+ or result.cost is None
188
+ else 1.0 / result.cost
189
+ )
190
+ raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
191
+ cache_used_dictionary[question_name] = result.cache_used
192
+
193
+ result = Result(
194
+ agent=interview.agent,
195
+ scenario=interview.scenario,
196
+ model=interview.model,
197
+ iteration=interview.iteration,
198
+ answer=answer_dict,
199
+ prompt=prompt_dictionary,
200
+ raw_model_response=raw_model_results_dictionary,
201
+ survey=interview.survey,
202
+ generated_tokens=generated_tokens_dict,
203
+ comments_dict=comments_dict,
204
+ cache_used_dict=cache_used_dictionary,
205
+ )
206
+ result.interview_hash = hash(interview)
207
+
208
+ return result
209
+
210
+ @property
211
+ def elapsed_time(self):
212
+ return time.monotonic() - self.start_time
213
+
214
+ def process_results(
215
+ self, raw_results: Results, cache: Cache, print_exceptions: bool
216
+ ):
217
+ interview_lookup = {
218
+ hash(interview): index
219
+ for index, interview in enumerate(self.total_interviews)
220
+ }
221
+ interview_hashes = list(interview_lookup.keys())
222
+
223
+ task_history = TaskHistory(self.total_interviews, include_traceback=False)
224
+
225
+ results = Results(
226
+ survey=self.jobs.survey,
227
+ data=sorted(
228
+ raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
229
+ ),
230
+ task_history=task_history,
231
+ cache=cache,
232
+ )
233
+ results.bucket_collection = self.bucket_collection
234
+
235
+ if results.has_unfixed_exceptions and print_exceptions:
236
+ from edsl.scenarios.FileStore import HTMLFileStore
237
+ from edsl.config import CONFIG
238
+ from edsl.coop.coop import Coop
239
+
240
+ msg = f"Exceptions were raised in {len(results.task_history.indices)} out of {len(self.total_interviews)} interviews.\n"
241
+
242
+ if len(results.task_history.indices) > 5:
243
+ msg += f"Exceptions were raised in the following interviews: {results.task_history.indices}.\n"
244
+
245
+ print(msg)
246
+ # this is where exceptions are opening up
247
+ filepath = results.task_history.html(
248
+ cta="Open report to see details.",
249
+ open_in_browser=True,
250
+ return_link=True,
251
+ )
252
+
253
+ try:
254
+ coop = Coop()
255
+ user_edsl_settings = coop.edsl_settings
256
+ remote_logging = user_edsl_settings["remote_logging"]
257
+ except Exception as e:
258
+ print(e)
259
+ remote_logging = False
260
+
261
+ if remote_logging:
262
+ filestore = HTMLFileStore(filepath)
263
+ coop_details = filestore.push(description="Error report")
264
+ print(coop_details)
265
+
266
+ print("Also see: https://docs.expectedparrot.com/en/latest/exceptions.html")
267
+
268
+ return results
269
+
270
    @jupyter_nb_handler
    async def run(
        self,
        # NOTE(review): ``False`` here is a value, not a type — presumably means
        # "run without caching"; confirm before tightening to Literal[False]/bool.
        cache: Union[Cache, False, None],
        n: int = 1,
        stop_on_exception: bool = False,
        progress_bar: bool = False,
        sidecar_model: Optional[LanguageModel] = None,
        print_exceptions: bool = True,
        raise_validation_errors: bool = False,
    ) -> "Coroutine":
        """Runs a collection of interviews, handling both async and sync contexts.

        :param cache: cache for model responses; entered as a context manager for
            the duration of the run and attached to the returned Results
        :param n: iteration count forwarded to the status tracker and generator
        :param stop_on_exception: when True, the first exception aborts the run
            and is re-raised after cleanup
        :param progress_bar: when True, a progress display runs in a background
            thread until the run finishes
        :param sidecar_model: optional extra model used alongside each interview's own
        :param print_exceptions: forwarded to process_results (error reporting)
        :param raise_validation_errors: forwarded to the interviews
        :return: the processed Results for all interviews
        """

        # Per-run state: results accumulate here as interviews complete.
        self.results = []
        self.start_time = time.monotonic()
        self.completed = False
        self.cache = cache
        self.sidecar_model = sidecar_model

        self.jobs_runner_status = JobsRunnerStatus(self, n=n)

        # Signals the progress-bar thread to shut down.
        stop_event = threading.Event()

        async def process_results(cache):
            """Processes results from interviews."""
            async for result in self.run_async_generator(
                n=n,
                stop_on_exception=stop_on_exception,
                cache=cache,
                sidecar_model=sidecar_model,
                raise_validation_errors=raise_validation_errors,
            ):
                self.results.append(result)
            self.completed = True

        def run_progress_bar(stop_event):
            """Runs the progress bar in a separate thread."""
            self.jobs_runner_status.update_progress(stop_event)

        if progress_bar:
            progress_thread = threading.Thread(
                target=run_progress_bar, args=(stop_event,)
            )
            progress_thread.start()

        # Exceptions are deferred so the progress thread is always joined first.
        exception_to_raise = None
        try:
            with cache as c:
                await process_results(cache=c)
        except KeyboardInterrupt:
            print("Keyboard interrupt received. Stopping gracefully...")
            stop_event.set()
        except Exception as e:
            # NOTE(review): when stop_on_exception is False the exception is
            # swallowed here — presumably interview-level errors are captured in
            # the task history instead; confirm this is intentional.
            if stop_on_exception:
                exception_to_raise = e
            stop_event.set()
        finally:
            stop_event.set()
            if progress_bar:
                # self.jobs_runner_status.stop_event.set()
                if progress_thread:
                    progress_thread.join()

        if exception_to_raise:
            raise exception_to_raise

        return self.process_results(
            raw_results=self.results, cache=cache, print_exceptions=print_exceptions
        )