edsl 0.1.36.dev2__py3-none-any.whl → 0.1.36.dev6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (257)
  1. edsl/Base.py +303 -298
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +47 -47
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +804 -800
  7. edsl/agents/AgentList.py +337 -337
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +294 -294
  10. edsl/agents/PromptConstructor.py +312 -311
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +86 -86
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +152 -152
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +238 -238
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/mug_negotiation.py +81 -81
  45. edsl/conversation/next_speaker_utilities.py +93 -93
  46. edsl/coop/PriceFetcher.py +54 -58
  47. edsl/coop/__init__.py +2 -2
  48. edsl/coop/coop.py +849 -815
  49. edsl/coop/utils.py +131 -131
  50. edsl/data/Cache.py +527 -527
  51. edsl/data/CacheEntry.py +228 -228
  52. edsl/data/CacheHandler.py +149 -149
  53. edsl/data/RemoteCacheSync.py +84 -0
  54. edsl/data/SQLiteDict.py +292 -292
  55. edsl/data/__init__.py +4 -4
  56. edsl/data/orm.py +10 -10
  57. edsl/data_transfer_models.py +73 -73
  58. edsl/enums.py +173 -173
  59. edsl/exceptions/__init__.py +50 -50
  60. edsl/exceptions/agents.py +40 -40
  61. edsl/exceptions/configuration.py +16 -16
  62. edsl/exceptions/coop.py +10 -2
  63. edsl/exceptions/data.py +14 -14
  64. edsl/exceptions/general.py +34 -34
  65. edsl/exceptions/jobs.py +33 -33
  66. edsl/exceptions/language_models.py +63 -63
  67. edsl/exceptions/prompts.py +15 -15
  68. edsl/exceptions/questions.py +91 -91
  69. edsl/exceptions/results.py +26 -26
  70. edsl/exceptions/surveys.py +34 -34
  71. edsl/inference_services/AnthropicService.py +87 -87
  72. edsl/inference_services/AwsBedrock.py +115 -115
  73. edsl/inference_services/AzureAI.py +217 -217
  74. edsl/inference_services/DeepInfraService.py +18 -18
  75. edsl/inference_services/GoogleService.py +156 -156
  76. edsl/inference_services/GroqService.py +20 -20
  77. edsl/inference_services/InferenceServiceABC.py +147 -119
  78. edsl/inference_services/InferenceServicesCollection.py +72 -68
  79. edsl/inference_services/MistralAIService.py +123 -123
  80. edsl/inference_services/OllamaService.py +18 -18
  81. edsl/inference_services/OpenAIService.py +224 -224
  82. edsl/inference_services/TestService.py +89 -89
  83. edsl/inference_services/TogetherAIService.py +170 -170
  84. edsl/inference_services/models_available_cache.py +118 -94
  85. edsl/inference_services/rate_limits_cache.py +25 -25
  86. edsl/inference_services/registry.py +39 -39
  87. edsl/inference_services/write_available.py +10 -10
  88. edsl/jobs/Answers.py +56 -56
  89. edsl/jobs/Jobs.py +1112 -1089
  90. edsl/jobs/__init__.py +1 -1
  91. edsl/jobs/buckets/BucketCollection.py +63 -63
  92. edsl/jobs/buckets/ModelBuckets.py +65 -65
  93. edsl/jobs/buckets/TokenBucket.py +248 -248
  94. edsl/jobs/interviews/Interview.py +651 -633
  95. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -90
  96. edsl/jobs/interviews/InterviewExceptionEntry.py +182 -164
  97. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  98. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  99. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  100. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  101. edsl/jobs/interviews/ReportErrors.py +66 -66
  102. edsl/jobs/interviews/interview_status_enum.py +9 -9
  103. edsl/jobs/runners/JobsRunnerAsyncio.py +337 -343
  104. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  105. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  106. edsl/jobs/tasks/TaskCreators.py +64 -64
  107. edsl/jobs/tasks/TaskHistory.py +441 -425
  108. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  109. edsl/jobs/tasks/task_status_enum.py +163 -163
  110. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  111. edsl/jobs/tokens/TokenUsage.py +34 -34
  112. edsl/language_models/LanguageModel.py +718 -718
  113. edsl/language_models/ModelList.py +102 -102
  114. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  115. edsl/language_models/__init__.py +2 -2
  116. edsl/language_models/fake_openai_call.py +15 -15
  117. edsl/language_models/fake_openai_service.py +61 -61
  118. edsl/language_models/registry.py +137 -137
  119. edsl/language_models/repair.py +156 -156
  120. edsl/language_models/unused/ReplicateBase.py +83 -83
  121. edsl/language_models/utilities.py +64 -64
  122. edsl/notebooks/Notebook.py +259 -259
  123. edsl/notebooks/__init__.py +1 -1
  124. edsl/prompts/Prompt.py +358 -358
  125. edsl/prompts/__init__.py +2 -2
  126. edsl/questions/AnswerValidatorMixin.py +289 -289
  127. edsl/questions/QuestionBase.py +616 -616
  128. edsl/questions/QuestionBaseGenMixin.py +161 -161
  129. edsl/questions/QuestionBasePromptsMixin.py +266 -266
  130. edsl/questions/QuestionBudget.py +227 -227
  131. edsl/questions/QuestionCheckBox.py +359 -359
  132. edsl/questions/QuestionExtract.py +183 -183
  133. edsl/questions/QuestionFreeText.py +113 -113
  134. edsl/questions/QuestionFunctional.py +159 -155
  135. edsl/questions/QuestionList.py +231 -231
  136. edsl/questions/QuestionMultipleChoice.py +286 -286
  137. edsl/questions/QuestionNumerical.py +153 -153
  138. edsl/questions/QuestionRank.py +324 -324
  139. edsl/questions/Quick.py +41 -41
  140. edsl/questions/RegisterQuestionsMeta.py +71 -71
  141. edsl/questions/ResponseValidatorABC.py +174 -174
  142. edsl/questions/SimpleAskMixin.py +73 -73
  143. edsl/questions/__init__.py +26 -26
  144. edsl/questions/compose_questions.py +98 -98
  145. edsl/questions/decorators.py +21 -21
  146. edsl/questions/derived/QuestionLikertFive.py +76 -76
  147. edsl/questions/derived/QuestionLinearScale.py +87 -87
  148. edsl/questions/derived/QuestionTopK.py +91 -91
  149. edsl/questions/derived/QuestionYesNo.py +82 -82
  150. edsl/questions/descriptors.py +418 -418
  151. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  152. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  153. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  154. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  155. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  156. edsl/questions/prompt_templates/question_list.jinja +17 -17
  157. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  158. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  159. edsl/questions/question_registry.py +147 -147
  160. edsl/questions/settings.py +12 -12
  161. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  162. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  163. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  164. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  165. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  167. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  168. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  169. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  170. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  171. edsl/questions/templates/list/question_presentation.jinja +5 -5
  172. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  173. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  174. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  176. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  177. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  178. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  179. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  180. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  181. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  182. edsl/results/Dataset.py +293 -281
  183. edsl/results/DatasetExportMixin.py +693 -693
  184. edsl/results/DatasetTree.py +145 -145
  185. edsl/results/Result.py +433 -431
  186. edsl/results/Results.py +1158 -1146
  187. edsl/results/ResultsDBMixin.py +238 -238
  188. edsl/results/ResultsExportMixin.py +43 -43
  189. edsl/results/ResultsFetchMixin.py +33 -33
  190. edsl/results/ResultsGGMixin.py +121 -121
  191. edsl/results/ResultsToolsMixin.py +98 -98
  192. edsl/results/Selector.py +118 -118
  193. edsl/results/__init__.py +2 -2
  194. edsl/results/tree_explore.py +115 -115
  195. edsl/scenarios/FileStore.py +443 -443
  196. edsl/scenarios/Scenario.py +507 -496
  197. edsl/scenarios/ScenarioHtmlMixin.py +59 -59
  198. edsl/scenarios/ScenarioList.py +1101 -1101
  199. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  200. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  201. edsl/scenarios/__init__.py +2 -2
  202. edsl/shared.py +1 -1
  203. edsl/study/ObjectEntry.py +173 -173
  204. edsl/study/ProofOfWork.py +113 -113
  205. edsl/study/SnapShot.py +80 -80
  206. edsl/study/Study.py +528 -528
  207. edsl/study/__init__.py +4 -4
  208. edsl/surveys/DAG.py +148 -148
  209. edsl/surveys/Memory.py +31 -31
  210. edsl/surveys/MemoryPlan.py +244 -244
  211. edsl/surveys/Rule.py +324 -324
  212. edsl/surveys/RuleCollection.py +387 -387
  213. edsl/surveys/Survey.py +1772 -1769
  214. edsl/surveys/SurveyCSS.py +261 -261
  215. edsl/surveys/SurveyExportMixin.py +259 -259
  216. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  217. edsl/surveys/SurveyQualtricsImport.py +284 -284
  218. edsl/surveys/__init__.py +3 -3
  219. edsl/surveys/base.py +53 -53
  220. edsl/surveys/descriptors.py +56 -56
  221. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  222. edsl/surveys/instructions/Instruction.py +51 -34
  223. edsl/surveys/instructions/InstructionCollection.py +77 -77
  224. edsl/templates/error_reporting/base.html +23 -23
  225. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  226. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  227. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  228. edsl/templates/error_reporting/interview_details.html +115 -115
  229. edsl/templates/error_reporting/interviews.html +9 -9
  230. edsl/templates/error_reporting/overview.html +4 -4
  231. edsl/templates/error_reporting/performance_plot.html +1 -1
  232. edsl/templates/error_reporting/report.css +73 -73
  233. edsl/templates/error_reporting/report.html +117 -117
  234. edsl/templates/error_reporting/report.js +25 -25
  235. edsl/tools/__init__.py +1 -1
  236. edsl/tools/clusters.py +192 -192
  237. edsl/tools/embeddings.py +27 -27
  238. edsl/tools/embeddings_plotting.py +118 -118
  239. edsl/tools/plotting.py +112 -112
  240. edsl/tools/summarize.py +18 -18
  241. edsl/utilities/SystemInfo.py +28 -28
  242. edsl/utilities/__init__.py +22 -22
  243. edsl/utilities/ast_utilities.py +25 -25
  244. edsl/utilities/data/Registry.py +6 -6
  245. edsl/utilities/data/__init__.py +1 -1
  246. edsl/utilities/data/scooter_results.json +1 -1
  247. edsl/utilities/decorators.py +77 -77
  248. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  249. edsl/utilities/interface.py +627 -627
  250. edsl/utilities/repair_functions.py +28 -28
  251. edsl/utilities/restricted_python.py +70 -70
  252. edsl/utilities/utilities.py +391 -391
  253. {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev6.dist-info}/LICENSE +21 -21
  254. {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev6.dist-info}/METADATA +1 -1
  255. edsl-0.1.36.dev6.dist-info/RECORD +279 -0
  256. edsl-0.1.36.dev2.dist-info/RECORD +0 -278
  257. {edsl-0.1.36.dev2.dist-info → edsl-0.1.36.dev6.dist-info}/WHEEL +0 -0
@@ -1,343 +1,337 @@
1
- from __future__ import annotations
2
- import time
3
- import math
4
- import asyncio
5
- import functools
6
- import threading
7
- from typing import Coroutine, List, AsyncGenerator, Optional, Union, Generator
8
- from contextlib import contextmanager
9
- from collections import UserList
10
-
11
- from rich.live import Live
12
- from rich.console import Console
13
-
14
- from edsl.results.Results import Results
15
- from edsl import shared_globals
16
- from edsl.jobs.interviews.Interview import Interview
17
- from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
18
-
19
- from edsl.jobs.tasks.TaskHistory import TaskHistory
20
- from edsl.jobs.buckets.BucketCollection import BucketCollection
21
- from edsl.utilities.decorators import jupyter_nb_handler
22
- from edsl.data.Cache import Cache
23
- from edsl.results.Result import Result
24
- from edsl.results.Results import Results
25
- from edsl.language_models.LanguageModel import LanguageModel
26
- from edsl.data.Cache import Cache
27
-
28
-
29
- class StatusTracker(UserList):
30
- def __init__(self, total_tasks: int):
31
- self.total_tasks = total_tasks
32
- super().__init__()
33
-
34
- def current_status(self):
35
- return print(f"Completed: {len(self.data)} of {self.total_tasks}", end="\r")
36
-
37
-
38
- class JobsRunnerAsyncio:
39
- """A class for running a collection of interviews asynchronously.
40
-
41
- It gets instaniated from a Jobs object.
42
- The Jobs object is a collection of interviews that are to be run.
43
- """
44
-
45
- def __init__(self, jobs: "Jobs"):
46
- self.jobs = jobs
47
- self.interviews: List["Interview"] = jobs.interviews()
48
- self.bucket_collection: "BucketCollection" = jobs.bucket_collection
49
- self.total_interviews: List["Interview"] = []
50
-
51
- # self.jobs_runner_status = JobsRunnerStatus(self, n=1)
52
-
53
- async def run_async_generator(
54
- self,
55
- cache: Cache,
56
- n: int = 1,
57
- stop_on_exception: bool = False,
58
- sidecar_model: Optional[LanguageModel] = None,
59
- total_interviews: Optional[List["Interview"]] = None,
60
- raise_validation_errors: bool = False,
61
- ) -> AsyncGenerator["Result", None]:
62
- """Creates the tasks, runs them asynchronously, and returns the results as a Results object.
63
-
64
- Completed tasks are yielded as they are completed.
65
-
66
- :param n: how many times to run each interview
67
- :param stop_on_exception: Whether to stop the interview if an exception is raised
68
- :param sidecar_model: a language model to use in addition to the interview's model
69
- :param total_interviews: A list of interviews to run can be provided instead.
70
- :param raise_validation_errors: Whether to raise validation errors
71
- """
72
- tasks = []
73
- if total_interviews: # was already passed in total interviews
74
- self.total_interviews = total_interviews
75
- else:
76
- self.total_interviews = list(
77
- self._populate_total_interviews(n=n)
78
- ) # Populate self.total_interviews before creating tasks
79
-
80
- for interview in self.total_interviews:
81
- interviewing_task = self._build_interview_task(
82
- interview=interview,
83
- stop_on_exception=stop_on_exception,
84
- sidecar_model=sidecar_model,
85
- raise_validation_errors=raise_validation_errors,
86
- )
87
- tasks.append(asyncio.create_task(interviewing_task))
88
-
89
- for task in asyncio.as_completed(tasks):
90
- result = await task
91
- self.jobs_runner_status.add_completed_interview(result)
92
- yield result
93
-
94
- def _populate_total_interviews(
95
- self, n: int = 1
96
- ) -> Generator["Interview", None, None]:
97
- """Populates self.total_interviews with n copies of each interview.
98
-
99
- :param n: how many times to run each interview.
100
- """
101
- for interview in self.interviews:
102
- for iteration in range(n):
103
- if iteration > 0:
104
- yield interview.duplicate(iteration=iteration, cache=self.cache)
105
- else:
106
- interview.cache = self.cache
107
- yield interview
108
-
109
- async def run_async(self, cache: Optional[Cache] = None, n: int = 1) -> Results:
110
- """Used for some other modules that have a non-standard way of running interviews."""
111
- self.jobs_runner_status = JobsRunnerStatus(self, n=n)
112
- self.cache = Cache() if cache is None else cache
113
- data = []
114
- async for result in self.run_async_generator(cache=self.cache, n=n):
115
- data.append(result)
116
- return Results(survey=self.jobs.survey, data=data)
117
-
118
- def simple_run(self):
119
- data = asyncio.run(self.run_async())
120
- return Results(survey=self.jobs.survey, data=data)
121
-
122
- async def _build_interview_task(
123
- self,
124
- *,
125
- interview: Interview,
126
- stop_on_exception: bool = False,
127
- sidecar_model: Optional["LanguageModel"] = None,
128
- raise_validation_errors: bool = False,
129
- ) -> "Result":
130
- """Conducts an interview and returns the result.
131
-
132
- :param interview: the interview to conduct
133
- :param stop_on_exception: stops the interview if an exception is raised
134
- :param sidecar_model: a language model to use in addition to the interview's model
135
- """
136
- # the model buckets are used to track usage rates
137
- model_buckets = self.bucket_collection[interview.model]
138
-
139
- # get the results of the interview
140
- answer, valid_results = await interview.async_conduct_interview(
141
- model_buckets=model_buckets,
142
- stop_on_exception=stop_on_exception,
143
- sidecar_model=sidecar_model,
144
- raise_validation_errors=raise_validation_errors,
145
- )
146
-
147
- question_results = {}
148
- for result in valid_results:
149
- question_results[result.question_name] = result
150
-
151
- answer_key_names = list(question_results.keys())
152
-
153
- generated_tokens_dict = {
154
- k + "_generated_tokens": question_results[k].generated_tokens
155
- for k in answer_key_names
156
- }
157
- comments_dict = {
158
- k + "_comment": question_results[k].comment for k in answer_key_names
159
- }
160
-
161
- # we should have a valid result for each question
162
- answer_dict = {k: answer[k] for k in answer_key_names}
163
- assert len(valid_results) == len(answer_key_names)
164
-
165
- # TODO: move this down into Interview
166
- question_name_to_prompts = dict({})
167
- for result in valid_results:
168
- question_name = result.question_name
169
- question_name_to_prompts[question_name] = {
170
- "user_prompt": result.prompts["user_prompt"],
171
- "system_prompt": result.prompts["system_prompt"],
172
- }
173
-
174
- prompt_dictionary = {}
175
- for answer_key_name in answer_key_names:
176
- prompt_dictionary[
177
- answer_key_name + "_user_prompt"
178
- ] = question_name_to_prompts[answer_key_name]["user_prompt"]
179
- prompt_dictionary[
180
- answer_key_name + "_system_prompt"
181
- ] = question_name_to_prompts[answer_key_name]["system_prompt"]
182
-
183
- raw_model_results_dictionary = {}
184
- for result in valid_results:
185
- question_name = result.question_name
186
- raw_model_results_dictionary[
187
- question_name + "_raw_model_response"
188
- ] = result.raw_model_response
189
- raw_model_results_dictionary[question_name + "_cost"] = result.cost
190
- one_use_buys = (
191
- "NA"
192
- if isinstance(result.cost, str)
193
- or result.cost == 0
194
- or result.cost is None
195
- else 1.0 / result.cost
196
- )
197
- raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
198
-
199
- result = Result(
200
- agent=interview.agent,
201
- scenario=interview.scenario,
202
- model=interview.model,
203
- iteration=interview.iteration,
204
- answer=answer_dict,
205
- prompt=prompt_dictionary,
206
- raw_model_response=raw_model_results_dictionary,
207
- survey=interview.survey,
208
- generated_tokens=generated_tokens_dict,
209
- comments_dict=comments_dict,
210
- )
211
- result.interview_hash = hash(interview)
212
-
213
- return result
214
-
215
- @property
216
- def elapsed_time(self):
217
- return time.monotonic() - self.start_time
218
-
219
- def process_results(
220
- self, raw_results: Results, cache: Cache, print_exceptions: bool
221
- ):
222
- interview_lookup = {
223
- hash(interview): index
224
- for index, interview in enumerate(self.total_interviews)
225
- }
226
- interview_hashes = list(interview_lookup.keys())
227
-
228
- results = Results(
229
- survey=self.jobs.survey,
230
- data=sorted(
231
- raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
232
- ),
233
- )
234
- results.cache = cache
235
- results.task_history = TaskHistory(
236
- self.total_interviews, include_traceback=False
237
- )
238
- results.has_unfixed_exceptions = results.task_history.has_unfixed_exceptions
239
- results.bucket_collection = self.bucket_collection
240
-
241
- if results.has_unfixed_exceptions and print_exceptions:
242
- from edsl.scenarios.FileStore import HTMLFileStore
243
- from edsl.config import CONFIG
244
- from edsl.coop.coop import Coop
245
-
246
- msg = f"Exceptions were raised in {len(results.task_history.indices)} out of {len(self.total_interviews)} interviews.\n"
247
-
248
- if len(results.task_history.indices) > 5:
249
- msg += f"Exceptions were raised in the following interviews: {results.task_history.indices}.\n"
250
-
251
- print(msg)
252
- # this is where exceptions are opening up
253
- filepath = results.task_history.html(
254
- cta="Open report to see details.",
255
- open_in_browser=True,
256
- return_link=True,
257
- )
258
-
259
- try:
260
- coop = Coop()
261
- user_edsl_settings = coop.edsl_settings
262
- remote_logging = user_edsl_settings["remote_logging"]
263
- except Exception as e:
264
- print(e)
265
- remote_logging = False
266
- if remote_logging:
267
- filestore = HTMLFileStore(filepath)
268
- coop_details = filestore.push(description="Error report")
269
- print(coop_details)
270
-
271
- print("Also see: https://docs.expectedparrot.com/en/latest/exceptions.html")
272
-
273
- return results
274
-
275
- @jupyter_nb_handler
276
- async def run(
277
- self,
278
- cache: Union[Cache, False, None],
279
- n: int = 1,
280
- stop_on_exception: bool = False,
281
- progress_bar: bool = False,
282
- sidecar_model: Optional[LanguageModel] = None,
283
- print_exceptions: bool = True,
284
- raise_validation_errors: bool = False,
285
- ) -> "Coroutine":
286
- """Runs a collection of interviews, handling both async and sync contexts."""
287
-
288
- self.results = []
289
- self.start_time = time.monotonic()
290
- self.completed = False
291
- self.cache = cache
292
- self.sidecar_model = sidecar_model
293
-
294
- self.jobs_runner_status = JobsRunnerStatus(self, n=n)
295
-
296
- stop_event = threading.Event()
297
-
298
- async def process_results(cache):
299
- """Processes results from interviews."""
300
- async for result in self.run_async_generator(
301
- n=n,
302
- stop_on_exception=stop_on_exception,
303
- cache=cache,
304
- sidecar_model=sidecar_model,
305
- raise_validation_errors=raise_validation_errors,
306
- ):
307
- self.results.append(result)
308
- self.completed = True
309
-
310
- def run_progress_bar(stop_event):
311
- """Runs the progress bar in a separate thread."""
312
- self.jobs_runner_status.update_progress(stop_event)
313
-
314
- if progress_bar:
315
- progress_thread = threading.Thread(
316
- target=run_progress_bar, args=(stop_event,)
317
- )
318
- progress_thread.start()
319
-
320
- exception_to_raise = None
321
- try:
322
- with cache as c:
323
- await process_results(cache=c)
324
- except KeyboardInterrupt:
325
- print("Keyboard interrupt received. Stopping gracefully...")
326
- stop_event.set()
327
- except Exception as e:
328
- if stop_on_exception:
329
- exception_to_raise = e
330
- stop_event.set()
331
- finally:
332
- stop_event.set()
333
- if progress_bar:
334
- # self.jobs_runner_status.stop_event.set()
335
- if progress_thread:
336
- progress_thread.join()
337
-
338
- if exception_to_raise:
339
- raise exception_to_raise
340
-
341
- return self.process_results(
342
- raw_results=self.results, cache=cache, print_exceptions=print_exceptions
343
- )
1
+ from __future__ import annotations
2
+ import time
3
+ import asyncio
4
+ import threading
5
+ from typing import Coroutine, List, AsyncGenerator, Optional, Union, Generator
6
+ from contextlib import contextmanager
7
+ from collections import UserList
8
+
9
+ from edsl.results.Results import Results
10
+ from edsl.jobs.interviews.Interview import Interview
11
+ from edsl.jobs.runners.JobsRunnerStatus import JobsRunnerStatus
12
+
13
+ from edsl.jobs.tasks.TaskHistory import TaskHistory
14
+ from edsl.jobs.buckets.BucketCollection import BucketCollection
15
+ from edsl.utilities.decorators import jupyter_nb_handler
16
+ from edsl.data.Cache import Cache
17
+ from edsl.results.Result import Result
18
+ from edsl.results.Results import Results
19
+ from edsl.language_models.LanguageModel import LanguageModel
20
+ from edsl.data.Cache import Cache
21
+
22
+ class StatusTracker(UserList):
23
+ def __init__(self, total_tasks: int):
24
+ self.total_tasks = total_tasks
25
+ super().__init__()
26
+
27
+ def current_status(self):
28
+ return print(f"Completed: {len(self.data)} of {self.total_tasks}", end="\r")
29
+
30
+
31
+ class JobsRunnerAsyncio:
32
+ """A class for running a collection of interviews asynchronously.
33
+
34
+ It gets instaniated from a Jobs object.
35
+ The Jobs object is a collection of interviews that are to be run.
36
+ """
37
+
38
+ def __init__(self, jobs: "Jobs"):
39
+ self.jobs = jobs
40
+ self.interviews: List["Interview"] = jobs.interviews()
41
+ self.bucket_collection: "BucketCollection" = jobs.bucket_collection
42
+ self.total_interviews: List["Interview"] = []
43
+
44
+ async def run_async_generator(
45
+ self,
46
+ cache: Cache,
47
+ n: int = 1,
48
+ stop_on_exception: bool = False,
49
+ sidecar_model: Optional[LanguageModel] = None,
50
+ total_interviews: Optional[List["Interview"]] = None,
51
+ raise_validation_errors: bool = False,
52
+ ) -> AsyncGenerator["Result", None]:
53
+ """Creates the tasks, runs them asynchronously, and returns the results as a Results object.
54
+
55
+ Completed tasks are yielded as they are completed.
56
+
57
+ :param n: how many times to run each interview
58
+ :param stop_on_exception: Whether to stop the interview if an exception is raised
59
+ :param sidecar_model: a language model to use in addition to the interview's model
60
+ :param total_interviews: A list of interviews to run can be provided instead.
61
+ :param raise_validation_errors: Whether to raise validation errors
62
+ """
63
+ tasks = []
64
+ if total_interviews: # was already passed in total interviews
65
+ self.total_interviews = total_interviews
66
+ else:
67
+ self.total_interviews = list(
68
+ self._populate_total_interviews(n=n)
69
+ ) # Populate self.total_interviews before creating tasks
70
+
71
+ for interview in self.total_interviews:
72
+ interviewing_task = self._build_interview_task(
73
+ interview=interview,
74
+ stop_on_exception=stop_on_exception,
75
+ sidecar_model=sidecar_model,
76
+ raise_validation_errors=raise_validation_errors,
77
+ )
78
+ tasks.append(asyncio.create_task(interviewing_task))
79
+
80
+ for task in asyncio.as_completed(tasks):
81
+ result = await task
82
+ self.jobs_runner_status.add_completed_interview(result)
83
+ yield result
84
+
85
+ def _populate_total_interviews(
86
+ self, n: int = 1
87
+ ) -> Generator["Interview", None, None]:
88
+ """Populates self.total_interviews with n copies of each interview.
89
+
90
+ :param n: how many times to run each interview.
91
+ """
92
+ for interview in self.interviews:
93
+ for iteration in range(n):
94
+ if iteration > 0:
95
+ yield interview.duplicate(iteration=iteration, cache=self.cache)
96
+ else:
97
+ interview.cache = self.cache
98
+ yield interview
99
+
100
+ async def run_async(self, cache: Optional[Cache] = None, n: int = 1) -> Results:
101
+ """Used for some other modules that have a non-standard way of running interviews."""
102
+ self.jobs_runner_status = JobsRunnerStatus(self, n=n)
103
+ self.cache = Cache() if cache is None else cache
104
+ data = []
105
+ async for result in self.run_async_generator(cache=self.cache, n=n):
106
+ data.append(result)
107
+ return Results(survey=self.jobs.survey, data=data)
108
+
109
+ def simple_run(self):
110
+ data = asyncio.run(self.run_async())
111
+ return Results(survey=self.jobs.survey, data=data)
112
+
113
+ async def _build_interview_task(
114
+ self,
115
+ *,
116
+ interview: Interview,
117
+ stop_on_exception: bool = False,
118
+ sidecar_model: Optional["LanguageModel"] = None,
119
+ raise_validation_errors: bool = False,
120
+ ) -> "Result":
121
+ """Conducts an interview and returns the result.
122
+
123
+ :param interview: the interview to conduct
124
+ :param stop_on_exception: stops the interview if an exception is raised
125
+ :param sidecar_model: a language model to use in addition to the interview's model
126
+ """
127
+ # the model buckets are used to track usage rates
128
+ model_buckets = self.bucket_collection[interview.model]
129
+
130
+ # get the results of the interview
131
+ answer, valid_results = await interview.async_conduct_interview(
132
+ model_buckets=model_buckets,
133
+ stop_on_exception=stop_on_exception,
134
+ sidecar_model=sidecar_model,
135
+ raise_validation_errors=raise_validation_errors,
136
+ )
137
+
138
+ question_results = {}
139
+ for result in valid_results:
140
+ question_results[result.question_name] = result
141
+
142
+ answer_key_names = list(question_results.keys())
143
+
144
+ generated_tokens_dict = {
145
+ k + "_generated_tokens": question_results[k].generated_tokens
146
+ for k in answer_key_names
147
+ }
148
+ comments_dict = {
149
+ k + "_comment": question_results[k].comment for k in answer_key_names
150
+ }
151
+
152
+ # we should have a valid result for each question
153
+ answer_dict = {k: answer[k] for k in answer_key_names}
154
+ assert len(valid_results) == len(answer_key_names)
155
+
156
+ # TODO: move this down into Interview
157
+ question_name_to_prompts = dict({})
158
+ for result in valid_results:
159
+ question_name = result.question_name
160
+ question_name_to_prompts[question_name] = {
161
+ "user_prompt": result.prompts["user_prompt"],
162
+ "system_prompt": result.prompts["system_prompt"],
163
+ }
164
+
165
+ prompt_dictionary = {}
166
+ for answer_key_name in answer_key_names:
167
+ prompt_dictionary[answer_key_name + "_user_prompt"] = (
168
+ question_name_to_prompts[answer_key_name]["user_prompt"]
169
+ )
170
+ prompt_dictionary[answer_key_name + "_system_prompt"] = (
171
+ question_name_to_prompts[answer_key_name]["system_prompt"]
172
+ )
173
+
174
+ raw_model_results_dictionary = {}
175
+ cache_used_dictionary = {}
176
+ for result in valid_results:
177
+ question_name = result.question_name
178
+ raw_model_results_dictionary[question_name + "_raw_model_response"] = (
179
+ result.raw_model_response
180
+ )
181
+ raw_model_results_dictionary[question_name + "_cost"] = result.cost
182
+ one_use_buys = (
183
+ "NA"
184
+ if isinstance(result.cost, str)
185
+ or result.cost == 0
186
+ or result.cost is None
187
+ else 1.0 / result.cost
188
+ )
189
+ raw_model_results_dictionary[question_name + "_one_usd_buys"] = one_use_buys
190
+ cache_used_dictionary[question_name] = result.cache_used
191
+
192
+ result = Result(
193
+ agent=interview.agent,
194
+ scenario=interview.scenario,
195
+ model=interview.model,
196
+ iteration=interview.iteration,
197
+ answer=answer_dict,
198
+ prompt=prompt_dictionary,
199
+ raw_model_response=raw_model_results_dictionary,
200
+ survey=interview.survey,
201
+ generated_tokens=generated_tokens_dict,
202
+ comments_dict=comments_dict,
203
+ cache_used_dict=cache_used_dictionary,
204
+ )
205
+ result.interview_hash = hash(interview)
206
+
207
+ return result
208
+
209
+ @property
210
+ def elapsed_time(self):
211
+ return time.monotonic() - self.start_time
212
+
213
+ def process_results(
214
+ self, raw_results: Results, cache: Cache, print_exceptions: bool
215
+ ):
216
+ interview_lookup = {
217
+ hash(interview): index
218
+ for index, interview in enumerate(self.total_interviews)
219
+ }
220
+ interview_hashes = list(interview_lookup.keys())
221
+
222
+ task_history = TaskHistory(self.total_interviews, include_traceback=False)
223
+
224
+ results = Results(
225
+ survey=self.jobs.survey,
226
+ data=sorted(
227
+ raw_results, key=lambda x: interview_hashes.index(x.interview_hash)
228
+ ),
229
+ task_history=task_history,
230
+ cache=cache,
231
+ )
232
+ results.bucket_collection = self.bucket_collection
233
+
234
+ if results.has_unfixed_exceptions and print_exceptions:
235
+ from edsl.scenarios.FileStore import HTMLFileStore
236
+ from edsl.config import CONFIG
237
+ from edsl.coop.coop import Coop
238
+
239
+ msg = f"Exceptions were raised in {len(results.task_history.indices)} out of {len(self.total_interviews)} interviews.\n"
240
+
241
+ if len(results.task_history.indices) > 5:
242
+ msg += f"Exceptions were raised in the following interviews: {results.task_history.indices}.\n"
243
+
244
+ print(msg)
245
+ # this is where exceptions are opening up
246
+ filepath = results.task_history.html(
247
+ cta="Open report to see details.",
248
+ open_in_browser=True,
249
+ return_link=True,
250
+ )
251
+
252
+ try:
253
+ coop = Coop()
254
+ user_edsl_settings = coop.edsl_settings
255
+ remote_logging = user_edsl_settings["remote_logging"]
256
+ except Exception as e:
257
+ print(e)
258
+ remote_logging = False
259
+
260
+ if remote_logging:
261
+ filestore = HTMLFileStore(filepath)
262
+ coop_details = filestore.push(description="Error report")
263
+ print(coop_details)
264
+
265
+ print("Also see: https://docs.expectedparrot.com/en/latest/exceptions.html")
266
+
267
+ return results
268
+
269
    @jupyter_nb_handler
    async def run(
        self,
        # NOTE(review): original annotation was Union[Cache, False, None],
        # which is not a valid typing form (False is a value, not a type);
        # presumably False means "no caching" — confirm against callers.
        cache: Union[Cache, bool, None],
        n: int = 1,
        stop_on_exception: bool = False,
        progress_bar: bool = False,
        sidecar_model: Optional[LanguageModel] = None,
        print_exceptions: bool = True,
        raise_validation_errors: bool = False,
    ) -> "Coroutine":
        """Runs a collection of interviews, handling both async and sync contexts.

        :param cache: cache for model responses; used as a context manager below.
        :param n: number of iterations, forwarded to the status tracker and generator.
        :param stop_on_exception: when True, re-raise the first exception after cleanup.
        :param progress_bar: when True, render progress in a background thread.
        :param sidecar_model: optional extra language model passed to each interview.
        :param print_exceptions: forwarded to ``process_results`` for error reporting.
        :param raise_validation_errors: forwarded to the interview generator.
        """

        # Per-run state; results accumulate as the async generator yields them.
        self.results = []
        self.start_time = time.monotonic()
        self.completed = False
        self.cache = cache
        self.sidecar_model = sidecar_model

        self.jobs_runner_status = JobsRunnerStatus(self, n=n)

        # Signals the progress-bar thread to stop (set on interrupt/error/finish).
        stop_event = threading.Event()

        async def process_results(cache):
            """Processes results from interviews."""
            async for result in self.run_async_generator(
                n=n,
                stop_on_exception=stop_on_exception,
                cache=cache,
                sidecar_model=sidecar_model,
                raise_validation_errors=raise_validation_errors,
            ):
                self.results.append(result)
            # Only reached if the generator is exhausted without raising.
            self.completed = True

        def run_progress_bar(stop_event):
            """Runs the progress bar in a separate thread."""
            self.jobs_runner_status.update_progress(stop_event)

        if progress_bar:
            progress_thread = threading.Thread(
                target=run_progress_bar, args=(stop_event,)
            )
            progress_thread.start()

        exception_to_raise = None
        try:
            # The cache context manager scopes cache writes to this run.
            with cache as c:
                await process_results(cache=c)
        except KeyboardInterrupt:
            # Graceful stop: keep partial results, don't re-raise.
            print("Keyboard interrupt received. Stopping gracefully...")
            stop_event.set()
        except Exception as e:
            # When stop_on_exception is False the exception is deliberately
            # swallowed here; failures surface later via the task history.
            if stop_on_exception:
                exception_to_raise = e
            stop_event.set()
        finally:
            # Always stop the progress thread and wait for it to exit so the
            # terminal is left in a clean state.
            stop_event.set()
            if progress_bar:
                # self.jobs_runner_status.stop_event.set()
                if progress_thread:
                    progress_thread.join()

        # Re-raised outside the try/finally so the progress thread is already
        # joined before the caller sees the error.
        if exception_to_raise:
            raise exception_to_raise

        return self.process_results(
            raw_results=self.results, cache=cache, print_exceptions=print_exceptions
        )