edsl-0.1.37.dev3-py3-none-any.whl → edsl-0.1.37.dev5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +855 -804
  7. edsl/agents/AgentList.py +350 -345
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -305
  10. edsl/agents/PromptConstructor.py +353 -312
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -86
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +160 -152
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +290 -238
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/chips.py +95 -0
  45. edsl/conversation/mug_negotiation.py +81 -81
  46. edsl/conversation/next_speaker_utilities.py +93 -93
  47. edsl/coop/PriceFetcher.py +54 -54
  48. edsl/coop/__init__.py +2 -2
  49. edsl/coop/coop.py +958 -824
  50. edsl/coop/utils.py +131 -131
  51. edsl/data/Cache.py +527 -527
  52. edsl/data/CacheEntry.py +228 -228
  53. edsl/data/CacheHandler.py +149 -149
  54. edsl/data/RemoteCacheSync.py +97 -97
  55. edsl/data/SQLiteDict.py +292 -292
  56. edsl/data/__init__.py +4 -4
  57. edsl/data/orm.py +10 -10
  58. edsl/data_transfer_models.py +73 -73
  59. edsl/enums.py +173 -173
  60. edsl/exceptions/BaseException.py +21 -0
  61. edsl/exceptions/__init__.py +54 -50
  62. edsl/exceptions/agents.py +38 -40
  63. edsl/exceptions/configuration.py +16 -16
  64. edsl/exceptions/coop.py +10 -10
  65. edsl/exceptions/data.py +14 -14
  66. edsl/exceptions/general.py +34 -34
  67. edsl/exceptions/jobs.py +33 -33
  68. edsl/exceptions/language_models.py +63 -63
  69. edsl/exceptions/prompts.py +15 -15
  70. edsl/exceptions/questions.py +91 -91
  71. edsl/exceptions/results.py +29 -26
  72. edsl/exceptions/scenarios.py +22 -0
  73. edsl/exceptions/surveys.py +37 -34
  74. edsl/inference_services/AnthropicService.py +87 -87
  75. edsl/inference_services/AwsBedrock.py +120 -115
  76. edsl/inference_services/AzureAI.py +217 -217
  77. edsl/inference_services/DeepInfraService.py +18 -18
  78. edsl/inference_services/GoogleService.py +156 -156
  79. edsl/inference_services/GroqService.py +20 -20
  80. edsl/inference_services/InferenceServiceABC.py +147 -147
  81. edsl/inference_services/InferenceServicesCollection.py +97 -74
  82. edsl/inference_services/MistralAIService.py +123 -123
  83. edsl/inference_services/OllamaService.py +18 -18
  84. edsl/inference_services/OpenAIService.py +224 -224
  85. edsl/inference_services/TestService.py +89 -89
  86. edsl/inference_services/TogetherAIService.py +170 -170
  87. edsl/inference_services/models_available_cache.py +118 -118
  88. edsl/inference_services/rate_limits_cache.py +25 -25
  89. edsl/inference_services/registry.py +39 -39
  90. edsl/inference_services/write_available.py +10 -10
  91. edsl/jobs/Answers.py +56 -56
  92. edsl/jobs/Jobs.py +1347 -1121
  93. edsl/jobs/__init__.py +1 -1
  94. edsl/jobs/buckets/BucketCollection.py +63 -63
  95. edsl/jobs/buckets/ModelBuckets.py +65 -65
  96. edsl/jobs/buckets/TokenBucket.py +248 -248
  97. edsl/jobs/interviews/Interview.py +661 -661
  98. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  99. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -182
  100. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  101. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  102. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  103. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  104. edsl/jobs/interviews/ReportErrors.py +66 -66
  105. edsl/jobs/interviews/interview_status_enum.py +9 -9
  106. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  107. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  108. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  109. edsl/jobs/tasks/TaskCreators.py +64 -64
  110. edsl/jobs/tasks/TaskHistory.py +442 -441
  111. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  112. edsl/jobs/tasks/task_status_enum.py +163 -163
  113. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  114. edsl/jobs/tokens/TokenUsage.py +34 -34
  115. edsl/language_models/KeyLookup.py +30 -0
  116. edsl/language_models/LanguageModel.py +706 -718
  117. edsl/language_models/ModelList.py +102 -102
  118. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  119. edsl/language_models/__init__.py +3 -2
  120. edsl/language_models/fake_openai_call.py +15 -15
  121. edsl/language_models/fake_openai_service.py +61 -61
  122. edsl/language_models/registry.py +137 -137
  123. edsl/language_models/repair.py +156 -156
  124. edsl/language_models/unused/ReplicateBase.py +83 -83
  125. edsl/language_models/utilities.py +64 -64
  126. edsl/notebooks/Notebook.py +259 -259
  127. edsl/notebooks/__init__.py +1 -1
  128. edsl/prompts/Prompt.py +357 -353
  129. edsl/prompts/__init__.py +2 -2
  130. edsl/questions/AnswerValidatorMixin.py +289 -289
  131. edsl/questions/QuestionBase.py +656 -616
  132. edsl/questions/QuestionBaseGenMixin.py +161 -161
  133. edsl/questions/QuestionBasePromptsMixin.py +234 -266
  134. edsl/questions/QuestionBudget.py +227 -227
  135. edsl/questions/QuestionCheckBox.py +359 -359
  136. edsl/questions/QuestionExtract.py +183 -183
  137. edsl/questions/QuestionFreeText.py +114 -114
  138. edsl/questions/QuestionFunctional.py +159 -159
  139. edsl/questions/QuestionList.py +231 -231
  140. edsl/questions/QuestionMultipleChoice.py +286 -286
  141. edsl/questions/QuestionNumerical.py +153 -153
  142. edsl/questions/QuestionRank.py +324 -324
  143. edsl/questions/Quick.py +41 -41
  144. edsl/questions/RegisterQuestionsMeta.py +71 -71
  145. edsl/questions/ResponseValidatorABC.py +174 -174
  146. edsl/questions/SimpleAskMixin.py +73 -73
  147. edsl/questions/__init__.py +26 -26
  148. edsl/questions/compose_questions.py +98 -98
  149. edsl/questions/decorators.py +21 -21
  150. edsl/questions/derived/QuestionLikertFive.py +76 -76
  151. edsl/questions/derived/QuestionLinearScale.py +87 -87
  152. edsl/questions/derived/QuestionTopK.py +91 -91
  153. edsl/questions/derived/QuestionYesNo.py +82 -82
  154. edsl/questions/descriptors.py +413 -418
  155. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  156. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  157. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  158. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  159. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  160. edsl/questions/prompt_templates/question_list.jinja +17 -17
  161. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  162. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  163. edsl/questions/question_registry.py +147 -147
  164. edsl/questions/settings.py +12 -12
  165. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  167. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  168. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  169. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  170. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  171. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  172. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  173. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  174. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  175. edsl/questions/templates/list/question_presentation.jinja +5 -5
  176. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  177. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  178. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  179. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  180. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  181. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  182. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  183. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  184. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  185. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  186. edsl/results/Dataset.py +293 -293
  187. edsl/results/DatasetExportMixin.py +717 -693
  188. edsl/results/DatasetTree.py +145 -145
  189. edsl/results/Result.py +450 -435
  190. edsl/results/Results.py +1071 -1160
  191. edsl/results/ResultsDBMixin.py +238 -238
  192. edsl/results/ResultsExportMixin.py +43 -43
  193. edsl/results/ResultsFetchMixin.py +33 -33
  194. edsl/results/ResultsGGMixin.py +121 -121
  195. edsl/results/ResultsToolsMixin.py +98 -98
  196. edsl/results/Selector.py +135 -118
  197. edsl/results/__init__.py +2 -2
  198. edsl/results/tree_explore.py +115 -115
  199. edsl/scenarios/FileStore.py +458 -458
  200. edsl/scenarios/Scenario.py +546 -510
  201. edsl/scenarios/ScenarioHtmlMixin.py +64 -59
  202. edsl/scenarios/ScenarioList.py +1112 -1101
  203. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  204. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  205. edsl/scenarios/__init__.py +4 -4
  206. edsl/shared.py +1 -1
  207. edsl/study/ObjectEntry.py +173 -173
  208. edsl/study/ProofOfWork.py +113 -113
  209. edsl/study/SnapShot.py +80 -80
  210. edsl/study/Study.py +528 -528
  211. edsl/study/__init__.py +4 -4
  212. edsl/surveys/DAG.py +148 -148
  213. edsl/surveys/Memory.py +31 -31
  214. edsl/surveys/MemoryPlan.py +244 -244
  215. edsl/surveys/Rule.py +330 -324
  216. edsl/surveys/RuleCollection.py +387 -387
  217. edsl/surveys/Survey.py +1795 -1772
  218. edsl/surveys/SurveyCSS.py +261 -261
  219. edsl/surveys/SurveyExportMixin.py +259 -259
  220. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  221. edsl/surveys/SurveyQualtricsImport.py +284 -284
  222. edsl/surveys/__init__.py +3 -3
  223. edsl/surveys/base.py +53 -53
  224. edsl/surveys/descriptors.py +56 -56
  225. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  226. edsl/surveys/instructions/Instruction.py +51 -51
  227. edsl/surveys/instructions/InstructionCollection.py +77 -77
  228. edsl/templates/error_reporting/base.html +23 -23
  229. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  230. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  231. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  232. edsl/templates/error_reporting/interview_details.html +115 -115
  233. edsl/templates/error_reporting/interviews.html +9 -9
  234. edsl/templates/error_reporting/overview.html +4 -4
  235. edsl/templates/error_reporting/performance_plot.html +1 -1
  236. edsl/templates/error_reporting/report.css +73 -73
  237. edsl/templates/error_reporting/report.html +117 -117
  238. edsl/templates/error_reporting/report.js +25 -25
  239. edsl/tools/__init__.py +1 -1
  240. edsl/tools/clusters.py +192 -192
  241. edsl/tools/embeddings.py +27 -27
  242. edsl/tools/embeddings_plotting.py +118 -118
  243. edsl/tools/plotting.py +112 -112
  244. edsl/tools/summarize.py +18 -18
  245. edsl/utilities/SystemInfo.py +28 -28
  246. edsl/utilities/__init__.py +22 -22
  247. edsl/utilities/ast_utilities.py +25 -25
  248. edsl/utilities/data/Registry.py +6 -6
  249. edsl/utilities/data/__init__.py +1 -1
  250. edsl/utilities/data/scooter_results.json +1 -1
  251. edsl/utilities/decorators.py +77 -77
  252. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  253. edsl/utilities/interface.py +627 -627
  254. edsl/utilities/repair_functions.py +28 -28
  255. edsl/utilities/restricted_python.py +70 -70
  256. edsl/utilities/utilities.py +409 -391
  257. {edsl-0.1.37.dev3.dist-info → edsl-0.1.37.dev5.dist-info}/LICENSE +21 -21
  258. {edsl-0.1.37.dev3.dist-info → edsl-0.1.37.dev5.dist-info}/METADATA +1 -1
  259. edsl-0.1.37.dev5.dist-info/RECORD +283 -0
  260. edsl-0.1.37.dev3.dist-info/RECORD +0 -279
  261. {edsl-0.1.37.dev3.dist-info → edsl-0.1.37.dev5.dist-info}/WHEEL +0 -0
@@ -1,242 +1,242 @@
1
- import asyncio
2
- from typing import Callable, Union, List
3
- from collections import UserList, UserDict
4
-
5
- from edsl.jobs.buckets import ModelBuckets
6
- from edsl.exceptions import InterviewErrorPriorTaskCanceled
7
-
8
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
9
- from edsl.jobs.tasks.task_status_enum import TaskStatus, TaskStatusDescriptor
10
- from edsl.jobs.tasks.TaskStatusLog import TaskStatusLog
11
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
12
- from edsl.jobs.tokens.TokenUsage import TokenUsage
13
- from edsl.jobs.Answers import Answers
14
- from edsl.questions.QuestionBase import QuestionBase
15
-
16
-
17
- class TokensUsed(UserDict):
18
- """ "Container for tokens used by a task."""
19
-
20
- def __init__(self, cached_tokens, new_tokens):
21
- d = {"cached_tokens": cached_tokens, "new_tokens": new_tokens}
22
- super().__init__(d)
23
-
24
-
25
- class QuestionTaskCreator(UserList):
26
- """Class to create and manage a single question and its dependencies.
27
- The class is an instance of a UserList of tasks that must be completed before the focal task can be run.
28
-
29
- It is a UserList with all the tasks that must be completed before the focal task can be run.
30
- The focal task is the question that we are interested in answering.
31
- """
32
-
33
- task_status = TaskStatusDescriptor()
34
-
35
- def __init__(
36
- self,
37
- *,
38
- question: QuestionBase,
39
- answer_question_func: Callable,
40
- model_buckets: ModelBuckets,
41
- token_estimator: Union[Callable, None] = None,
42
- iteration: int = 0,
43
- ):
44
- """Initialize the QuestionTaskCreator instance.
45
-
46
- :param question: the question that we are interested in answering.
47
- :param answer_question_func: the function that will answer the question.
48
- :param model_buckets: the bucket collection that contains the requests and tokens buckets which control the rate of API calls and token usage.
49
- :param token_estimator: a function that estimates the number of tokens required to answer the question.
50
- :param iteration: the iteration number of the question.
51
-
52
- """
53
- super().__init__([])
54
- # answer_question_func is the 'interview.answer_question_and_record_task" method
55
- self.answer_question_func = answer_question_func
56
- self.question = question
57
- self.iteration = iteration
58
-
59
- self.model_buckets = model_buckets
60
- self.requests_bucket = self.model_buckets.requests_bucket
61
- self.tokens_bucket = self.model_buckets.tokens_bucket
62
- self.status_log = TaskStatusLog()
63
-
64
- def fake_token_estimator(question):
65
- return 1
66
-
67
- self.token_estimator = token_estimator or fake_token_estimator
68
-
69
- # Assume that the task is *not* from the cache until we know otherwise; the _run_focal_task might flip this bit later.
70
- self.from_cache = False
71
-
72
- self.cached_token_usage = TokenUsage(from_cache=True)
73
- self.new_token_usage = TokenUsage(from_cache=False)
74
- self.task_status = TaskStatus.NOT_STARTED
75
-
76
- def add_dependency(self, task: asyncio.Task) -> None:
77
- """Adds a task dependency to the list of dependencies.
78
-
79
- >>> qt1 = QuestionTaskCreator.example()
80
- >>> qt2 = QuestionTaskCreator.example()
81
- >>> qt2.add_dependency(qt1)
82
- >>> len(qt2)
83
- 1
84
- """
85
- self.append(task)
86
-
87
- def generate_task(self) -> asyncio.Task:
88
- """Create a task that depends on the passed-in dependencies."""
89
- task = asyncio.create_task(
90
- self._run_task_async(), name=self.question.question_name
91
- )
92
- task.depends_on = [t.get_name() for t in self]
93
- return task
94
-
95
- def estimated_tokens(self) -> int:
96
- """Estimates the number of tokens that will be required to run the focal task."""
97
- return self.token_estimator(self.question)
98
-
99
- def token_usage(self) -> TokensUsed:
100
- """Returns the token usage for the task.
101
-
102
- >>> qt = QuestionTaskCreator.example()
103
- >>> answers = asyncio.run(qt._run_focal_task())
104
- >>> qt.token_usage()
105
- {'cached_tokens': TokenUsage(from_cache=True, prompt_tokens=0, completion_tokens=0), 'new_tokens': TokenUsage(from_cache=False, prompt_tokens=0, completion_tokens=0)}
106
- """
107
- return TokensUsed(
108
- cached_tokens=self.cached_token_usage, new_tokens=self.new_token_usage
109
- )
110
-
111
- async def _run_focal_task(self) -> Answers:
112
- """Run the focal task i.e., the question that we are interested in answering.
113
-
114
- It is only called after all the dependency tasks are completed.
115
-
116
- >>> qt = QuestionTaskCreator.example()
117
- >>> answers = asyncio.run(qt._run_focal_task())
118
- >>> answers.answer
119
- 'This is an example answer'
120
- """
121
-
122
- requested_tokens = self.estimated_tokens()
123
- if (estimated_wait_time := self.tokens_bucket.wait_time(requested_tokens)) > 0:
124
- self.task_status = TaskStatus.WAITING_FOR_TOKEN_CAPACITY
125
-
126
- await self.tokens_bucket.get_tokens(requested_tokens)
127
-
128
- if (estimated_wait_time := self.requests_bucket.wait_time(1)) > 0:
129
- self.waiting = True # do we need this?
130
- self.task_status = TaskStatus.WAITING_FOR_REQUEST_CAPACITY
131
-
132
- await self.requests_bucket.get_tokens(1, cheat_bucket_capacity=True)
133
-
134
- self.task_status = TaskStatus.API_CALL_IN_PROGRESS
135
- try:
136
- results = await self.answer_question_func(
137
- question=self.question, task=None # self
138
- )
139
- self.task_status = TaskStatus.SUCCESS
140
- except Exception as e:
141
- self.task_status = TaskStatus.FAILED
142
- raise e
143
-
144
- if results.cache_used:
145
- self.tokens_bucket.add_tokens(requested_tokens)
146
- self.requests_bucket.add_tokens(1)
147
- self.from_cache = True
148
- # Turbo mode means that we don't wait for tokens or requests.
149
- self.tokens_bucket.turbo_mode_on()
150
- self.requests_bucket.turbo_mode_on()
151
- else:
152
- self.tokens_bucket.turbo_mode_off()
153
- self.requests_bucket.turbo_mode_off()
154
-
155
- return results
156
-
157
- @classmethod
158
- def example(cls):
159
- """Return an example instance of the class."""
160
- from edsl import QuestionFreeText
161
- from edsl.jobs.buckets.ModelBuckets import ModelBuckets
162
-
163
- m = ModelBuckets.infinity_bucket()
164
-
165
- from collections import namedtuple
166
-
167
- AnswerDict = namedtuple("AnswerDict", ["answer", "cache_used"])
168
- answer = AnswerDict(answer="This is an example answer", cache_used=False)
169
-
170
- async def answer_question_func(question, task):
171
- return answer
172
-
173
- return cls(
174
- question=QuestionFreeText.example(),
175
- answer_question_func=answer_question_func,
176
- model_buckets=m,
177
- token_estimator=None,
178
- iteration=0,
179
- )
180
-
181
- async def _run_task_async(self) -> None:
182
- """Run the task asynchronously, awaiting the tasks that must be completed before this one can be run.
183
-
184
- >>> qt1 = QuestionTaskCreator.example()
185
- >>> qt2 = QuestionTaskCreator.example()
186
- >>> qt2.add_dependency(qt1)
187
-
188
- The method follows these steps:
189
- 1. Set the task_status to TaskStatus.WAITING_FOR_DEPENDENCIES, indicating that the task is waiting for its dependencies to complete.
190
- 2. Await asyncio.gather(*self, return_exceptions=True) to run all the dependent tasks concurrently.
191
-
192
- - the return_exceptions=True flag ensures that the task does not raise an exception if any of the dependencies fail.
193
-
194
- 3. If any of the dependencies raise an exception:
195
- - If it is a CancelledError, set the current task's task_status to TaskStatus.CANCELLED, and re-raise the CancelledError,
196
- terminating the execution of the current task.
197
- - If it is any other exception, set the task_status to TaskStatus.PARENT_FAILED, and raise a custom exception
198
- InterviewErrorPriorTaskCanceled with the original exception as the cause, terminating the execution of the current task.
199
- 4. If all the dependencies complete successfully without raising any exceptions, the code reaches the else block.
200
- 5. In the else block, run the focal task (self._run_focal_task(debug)).
201
-
202
- If any of the dependencies fail (raise an exception), the focal task will not run. The execution will be terminated,
203
- and an exception will be raised to indicate the failure of the dependencies.
204
-
205
- The focal task (self._run_focal_task(debug)) is only executed if all the dependencies complete successfully.
206
-
207
- Args:
208
- debug: A boolean value indicating whether to run the task in debug mode.
209
-
210
- Returns:
211
- None
212
- """
213
- try:
214
- self.task_status = TaskStatus.WAITING_FOR_DEPENDENCIES
215
- # If this were set to 'return_exceptions=False', then the first exception would be raised immediately.
216
- # and it would cancel all the other tasks. This is not the behavior we want.
217
-
218
- gather_results = await asyncio.gather(*self, return_exceptions=True)
219
-
220
- for result in gather_results:
221
- if isinstance(result, Exception):
222
- raise result
223
-
224
- except asyncio.CancelledError:
225
- self.task_status = TaskStatus.CANCELLED
226
- raise
227
- except Exception as e:
228
- # one of the dependencies failed
229
- self.task_status = TaskStatus.PARENT_FAILED
230
- # turns the parent exception into a custom exception so the task gets canceled but this InterviewErrorPriorTaskCanceled exception
231
- raise InterviewErrorPriorTaskCanceled(
232
- f"Required tasks failed for {self.question.question_name}"
233
- ) from e
234
-
235
- # this only runs if all the dependencies are successful
236
- return await self._run_focal_task()
237
-
238
-
239
- if __name__ == "__main__":
240
- import doctest
241
-
242
- doctest.testmod(optionflags=doctest.ELLIPSIS)
1
+ import asyncio
2
+ from typing import Callable, Union, List
3
+ from collections import UserList, UserDict
4
+
5
+ from edsl.jobs.buckets import ModelBuckets
6
+ from edsl.exceptions import InterviewErrorPriorTaskCanceled
7
+
8
+ from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
9
+ from edsl.jobs.tasks.task_status_enum import TaskStatus, TaskStatusDescriptor
10
+ from edsl.jobs.tasks.TaskStatusLog import TaskStatusLog
11
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
12
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
13
+ from edsl.jobs.Answers import Answers
14
+ from edsl.questions.QuestionBase import QuestionBase
15
+
16
+
17
+ class TokensUsed(UserDict):
18
+ """ "Container for tokens used by a task."""
19
+
20
+ def __init__(self, cached_tokens, new_tokens):
21
+ d = {"cached_tokens": cached_tokens, "new_tokens": new_tokens}
22
+ super().__init__(d)
23
+
24
+
25
+ class QuestionTaskCreator(UserList):
26
+ """Class to create and manage a single question and its dependencies.
27
+ The class is an instance of a UserList of tasks that must be completed before the focal task can be run.
28
+
29
+ It is a UserList with all the tasks that must be completed before the focal task can be run.
30
+ The focal task is the question that we are interested in answering.
31
+ """
32
+
33
+ task_status = TaskStatusDescriptor()
34
+
35
+ def __init__(
36
+ self,
37
+ *,
38
+ question: QuestionBase,
39
+ answer_question_func: Callable,
40
+ model_buckets: ModelBuckets,
41
+ token_estimator: Union[Callable, None] = None,
42
+ iteration: int = 0,
43
+ ):
44
+ """Initialize the QuestionTaskCreator instance.
45
+
46
+ :param question: the question that we are interested in answering.
47
+ :param answer_question_func: the function that will answer the question.
48
+ :param model_buckets: the bucket collection that contains the requests and tokens buckets which control the rate of API calls and token usage.
49
+ :param token_estimator: a function that estimates the number of tokens required to answer the question.
50
+ :param iteration: the iteration number of the question.
51
+
52
+ """
53
+ super().__init__([])
54
+ # answer_question_func is the 'interview.answer_question_and_record_task" method
55
+ self.answer_question_func = answer_question_func
56
+ self.question = question
57
+ self.iteration = iteration
58
+
59
+ self.model_buckets = model_buckets
60
+ self.requests_bucket = self.model_buckets.requests_bucket
61
+ self.tokens_bucket = self.model_buckets.tokens_bucket
62
+ self.status_log = TaskStatusLog()
63
+
64
+ def fake_token_estimator(question):
65
+ return 1
66
+
67
+ self.token_estimator = token_estimator or fake_token_estimator
68
+
69
+ # Assume that the task is *not* from the cache until we know otherwise; the _run_focal_task might flip this bit later.
70
+ self.from_cache = False
71
+
72
+ self.cached_token_usage = TokenUsage(from_cache=True)
73
+ self.new_token_usage = TokenUsage(from_cache=False)
74
+ self.task_status = TaskStatus.NOT_STARTED
75
+
76
+ def add_dependency(self, task: asyncio.Task) -> None:
77
+ """Adds a task dependency to the list of dependencies.
78
+
79
+ >>> qt1 = QuestionTaskCreator.example()
80
+ >>> qt2 = QuestionTaskCreator.example()
81
+ >>> qt2.add_dependency(qt1)
82
+ >>> len(qt2)
83
+ 1
84
+ """
85
+ self.append(task)
86
+
87
+ def generate_task(self) -> asyncio.Task:
88
+ """Create a task that depends on the passed-in dependencies."""
89
+ task = asyncio.create_task(
90
+ self._run_task_async(), name=self.question.question_name
91
+ )
92
+ task.depends_on = [t.get_name() for t in self]
93
+ return task
94
+
95
+ def estimated_tokens(self) -> int:
96
+ """Estimates the number of tokens that will be required to run the focal task."""
97
+ return self.token_estimator(self.question)
98
+
99
+ def token_usage(self) -> TokensUsed:
100
+ """Returns the token usage for the task.
101
+
102
+ >>> qt = QuestionTaskCreator.example()
103
+ >>> answers = asyncio.run(qt._run_focal_task())
104
+ >>> qt.token_usage()
105
+ {'cached_tokens': TokenUsage(from_cache=True, prompt_tokens=0, completion_tokens=0), 'new_tokens': TokenUsage(from_cache=False, prompt_tokens=0, completion_tokens=0)}
106
+ """
107
+ return TokensUsed(
108
+ cached_tokens=self.cached_token_usage, new_tokens=self.new_token_usage
109
+ )
110
+
111
+ async def _run_focal_task(self) -> Answers:
112
+ """Run the focal task i.e., the question that we are interested in answering.
113
+
114
+ It is only called after all the dependency tasks are completed.
115
+
116
+ >>> qt = QuestionTaskCreator.example()
117
+ >>> answers = asyncio.run(qt._run_focal_task())
118
+ >>> answers.answer
119
+ 'This is an example answer'
120
+ """
121
+
122
+ requested_tokens = self.estimated_tokens()
123
+ if (estimated_wait_time := self.tokens_bucket.wait_time(requested_tokens)) > 0:
124
+ self.task_status = TaskStatus.WAITING_FOR_TOKEN_CAPACITY
125
+
126
+ await self.tokens_bucket.get_tokens(requested_tokens)
127
+
128
+ if (estimated_wait_time := self.requests_bucket.wait_time(1)) > 0:
129
+ self.waiting = True # do we need this?
130
+ self.task_status = TaskStatus.WAITING_FOR_REQUEST_CAPACITY
131
+
132
+ await self.requests_bucket.get_tokens(1, cheat_bucket_capacity=True)
133
+
134
+ self.task_status = TaskStatus.API_CALL_IN_PROGRESS
135
+ try:
136
+ results = await self.answer_question_func(
137
+ question=self.question, task=None # self
138
+ )
139
+ self.task_status = TaskStatus.SUCCESS
140
+ except Exception as e:
141
+ self.task_status = TaskStatus.FAILED
142
+ raise e
143
+
144
+ if results.cache_used:
145
+ self.tokens_bucket.add_tokens(requested_tokens)
146
+ self.requests_bucket.add_tokens(1)
147
+ self.from_cache = True
148
+ # Turbo mode means that we don't wait for tokens or requests.
149
+ self.tokens_bucket.turbo_mode_on()
150
+ self.requests_bucket.turbo_mode_on()
151
+ else:
152
+ self.tokens_bucket.turbo_mode_off()
153
+ self.requests_bucket.turbo_mode_off()
154
+
155
+ return results
156
+
157
+ @classmethod
158
+ def example(cls):
159
+ """Return an example instance of the class."""
160
+ from edsl import QuestionFreeText
161
+ from edsl.jobs.buckets.ModelBuckets import ModelBuckets
162
+
163
+ m = ModelBuckets.infinity_bucket()
164
+
165
+ from collections import namedtuple
166
+
167
+ AnswerDict = namedtuple("AnswerDict", ["answer", "cache_used"])
168
+ answer = AnswerDict(answer="This is an example answer", cache_used=False)
169
+
170
+ async def answer_question_func(question, task):
171
+ return answer
172
+
173
+ return cls(
174
+ question=QuestionFreeText.example(),
175
+ answer_question_func=answer_question_func,
176
+ model_buckets=m,
177
+ token_estimator=None,
178
+ iteration=0,
179
+ )
180
+
181
+ async def _run_task_async(self) -> None:
182
+ """Run the task asynchronously, awaiting the tasks that must be completed before this one can be run.
183
+
184
+ >>> qt1 = QuestionTaskCreator.example()
185
+ >>> qt2 = QuestionTaskCreator.example()
186
+ >>> qt2.add_dependency(qt1)
187
+
188
+ The method follows these steps:
189
+ 1. Set the task_status to TaskStatus.WAITING_FOR_DEPENDENCIES, indicating that the task is waiting for its dependencies to complete.
190
+ 2. Await asyncio.gather(*self, return_exceptions=True) to run all the dependent tasks concurrently.
191
+
192
+ - the return_exceptions=True flag ensures that the task does not raise an exception if any of the dependencies fail.
193
+
194
+ 3. If any of the dependencies raise an exception:
195
+ - If it is a CancelledError, set the current task's task_status to TaskStatus.CANCELLED, and re-raise the CancelledError,
196
+ terminating the execution of the current task.
197
+ - If it is any other exception, set the task_status to TaskStatus.PARENT_FAILED, and raise a custom exception
198
+ InterviewErrorPriorTaskCanceled with the original exception as the cause, terminating the execution of the current task.
199
+ 4. If all the dependencies complete successfully without raising any exceptions, the code reaches the else block.
200
+ 5. In the else block, run the focal task (self._run_focal_task(debug)).
201
+
202
+ If any of the dependencies fail (raise an exception), the focal task will not run. The execution will be terminated,
203
+ and an exception will be raised to indicate the failure of the dependencies.
204
+
205
+ The focal task (self._run_focal_task(debug)) is only executed if all the dependencies complete successfully.
206
+
207
+ Args:
208
+ debug: A boolean value indicating whether to run the task in debug mode.
209
+
210
+ Returns:
211
+ None
212
+ """
213
+ try:
214
+ self.task_status = TaskStatus.WAITING_FOR_DEPENDENCIES
215
+ # If this were set to 'return_exceptions=False', then the first exception would be raised immediately.
216
+ # and it would cancel all the other tasks. This is not the behavior we want.
217
+
218
+ gather_results = await asyncio.gather(*self, return_exceptions=True)
219
+
220
+ for result in gather_results:
221
+ if isinstance(result, Exception):
222
+ raise result
223
+
224
+ except asyncio.CancelledError:
225
+ self.task_status = TaskStatus.CANCELLED
226
+ raise
227
+ except Exception as e:
228
+ # one of the dependencies failed
229
+ self.task_status = TaskStatus.PARENT_FAILED
230
+ # turns the parent exception into a custom exception so the task gets canceled but this InterviewErrorPriorTaskCanceled exception
231
+ raise InterviewErrorPriorTaskCanceled(
232
+ f"Required tasks failed for {self.question.question_name}"
233
+ ) from e
234
+
235
+ # this only runs if all the dependencies are successful
236
+ return await self._run_focal_task()
237
+
238
+
239
+ if __name__ == "__main__":
240
+ import doctest
241
+
242
+ doctest.testmod(optionflags=doctest.ELLIPSIS)
@@ -1,64 +1,64 @@
1
- from typing import Callable, Union, List
2
- from collections import UserDict
3
-
4
- from edsl.jobs.tokens.TokenUsage import TokenUsage
5
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
6
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
7
-
8
-
9
- class TaskCreators(UserDict):
10
- """A dictionary of task creators. A task is one question being answered.
11
-
12
- This is used to track the status of the tasks within an interview.
13
- """
14
-
15
- def __init__(self, *args, **kwargs):
16
- super().__init__(*args, **kwargs)
17
-
18
- @property
19
- def token_usage(self) -> InterviewTokenUsage:
20
- """Determines how many tokens were used for the interview.
21
-
22
- This is iterates through all tasks that make up an interview.
23
- For each task, it determines how many tokens were used and whether they were cached or new.
24
- It then sums the total number of cached and new tokens used for the interview.
25
-
26
- """
27
- cached_tokens = TokenUsage(from_cache=True)
28
- new_tokens = TokenUsage(from_cache=False)
29
- for task_creator in self.values():
30
- token_usage = task_creator.token_usage()
31
- cached_tokens += token_usage["cached_tokens"]
32
- new_tokens += token_usage["new_tokens"]
33
- return InterviewTokenUsage(
34
- new_token_usage=new_tokens, cached_token_usage=cached_tokens
35
- )
36
-
37
- def print(self) -> None:
38
- from rich import print
39
-
40
- print({task.get_name(): task.task_status for task in self.values()})
41
-
42
- @property
43
- def interview_status(self) -> InterviewStatusDictionary:
44
- """Returns a dictionary, InterviewStatusDictionary, mapping task status codes to counts of tasks in that state.
45
-
46
- >>> t = TaskCreators()
47
- >>> t.interview_status
48
- InterviewStatusDictionary({<TaskStatus.NOT_STARTED: 1>: 0, <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>: 0, <TaskStatus.CANCELLED: 3>: 0, <TaskStatus.PARENT_FAILED: 4>: 0, <TaskStatus.WAITING_FOR_REQUEST_CAPACITY: 5>: 0, <TaskStatus.WAITING_FOR_TOKEN_CAPACITY: 6>: 0, <TaskStatus.API_CALL_IN_PROGRESS: 7>: 0, <TaskStatus.SUCCESS: 8>: 0, <TaskStatus.FAILED: 9>: 0, 'number_from_cache': 0})
49
- """
50
- status_dict = InterviewStatusDictionary()
51
- for task_creator in self.values():
52
- status_dict[task_creator.task_status] += 1
53
- status_dict["number_from_cache"] += task_creator.from_cache
54
- return status_dict
55
-
56
- def status_logs(self):
57
- """Returns a list of status logs for each task."""
58
- return [task_creator.status_log for task_creator in self.values()]
59
-
60
-
61
- if __name__ == "__main__":
62
- import doctest
63
-
64
- doctest.testmod(optionflags=doctest.ELLIPSIS)
1
+ from typing import Callable, Union, List
2
+ from collections import UserDict
3
+
4
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
5
+ from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
6
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
7
+
8
+
9
+ class TaskCreators(UserDict):
10
+ """A dictionary of task creators. A task is one question being answered.
11
+
12
+ This is used to track the status of the tasks within an interview.
13
+ """
14
+
15
+ def __init__(self, *args, **kwargs):
16
+ super().__init__(*args, **kwargs)
17
+
18
+ @property
19
+ def token_usage(self) -> InterviewTokenUsage:
20
+ """Determines how many tokens were used for the interview.
21
+
22
+ This is iterates through all tasks that make up an interview.
23
+ For each task, it determines how many tokens were used and whether they were cached or new.
24
+ It then sums the total number of cached and new tokens used for the interview.
25
+
26
+ """
27
+ cached_tokens = TokenUsage(from_cache=True)
28
+ new_tokens = TokenUsage(from_cache=False)
29
+ for task_creator in self.values():
30
+ token_usage = task_creator.token_usage()
31
+ cached_tokens += token_usage["cached_tokens"]
32
+ new_tokens += token_usage["new_tokens"]
33
+ return InterviewTokenUsage(
34
+ new_token_usage=new_tokens, cached_token_usage=cached_tokens
35
+ )
36
+
37
+ def print(self) -> None:
38
+ from rich import print
39
+
40
+ print({task.get_name(): task.task_status for task in self.values()})
41
+
42
+ @property
43
+ def interview_status(self) -> InterviewStatusDictionary:
44
+ """Returns a dictionary, InterviewStatusDictionary, mapping task status codes to counts of tasks in that state.
45
+
46
+ >>> t = TaskCreators()
47
+ >>> t.interview_status
48
+ InterviewStatusDictionary({<TaskStatus.NOT_STARTED: 1>: 0, <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>: 0, <TaskStatus.CANCELLED: 3>: 0, <TaskStatus.PARENT_FAILED: 4>: 0, <TaskStatus.WAITING_FOR_REQUEST_CAPACITY: 5>: 0, <TaskStatus.WAITING_FOR_TOKEN_CAPACITY: 6>: 0, <TaskStatus.API_CALL_IN_PROGRESS: 7>: 0, <TaskStatus.SUCCESS: 8>: 0, <TaskStatus.FAILED: 9>: 0, 'number_from_cache': 0})
49
+ """
50
+ status_dict = InterviewStatusDictionary()
51
+ for task_creator in self.values():
52
+ status_dict[task_creator.task_status] += 1
53
+ status_dict["number_from_cache"] += task_creator.from_cache
54
+ return status_dict
55
+
56
+ def status_logs(self):
57
+ """Returns a list of status logs for each task."""
58
+ return [task_creator.status_log for task_creator in self.values()]
59
+
60
+
61
+ if __name__ == "__main__":
62
+ import doctest
63
+
64
+ doctest.testmod(optionflags=doctest.ELLIPSIS)