edsl 0.1.39.dev1__py3-none-any.whl → 0.1.39.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (256)
  1. edsl/Base.py +332 -332
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -867
  7. edsl/agents/AgentList.py +413 -413
  8. edsl/agents/Invigilator.py +233 -233
  9. edsl/agents/InvigilatorBase.py +270 -265
  10. edsl/agents/PromptConstructor.py +354 -354
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +157 -157
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -1028
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -555
  37. edsl/data/CacheEntry.py +233 -233
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +78 -78
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +175 -175
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -42
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -148
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/PerplexityService.py +163 -163
  72. edsl/inference_services/TestService.py +89 -89
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -41
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -56
  79. edsl/jobs/Jobs.py +898 -898
  80. edsl/jobs/JobsChecks.py +147 -147
  81. edsl/jobs/JobsPrompts.py +268 -268
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -239
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -63
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -251
  87. edsl/jobs/interviews/Interview.py +661 -661
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -466
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -330
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -450
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -163
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -30
  106. edsl/language_models/LanguageModel.py +668 -668
  107. edsl/language_models/ModelList.py +155 -155
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -3
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -190
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -83
  115. edsl/language_models/utilities.py +64 -64
  116. edsl/notebooks/Notebook.py +258 -258
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -362
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -289
  121. edsl/questions/QuestionBase.py +664 -664
  122. edsl/questions/QuestionBaseGenMixin.py +161 -161
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -217
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -182
  127. edsl/questions/QuestionFreeText.py +114 -114
  128. edsl/questions/QuestionFunctional.py +166 -166
  129. edsl/questions/QuestionList.py +231 -231
  130. edsl/questions/QuestionMultipleChoice.py +286 -286
  131. edsl/questions/QuestionNumerical.py +153 -153
  132. edsl/questions/QuestionRank.py +324 -324
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -174
  136. edsl/questions/SimpleAskMixin.py +73 -73
  137. edsl/questions/__init__.py +26 -26
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -87
  142. edsl/questions/derived/QuestionTopK.py +93 -93
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -413
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -177
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -108
  177. edsl/results/Dataset.py +424 -424
  178. edsl/results/DatasetExportMixin.py +731 -731
  179. edsl/results/DatasetTree.py +275 -275
  180. edsl/results/Result.py +465 -465
  181. edsl/results/Results.py +1165 -1165
  182. edsl/results/ResultsDBMixin.py +238 -238
  183. edsl/results/ResultsExportMixin.py +43 -43
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -135
  188. edsl/results/TableDisplay.py +198 -198
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +77 -77
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -632
  193. edsl/scenarios/Scenario.py +601 -601
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  195. edsl/scenarios/ScenarioJoin.py +127 -127
  196. edsl/scenarios/ScenarioList.py +1287 -1287
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  199. edsl/scenarios/__init__.py +4 -4
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -528
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -326
  210. edsl/surveys/RuleCollection.py +387 -387
  211. edsl/surveys/Survey.py +1801 -1801
  212. edsl/surveys/SurveyCSS.py +261 -261
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/SurveyFlowVisualizationMixin.py +179 -179
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -3
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -56
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -49
  220. edsl/surveys/instructions/Instruction.py +65 -65
  221. edsl/surveys/instructions/InstructionCollection.py +77 -77
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -19
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/utilities/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -424
  252. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/METADATA +1 -1
  254. edsl-0.1.39.dev3.dist-info/RECORD +277 -0
  255. edsl-0.1.39.dev1.dist-info/RECORD +0 -277
  256. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/WHEEL +0 -0
@@ -1,242 +1,242 @@
1
- import asyncio
2
- from typing import Callable, Union, List
3
- from collections import UserList, UserDict
4
-
5
- from edsl.jobs.buckets import ModelBuckets
6
- from edsl.exceptions import InterviewErrorPriorTaskCanceled
7
-
8
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
9
- from edsl.jobs.tasks.task_status_enum import TaskStatus, TaskStatusDescriptor
10
- from edsl.jobs.tasks.TaskStatusLog import TaskStatusLog
11
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
12
- from edsl.jobs.tokens.TokenUsage import TokenUsage
13
- from edsl.jobs.Answers import Answers
14
- from edsl.questions.QuestionBase import QuestionBase
15
-
16
-
17
class TokensUsed(UserDict):
    """Container for tokens used by a task.

    Maps exactly two keys: ``cached_tokens`` (usage satisfied from the
    cache) and ``new_tokens`` (usage from fresh, non-cached calls).
    """

    def __init__(self, cached_tokens, new_tokens):
        """Store the cached and new token usage under fixed keys.

        :param cached_tokens: token usage that was served from the cache.
        :param new_tokens: token usage from new (non-cached) calls.
        """
        d = {"cached_tokens": cached_tokens, "new_tokens": new_tokens}
        super().__init__(d)
23
-
24
-
25
class QuestionTaskCreator(UserList):
    """Class to create and manage a single question and its dependencies.

    The instance is a UserList of the tasks that must be completed before
    the focal task can be run. The focal task is the question that we are
    interested in answering.
    """

    # Descriptor that validates status assignments and records changes.
    task_status = TaskStatusDescriptor()

    def __init__(
        self,
        *,
        question: QuestionBase,
        answer_question_func: Callable,
        model_buckets: ModelBuckets,
        token_estimator: Union[Callable, None] = None,
        iteration: int = 0,
    ):
        """Initialize the QuestionTaskCreator instance.

        :param question: the question that we are interested in answering.
        :param answer_question_func: the function that will answer the question.
        :param model_buckets: the bucket collection containing the requests and
            tokens buckets which control the rate of API calls and token usage.
        :param token_estimator: a function that estimates the number of tokens
            required to answer the question.
        :param iteration: the iteration number of the question.
        """
        super().__init__([])
        # answer_question_func is the 'interview.answer_question_and_record_task' method
        self.answer_question_func = answer_question_func
        self.question = question
        self.iteration = iteration

        self.model_buckets = model_buckets
        self.requests_bucket = self.model_buckets.requests_bucket
        self.tokens_bucket = self.model_buckets.tokens_bucket
        self.status_log = TaskStatusLog()

        def fake_token_estimator(question):
            # Fallback: assume one token when no estimator is supplied.
            return 1

        self.token_estimator = token_estimator or fake_token_estimator

        # Assume that the task is *not* from the cache until we know otherwise;
        # _run_focal_task might flip this bit later.
        self.from_cache = False

        self.cached_token_usage = TokenUsage(from_cache=True)
        self.new_token_usage = TokenUsage(from_cache=False)
        self.task_status = TaskStatus.NOT_STARTED

    def add_dependency(self, task: asyncio.Task) -> None:
        """Adds a task dependency to the list of dependencies.

        >>> qt1 = QuestionTaskCreator.example()
        >>> qt2 = QuestionTaskCreator.example()
        >>> qt2.add_dependency(qt1)
        >>> len(qt2)
        1
        """
        self.append(task)

    def generate_task(self) -> asyncio.Task:
        """Create a task that depends on the passed-in dependencies."""
        task = asyncio.create_task(
            self._run_task_async(), name=self.question.question_name
        )
        # Record the dependency names for debugging/reporting.
        task.depends_on = [t.get_name() for t in self]
        return task

    def estimated_tokens(self) -> int:
        """Estimates the number of tokens that will be required to run the focal task."""
        return self.token_estimator(self.question)

    def token_usage(self) -> TokensUsed:
        """Returns the token usage for the task.

        >>> qt = QuestionTaskCreator.example()
        >>> answers = asyncio.run(qt._run_focal_task())
        >>> qt.token_usage()
        {'cached_tokens': TokenUsage(from_cache=True, prompt_tokens=0, completion_tokens=0), 'new_tokens': TokenUsage(from_cache=False, prompt_tokens=0, completion_tokens=0)}
        """
        return TokensUsed(
            cached_tokens=self.cached_token_usage, new_tokens=self.new_token_usage
        )

    async def _run_focal_task(self) -> Answers:
        """Run the focal task, i.e. the question that we are interested in answering.

        It is only called after all the dependency tasks are completed.

        >>> qt = QuestionTaskCreator.example()
        >>> answers = asyncio.run(qt._run_focal_task())
        >>> answers.answer
        'This is an example answer'
        """
        requested_tokens = self.estimated_tokens()
        # Flag the wait state before blocking on token capacity.
        if self.tokens_bucket.wait_time(requested_tokens) > 0:
            self.task_status = TaskStatus.WAITING_FOR_TOKEN_CAPACITY

        await self.tokens_bucket.get_tokens(requested_tokens)

        if self.requests_bucket.wait_time(1) > 0:
            self.waiting = True  # TODO: is this attribute still needed?
            self.task_status = TaskStatus.WAITING_FOR_REQUEST_CAPACITY

        await self.requests_bucket.get_tokens(1, cheat_bucket_capacity=True)

        self.task_status = TaskStatus.API_CALL_IN_PROGRESS
        try:
            results = await self.answer_question_func(
                question=self.question, task=None  # self
            )
            self.task_status = TaskStatus.SUCCESS
        except Exception:
            self.task_status = TaskStatus.FAILED
            raise  # bare raise preserves the original traceback

        if results.cache_used:
            # The answer came from the cache, so hand back the capacity we
            # reserved and switch both buckets to turbo mode (no waiting).
            self.tokens_bucket.add_tokens(requested_tokens)
            self.requests_bucket.add_tokens(1)
            self.from_cache = True
            self.tokens_bucket.turbo_mode_on()
            self.requests_bucket.turbo_mode_on()
        else:
            self.tokens_bucket.turbo_mode_off()
            self.requests_bucket.turbo_mode_off()

        return results

    @classmethod
    def example(cls):
        """Return an example instance of the class."""
        from edsl import QuestionFreeText
        from edsl.jobs.buckets.ModelBuckets import ModelBuckets

        m = ModelBuckets.infinity_bucket()

        from collections import namedtuple

        AnswerDict = namedtuple("AnswerDict", ["answer", "cache_used"])
        answer = AnswerDict(answer="This is an example answer", cache_used=False)

        async def answer_question_func(question, task):
            return answer

        return cls(
            question=QuestionFreeText.example(),
            answer_question_func=answer_question_func,
            model_buckets=m,
            token_estimator=None,
            iteration=0,
        )

    async def _run_task_async(self) -> None:
        """Run the task asynchronously, awaiting the tasks that must be completed before this one can be run.

        >>> qt1 = QuestionTaskCreator.example()
        >>> qt2 = QuestionTaskCreator.example()
        >>> qt2.add_dependency(qt1)

        The method follows these steps:
        1. Set the task_status to TaskStatus.WAITING_FOR_DEPENDENCIES.
        2. Await asyncio.gather(*self, return_exceptions=True) to run all the
           dependent tasks concurrently; return_exceptions=True keeps one
           failing dependency from cancelling the others mid-gather.
        3. If any dependency raised:
           - CancelledError: set this task's status to TaskStatus.CANCELLED
             and re-raise, terminating the current task.
           - any other exception: set the status to TaskStatus.PARENT_FAILED
             and raise InterviewErrorPriorTaskCanceled chained to the original
             exception as its cause.
        4. Only if every dependency completed successfully is the focal task
           (self._run_focal_task()) executed.

        :return: the focal task's result, or never returns if a dependency failed.
        """
        try:
            self.task_status = TaskStatus.WAITING_FOR_DEPENDENCIES
            # With return_exceptions=False the first exception would be raised
            # immediately and cancel the other tasks; not the behavior we want.
            gather_results = await asyncio.gather(*self, return_exceptions=True)

            for result in gather_results:
                if isinstance(result, Exception):
                    raise result

        except asyncio.CancelledError:
            self.task_status = TaskStatus.CANCELLED
            raise
        except Exception as e:
            # One of the dependencies failed.
            self.task_status = TaskStatus.PARENT_FAILED
            # Convert the parent failure into a custom exception so the task
            # is cancelled with a clear, identifiable cause.
            raise InterviewErrorPriorTaskCanceled(
                f"Required tasks failed for {self.question.question_name}"
            ) from e

        # This only runs if all the dependencies were successful.
        return await self._run_focal_task()
237
-
238
-
239
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
1
+ import asyncio
2
+ from typing import Callable, Union, List
3
+ from collections import UserList, UserDict
4
+
5
+ from edsl.jobs.buckets import ModelBuckets
6
+ from edsl.exceptions import InterviewErrorPriorTaskCanceled
7
+
8
+ from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
9
+ from edsl.jobs.tasks.task_status_enum import TaskStatus, TaskStatusDescriptor
10
+ from edsl.jobs.tasks.TaskStatusLog import TaskStatusLog
11
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
12
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
13
+ from edsl.jobs.Answers import Answers
14
+ from edsl.questions.QuestionBase import QuestionBase
15
+
16
+
17
class TokensUsed(UserDict):
    """Container for tokens used by a task.

    Maps exactly two keys: ``cached_tokens`` (usage satisfied from the
    cache) and ``new_tokens`` (usage from fresh, non-cached calls).
    """

    def __init__(self, cached_tokens, new_tokens):
        """Store the cached and new token usage under fixed keys.

        :param cached_tokens: token usage that was served from the cache.
        :param new_tokens: token usage from new (non-cached) calls.
        """
        d = {"cached_tokens": cached_tokens, "new_tokens": new_tokens}
        super().__init__(d)
23
+
24
+
25
class QuestionTaskCreator(UserList):
    """Class to create and manage a single question and its dependencies.

    The instance is a UserList of the tasks that must be completed before
    the focal task can be run. The focal task is the question that we are
    interested in answering.
    """

    # Descriptor that validates status assignments and records changes.
    task_status = TaskStatusDescriptor()

    def __init__(
        self,
        *,
        question: QuestionBase,
        answer_question_func: Callable,
        model_buckets: ModelBuckets,
        token_estimator: Union[Callable, None] = None,
        iteration: int = 0,
    ):
        """Initialize the QuestionTaskCreator instance.

        :param question: the question that we are interested in answering.
        :param answer_question_func: the function that will answer the question.
        :param model_buckets: the bucket collection containing the requests and
            tokens buckets which control the rate of API calls and token usage.
        :param token_estimator: a function that estimates the number of tokens
            required to answer the question.
        :param iteration: the iteration number of the question.
        """
        super().__init__([])
        # answer_question_func is the 'interview.answer_question_and_record_task' method
        self.answer_question_func = answer_question_func
        self.question = question
        self.iteration = iteration

        self.model_buckets = model_buckets
        self.requests_bucket = self.model_buckets.requests_bucket
        self.tokens_bucket = self.model_buckets.tokens_bucket
        self.status_log = TaskStatusLog()

        def fake_token_estimator(question):
            # Fallback: assume one token when no estimator is supplied.
            return 1

        self.token_estimator = token_estimator or fake_token_estimator

        # Assume that the task is *not* from the cache until we know otherwise;
        # _run_focal_task might flip this bit later.
        self.from_cache = False

        self.cached_token_usage = TokenUsage(from_cache=True)
        self.new_token_usage = TokenUsage(from_cache=False)
        self.task_status = TaskStatus.NOT_STARTED

    def add_dependency(self, task: asyncio.Task) -> None:
        """Adds a task dependency to the list of dependencies.

        >>> qt1 = QuestionTaskCreator.example()
        >>> qt2 = QuestionTaskCreator.example()
        >>> qt2.add_dependency(qt1)
        >>> len(qt2)
        1
        """
        self.append(task)

    def generate_task(self) -> asyncio.Task:
        """Create a task that depends on the passed-in dependencies."""
        task = asyncio.create_task(
            self._run_task_async(), name=self.question.question_name
        )
        # Record the dependency names for debugging/reporting.
        task.depends_on = [t.get_name() for t in self]
        return task

    def estimated_tokens(self) -> int:
        """Estimates the number of tokens that will be required to run the focal task."""
        return self.token_estimator(self.question)

    def token_usage(self) -> TokensUsed:
        """Returns the token usage for the task.

        >>> qt = QuestionTaskCreator.example()
        >>> answers = asyncio.run(qt._run_focal_task())
        >>> qt.token_usage()
        {'cached_tokens': TokenUsage(from_cache=True, prompt_tokens=0, completion_tokens=0), 'new_tokens': TokenUsage(from_cache=False, prompt_tokens=0, completion_tokens=0)}
        """
        return TokensUsed(
            cached_tokens=self.cached_token_usage, new_tokens=self.new_token_usage
        )

    async def _run_focal_task(self) -> Answers:
        """Run the focal task, i.e. the question that we are interested in answering.

        It is only called after all the dependency tasks are completed.

        >>> qt = QuestionTaskCreator.example()
        >>> answers = asyncio.run(qt._run_focal_task())
        >>> answers.answer
        'This is an example answer'
        """
        requested_tokens = self.estimated_tokens()
        # Flag the wait state before blocking on token capacity.
        if self.tokens_bucket.wait_time(requested_tokens) > 0:
            self.task_status = TaskStatus.WAITING_FOR_TOKEN_CAPACITY

        await self.tokens_bucket.get_tokens(requested_tokens)

        if self.requests_bucket.wait_time(1) > 0:
            self.waiting = True  # TODO: is this attribute still needed?
            self.task_status = TaskStatus.WAITING_FOR_REQUEST_CAPACITY

        await self.requests_bucket.get_tokens(1, cheat_bucket_capacity=True)

        self.task_status = TaskStatus.API_CALL_IN_PROGRESS
        try:
            results = await self.answer_question_func(
                question=self.question, task=None  # self
            )
            self.task_status = TaskStatus.SUCCESS
        except Exception:
            self.task_status = TaskStatus.FAILED
            raise  # bare raise preserves the original traceback

        if results.cache_used:
            # The answer came from the cache, so hand back the capacity we
            # reserved and switch both buckets to turbo mode (no waiting).
            self.tokens_bucket.add_tokens(requested_tokens)
            self.requests_bucket.add_tokens(1)
            self.from_cache = True
            self.tokens_bucket.turbo_mode_on()
            self.requests_bucket.turbo_mode_on()
        else:
            self.tokens_bucket.turbo_mode_off()
            self.requests_bucket.turbo_mode_off()

        return results

    @classmethod
    def example(cls):
        """Return an example instance of the class."""
        from edsl import QuestionFreeText
        from edsl.jobs.buckets.ModelBuckets import ModelBuckets

        m = ModelBuckets.infinity_bucket()

        from collections import namedtuple

        AnswerDict = namedtuple("AnswerDict", ["answer", "cache_used"])
        answer = AnswerDict(answer="This is an example answer", cache_used=False)

        async def answer_question_func(question, task):
            return answer

        return cls(
            question=QuestionFreeText.example(),
            answer_question_func=answer_question_func,
            model_buckets=m,
            token_estimator=None,
            iteration=0,
        )

    async def _run_task_async(self) -> None:
        """Run the task asynchronously, awaiting the tasks that must be completed before this one can be run.

        >>> qt1 = QuestionTaskCreator.example()
        >>> qt2 = QuestionTaskCreator.example()
        >>> qt2.add_dependency(qt1)

        The method follows these steps:
        1. Set the task_status to TaskStatus.WAITING_FOR_DEPENDENCIES.
        2. Await asyncio.gather(*self, return_exceptions=True) to run all the
           dependent tasks concurrently; return_exceptions=True keeps one
           failing dependency from cancelling the others mid-gather.
        3. If any dependency raised:
           - CancelledError: set this task's status to TaskStatus.CANCELLED
             and re-raise, terminating the current task.
           - any other exception: set the status to TaskStatus.PARENT_FAILED
             and raise InterviewErrorPriorTaskCanceled chained to the original
             exception as its cause.
        4. Only if every dependency completed successfully is the focal task
           (self._run_focal_task()) executed.

        :return: the focal task's result, or never returns if a dependency failed.
        """
        try:
            self.task_status = TaskStatus.WAITING_FOR_DEPENDENCIES
            # With return_exceptions=False the first exception would be raised
            # immediately and cancel the other tasks; not the behavior we want.
            gather_results = await asyncio.gather(*self, return_exceptions=True)

            for result in gather_results:
                if isinstance(result, Exception):
                    raise result

        except asyncio.CancelledError:
            self.task_status = TaskStatus.CANCELLED
            raise
        except Exception as e:
            # One of the dependencies failed.
            self.task_status = TaskStatus.PARENT_FAILED
            # Convert the parent failure into a custom exception so the task
            # is cancelled with a clear, identifiable cause.
            raise InterviewErrorPriorTaskCanceled(
                f"Required tasks failed for {self.question.question_name}"
            ) from e

        # This only runs if all the dependencies were successful.
        return await self._run_focal_task()
237
+
238
+
239
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
@@ -1,64 +1,64 @@
1
- from typing import Callable, Union, List
2
- from collections import UserDict
3
-
4
- from edsl.jobs.tokens.TokenUsage import TokenUsage
5
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
6
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
7
-
8
-
9
- class TaskCreators(UserDict):
10
- """A dictionary of task creators. A task is one question being answered.
11
-
12
- This is used to track the status of the tasks within an interview.
13
- """
14
-
15
- def __init__(self, *args, **kwargs):
16
- super().__init__(*args, **kwargs)
17
-
18
- @property
19
- def token_usage(self) -> InterviewTokenUsage:
20
- """Determines how many tokens were used for the interview.
21
-
22
- This is iterates through all tasks that make up an interview.
23
- For each task, it determines how many tokens were used and whether they were cached or new.
24
- It then sums the total number of cached and new tokens used for the interview.
25
-
26
- """
27
- cached_tokens = TokenUsage(from_cache=True)
28
- new_tokens = TokenUsage(from_cache=False)
29
- for task_creator in self.values():
30
- token_usage = task_creator.token_usage()
31
- cached_tokens += token_usage["cached_tokens"]
32
- new_tokens += token_usage["new_tokens"]
33
- return InterviewTokenUsage(
34
- new_token_usage=new_tokens, cached_token_usage=cached_tokens
35
- )
36
-
37
- def print(self) -> None:
38
- from rich import print
39
-
40
- print({task.get_name(): task.task_status for task in self.values()})
41
-
42
- @property
43
- def interview_status(self) -> InterviewStatusDictionary:
44
- """Returns a dictionary, InterviewStatusDictionary, mapping task status codes to counts of tasks in that state.
45
-
46
- >>> t = TaskCreators()
47
- >>> t.interview_status
48
- InterviewStatusDictionary({<TaskStatus.NOT_STARTED: 1>: 0, <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>: 0, <TaskStatus.CANCELLED: 3>: 0, <TaskStatus.PARENT_FAILED: 4>: 0, <TaskStatus.WAITING_FOR_REQUEST_CAPACITY: 5>: 0, <TaskStatus.WAITING_FOR_TOKEN_CAPACITY: 6>: 0, <TaskStatus.API_CALL_IN_PROGRESS: 7>: 0, <TaskStatus.SUCCESS: 8>: 0, <TaskStatus.FAILED: 9>: 0, 'number_from_cache': 0})
49
- """
50
- status_dict = InterviewStatusDictionary()
51
- for task_creator in self.values():
52
- status_dict[task_creator.task_status] += 1
53
- status_dict["number_from_cache"] += task_creator.from_cache
54
- return status_dict
55
-
56
- def status_logs(self):
57
- """Returns a list of status logs for each task."""
58
- return [task_creator.status_log for task_creator in self.values()]
59
-
60
-
61
- if __name__ == "__main__":
62
- import doctest
63
-
64
- doctest.testmod(optionflags=doctest.ELLIPSIS)
1
+ from typing import Callable, Union, List
2
+ from collections import UserDict
3
+
4
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
5
+ from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
6
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
7
+
8
+
9
+ class TaskCreators(UserDict):
10
+ """A dictionary of task creators. A task is one question being answered.
11
+
12
+ This is used to track the status of the tasks within an interview.
13
+ """
14
+
15
+ def __init__(self, *args, **kwargs):
16
+ super().__init__(*args, **kwargs)
17
+
18
+ @property
19
+ def token_usage(self) -> InterviewTokenUsage:
20
+ """Determines how many tokens were used for the interview.
21
+
22
+ This is iterates through all tasks that make up an interview.
23
+ For each task, it determines how many tokens were used and whether they were cached or new.
24
+ It then sums the total number of cached and new tokens used for the interview.
25
+
26
+ """
27
+ cached_tokens = TokenUsage(from_cache=True)
28
+ new_tokens = TokenUsage(from_cache=False)
29
+ for task_creator in self.values():
30
+ token_usage = task_creator.token_usage()
31
+ cached_tokens += token_usage["cached_tokens"]
32
+ new_tokens += token_usage["new_tokens"]
33
+ return InterviewTokenUsage(
34
+ new_token_usage=new_tokens, cached_token_usage=cached_tokens
35
+ )
36
+
37
+ def print(self) -> None:
38
+ from rich import print
39
+
40
+ print({task.get_name(): task.task_status for task in self.values()})
41
+
42
+ @property
43
+ def interview_status(self) -> InterviewStatusDictionary:
44
+ """Returns a dictionary, InterviewStatusDictionary, mapping task status codes to counts of tasks in that state.
45
+
46
+ >>> t = TaskCreators()
47
+ >>> t.interview_status
48
+ InterviewStatusDictionary({<TaskStatus.NOT_STARTED: 1>: 0, <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>: 0, <TaskStatus.CANCELLED: 3>: 0, <TaskStatus.PARENT_FAILED: 4>: 0, <TaskStatus.WAITING_FOR_REQUEST_CAPACITY: 5>: 0, <TaskStatus.WAITING_FOR_TOKEN_CAPACITY: 6>: 0, <TaskStatus.API_CALL_IN_PROGRESS: 7>: 0, <TaskStatus.SUCCESS: 8>: 0, <TaskStatus.FAILED: 9>: 0, 'number_from_cache': 0})
49
+ """
50
+ status_dict = InterviewStatusDictionary()
51
+ for task_creator in self.values():
52
+ status_dict[task_creator.task_status] += 1
53
+ status_dict["number_from_cache"] += task_creator.from_cache
54
+ return status_dict
55
+
56
+ def status_logs(self):
57
+ """Returns a list of status logs for each task."""
58
+ return [task_creator.status_log for task_creator in self.values()]
59
+
60
+
61
+ if __name__ == "__main__":
62
+ import doctest
63
+
64
+ doctest.testmod(optionflags=doctest.ELLIPSIS)