edsl 0.1.39.dev3__py3-none-any.whl → 0.1.39.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (344)
  1. edsl/Base.py +413 -332
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +57 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +1071 -867
  7. edsl/agents/AgentList.py +551 -413
  8. edsl/agents/Invigilator.py +284 -233
  9. edsl/agents/InvigilatorBase.py +257 -270
  10. edsl/agents/PromptConstructor.py +272 -354
  11. edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
  12. edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
  13. edsl/agents/__init__.py +2 -3
  14. edsl/agents/descriptors.py +99 -99
  15. edsl/agents/prompt_helpers.py +129 -129
  16. edsl/agents/question_option_processor.py +172 -0
  17. edsl/auto/AutoStudy.py +130 -117
  18. edsl/auto/StageBase.py +243 -230
  19. edsl/auto/StageGenerateSurvey.py +178 -178
  20. edsl/auto/StageLabelQuestions.py +125 -125
  21. edsl/auto/StagePersona.py +61 -61
  22. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  23. edsl/auto/StagePersonaDimensionValues.py +74 -74
  24. edsl/auto/StagePersonaDimensions.py +69 -69
  25. edsl/auto/StageQuestions.py +74 -73
  26. edsl/auto/SurveyCreatorPipeline.py +21 -21
  27. edsl/auto/utilities.py +218 -224
  28. edsl/base/Base.py +279 -279
  29. edsl/config.py +177 -157
  30. edsl/conversation/Conversation.py +290 -290
  31. edsl/conversation/car_buying.py +59 -58
  32. edsl/conversation/chips.py +95 -95
  33. edsl/conversation/mug_negotiation.py +81 -81
  34. edsl/conversation/next_speaker_utilities.py +93 -93
  35. edsl/coop/CoopFunctionsMixin.py +15 -0
  36. edsl/coop/ExpectedParrotKeyHandler.py +125 -0
  37. edsl/coop/PriceFetcher.py +54 -54
  38. edsl/coop/__init__.py +2 -2
  39. edsl/coop/coop.py +1106 -1028
  40. edsl/coop/utils.py +131 -131
  41. edsl/data/Cache.py +573 -555
  42. edsl/data/CacheEntry.py +230 -233
  43. edsl/data/CacheHandler.py +168 -149
  44. edsl/data/RemoteCacheSync.py +186 -78
  45. edsl/data/SQLiteDict.py +292 -292
  46. edsl/data/__init__.py +5 -4
  47. edsl/data/hack.py +10 -0
  48. edsl/data/orm.py +10 -10
  49. edsl/data_transfer_models.py +74 -73
  50. edsl/enums.py +202 -175
  51. edsl/exceptions/BaseException.py +21 -21
  52. edsl/exceptions/__init__.py +54 -54
  53. edsl/exceptions/agents.py +54 -42
  54. edsl/exceptions/cache.py +5 -5
  55. edsl/exceptions/configuration.py +16 -16
  56. edsl/exceptions/coop.py +10 -10
  57. edsl/exceptions/data.py +14 -14
  58. edsl/exceptions/general.py +34 -34
  59. edsl/exceptions/inference_services.py +5 -0
  60. edsl/exceptions/jobs.py +33 -33
  61. edsl/exceptions/language_models.py +63 -63
  62. edsl/exceptions/prompts.py +15 -15
  63. edsl/exceptions/questions.py +109 -91
  64. edsl/exceptions/results.py +29 -29
  65. edsl/exceptions/scenarios.py +29 -22
  66. edsl/exceptions/surveys.py +37 -37
  67. edsl/inference_services/AnthropicService.py +106 -87
  68. edsl/inference_services/AvailableModelCacheHandler.py +184 -0
  69. edsl/inference_services/AvailableModelFetcher.py +215 -0
  70. edsl/inference_services/AwsBedrock.py +118 -120
  71. edsl/inference_services/AzureAI.py +215 -217
  72. edsl/inference_services/DeepInfraService.py +18 -18
  73. edsl/inference_services/GoogleService.py +143 -148
  74. edsl/inference_services/GroqService.py +20 -20
  75. edsl/inference_services/InferenceServiceABC.py +80 -147
  76. edsl/inference_services/InferenceServicesCollection.py +138 -97
  77. edsl/inference_services/MistralAIService.py +120 -123
  78. edsl/inference_services/OllamaService.py +18 -18
  79. edsl/inference_services/OpenAIService.py +236 -224
  80. edsl/inference_services/PerplexityService.py +160 -163
  81. edsl/inference_services/ServiceAvailability.py +135 -0
  82. edsl/inference_services/TestService.py +90 -89
  83. edsl/inference_services/TogetherAIService.py +172 -170
  84. edsl/inference_services/data_structures.py +134 -0
  85. edsl/inference_services/models_available_cache.py +118 -118
  86. edsl/inference_services/rate_limits_cache.py +25 -25
  87. edsl/inference_services/registry.py +41 -41
  88. edsl/inference_services/write_available.py +10 -10
  89. edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
  90. edsl/jobs/Answers.py +43 -56
  91. edsl/jobs/FetchInvigilator.py +47 -0
  92. edsl/jobs/InterviewTaskManager.py +98 -0
  93. edsl/jobs/InterviewsConstructor.py +50 -0
  94. edsl/jobs/Jobs.py +823 -898
  95. edsl/jobs/JobsChecks.py +172 -147
  96. edsl/jobs/JobsComponentConstructor.py +189 -0
  97. edsl/jobs/JobsPrompts.py +270 -268
  98. edsl/jobs/JobsRemoteInferenceHandler.py +311 -239
  99. edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
  100. edsl/jobs/RequestTokenEstimator.py +30 -0
  101. edsl/jobs/__init__.py +1 -1
  102. edsl/jobs/async_interview_runner.py +138 -0
  103. edsl/jobs/buckets/BucketCollection.py +104 -63
  104. edsl/jobs/buckets/ModelBuckets.py +65 -65
  105. edsl/jobs/buckets/TokenBucket.py +283 -251
  106. edsl/jobs/buckets/TokenBucketAPI.py +211 -0
  107. edsl/jobs/buckets/TokenBucketClient.py +191 -0
  108. edsl/jobs/check_survey_scenario_compatibility.py +85 -0
  109. edsl/jobs/data_structures.py +120 -0
  110. edsl/jobs/decorators.py +35 -0
  111. edsl/jobs/interviews/Interview.py +396 -661
  112. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  113. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  114. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  115. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  116. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  117. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  118. edsl/jobs/interviews/ReportErrors.py +66 -66
  119. edsl/jobs/interviews/interview_status_enum.py +9 -9
  120. edsl/jobs/jobs_status_enums.py +9 -0
  121. edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
  122. edsl/jobs/results_exceptions_handler.py +98 -0
  123. edsl/jobs/runners/JobsRunnerAsyncio.py +151 -466
  124. edsl/jobs/runners/JobsRunnerStatus.py +297 -330
  125. edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
  126. edsl/jobs/tasks/TaskCreators.py +64 -64
  127. edsl/jobs/tasks/TaskHistory.py +470 -450
  128. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  129. edsl/jobs/tasks/task_status_enum.py +161 -163
  130. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  131. edsl/jobs/tokens/TokenUsage.py +34 -34
  132. edsl/language_models/ComputeCost.py +63 -0
  133. edsl/language_models/LanguageModel.py +626 -668
  134. edsl/language_models/ModelList.py +164 -155
  135. edsl/language_models/PriceManager.py +127 -0
  136. edsl/language_models/RawResponseHandler.py +106 -0
  137. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  138. edsl/language_models/ServiceDataSources.py +0 -0
  139. edsl/language_models/__init__.py +2 -3
  140. edsl/language_models/fake_openai_call.py +15 -15
  141. edsl/language_models/fake_openai_service.py +61 -61
  142. edsl/language_models/key_management/KeyLookup.py +63 -0
  143. edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
  144. edsl/language_models/key_management/KeyLookupCollection.py +38 -0
  145. edsl/language_models/key_management/__init__.py +0 -0
  146. edsl/language_models/key_management/models.py +131 -0
  147. edsl/language_models/model.py +256 -0
  148. edsl/language_models/repair.py +156 -156
  149. edsl/language_models/utilities.py +65 -64
  150. edsl/notebooks/Notebook.py +263 -258
  151. edsl/notebooks/NotebookToLaTeX.py +142 -0
  152. edsl/notebooks/__init__.py +1 -1
  153. edsl/prompts/Prompt.py +352 -362
  154. edsl/prompts/__init__.py +2 -2
  155. edsl/questions/ExceptionExplainer.py +77 -0
  156. edsl/questions/HTMLQuestion.py +103 -0
  157. edsl/questions/QuestionBase.py +518 -664
  158. edsl/questions/QuestionBasePromptsMixin.py +221 -217
  159. edsl/questions/QuestionBudget.py +227 -227
  160. edsl/questions/QuestionCheckBox.py +359 -359
  161. edsl/questions/QuestionExtract.py +180 -182
  162. edsl/questions/QuestionFreeText.py +113 -114
  163. edsl/questions/QuestionFunctional.py +166 -166
  164. edsl/questions/QuestionList.py +223 -231
  165. edsl/questions/QuestionMatrix.py +265 -0
  166. edsl/questions/QuestionMultipleChoice.py +330 -286
  167. edsl/questions/QuestionNumerical.py +151 -153
  168. edsl/questions/QuestionRank.py +314 -324
  169. edsl/questions/Quick.py +41 -41
  170. edsl/questions/SimpleAskMixin.py +74 -73
  171. edsl/questions/__init__.py +27 -26
  172. edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +334 -289
  173. edsl/questions/compose_questions.py +98 -98
  174. edsl/questions/data_structures.py +20 -0
  175. edsl/questions/decorators.py +21 -21
  176. edsl/questions/derived/QuestionLikertFive.py +76 -76
  177. edsl/questions/derived/QuestionLinearScale.py +90 -87
  178. edsl/questions/derived/QuestionTopK.py +93 -93
  179. edsl/questions/derived/QuestionYesNo.py +82 -82
  180. edsl/questions/descriptors.py +427 -413
  181. edsl/questions/loop_processor.py +149 -0
  182. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  183. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  184. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  185. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  186. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  187. edsl/questions/prompt_templates/question_list.jinja +17 -17
  188. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  189. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  190. edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +168 -161
  191. edsl/questions/question_registry.py +177 -177
  192. edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +71 -71
  193. edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +188 -174
  194. edsl/questions/response_validator_factory.py +34 -0
  195. edsl/questions/settings.py +12 -12
  196. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  197. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  198. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  199. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  200. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  201. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  202. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  203. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  204. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  205. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  206. edsl/questions/templates/list/question_presentation.jinja +5 -5
  207. edsl/questions/templates/matrix/__init__.py +1 -0
  208. edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
  209. edsl/questions/templates/matrix/question_presentation.jinja +20 -0
  210. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  211. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  212. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  213. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  214. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  215. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  216. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  217. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  218. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  219. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  220. edsl/results/CSSParameterizer.py +108 -108
  221. edsl/results/Dataset.py +587 -424
  222. edsl/results/DatasetExportMixin.py +594 -731
  223. edsl/results/DatasetTree.py +295 -275
  224. edsl/results/MarkdownToDocx.py +122 -0
  225. edsl/results/MarkdownToPDF.py +111 -0
  226. edsl/results/Result.py +557 -465
  227. edsl/results/Results.py +1183 -1165
  228. edsl/results/ResultsExportMixin.py +45 -43
  229. edsl/results/ResultsGGMixin.py +121 -121
  230. edsl/results/TableDisplay.py +125 -198
  231. edsl/results/TextEditor.py +50 -0
  232. edsl/results/__init__.py +2 -2
  233. edsl/results/file_exports.py +252 -0
  234. edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +33 -33
  235. edsl/results/{Selector.py → results_selector.py} +145 -135
  236. edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +98 -98
  237. edsl/results/smart_objects.py +96 -0
  238. edsl/results/table_data_class.py +12 -0
  239. edsl/results/table_display.css +77 -77
  240. edsl/results/table_renderers.py +118 -0
  241. edsl/results/tree_explore.py +115 -115
  242. edsl/scenarios/ConstructDownloadLink.py +109 -0
  243. edsl/scenarios/DocumentChunker.py +102 -0
  244. edsl/scenarios/DocxScenario.py +16 -0
  245. edsl/scenarios/FileStore.py +511 -632
  246. edsl/scenarios/PdfExtractor.py +40 -0
  247. edsl/scenarios/Scenario.py +498 -601
  248. edsl/scenarios/ScenarioHtmlMixin.py +65 -64
  249. edsl/scenarios/ScenarioList.py +1458 -1287
  250. edsl/scenarios/ScenarioListExportMixin.py +45 -52
  251. edsl/scenarios/ScenarioListPdfMixin.py +239 -261
  252. edsl/scenarios/__init__.py +3 -4
  253. edsl/scenarios/directory_scanner.py +96 -0
  254. edsl/scenarios/file_methods.py +85 -0
  255. edsl/scenarios/handlers/__init__.py +13 -0
  256. edsl/scenarios/handlers/csv.py +38 -0
  257. edsl/scenarios/handlers/docx.py +76 -0
  258. edsl/scenarios/handlers/html.py +37 -0
  259. edsl/scenarios/handlers/json.py +111 -0
  260. edsl/scenarios/handlers/latex.py +5 -0
  261. edsl/scenarios/handlers/md.py +51 -0
  262. edsl/scenarios/handlers/pdf.py +68 -0
  263. edsl/scenarios/handlers/png.py +39 -0
  264. edsl/scenarios/handlers/pptx.py +105 -0
  265. edsl/scenarios/handlers/py.py +294 -0
  266. edsl/scenarios/handlers/sql.py +313 -0
  267. edsl/scenarios/handlers/sqlite.py +149 -0
  268. edsl/scenarios/handlers/txt.py +33 -0
  269. edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +131 -127
  270. edsl/scenarios/scenario_selector.py +156 -0
  271. edsl/shared.py +1 -1
  272. edsl/study/ObjectEntry.py +173 -173
  273. edsl/study/ProofOfWork.py +113 -113
  274. edsl/study/SnapShot.py +80 -80
  275. edsl/study/Study.py +521 -528
  276. edsl/study/__init__.py +4 -4
  277. edsl/surveys/ConstructDAG.py +92 -0
  278. edsl/surveys/DAG.py +148 -148
  279. edsl/surveys/EditSurvey.py +221 -0
  280. edsl/surveys/InstructionHandler.py +100 -0
  281. edsl/surveys/Memory.py +31 -31
  282. edsl/surveys/MemoryManagement.py +72 -0
  283. edsl/surveys/MemoryPlan.py +244 -244
  284. edsl/surveys/Rule.py +327 -326
  285. edsl/surveys/RuleCollection.py +385 -387
  286. edsl/surveys/RuleManager.py +172 -0
  287. edsl/surveys/Simulator.py +75 -0
  288. edsl/surveys/Survey.py +1280 -1801
  289. edsl/surveys/SurveyCSS.py +273 -261
  290. edsl/surveys/SurveyExportMixin.py +259 -259
  291. edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +181 -179
  292. edsl/surveys/SurveyQualtricsImport.py +284 -284
  293. edsl/surveys/SurveyToApp.py +141 -0
  294. edsl/surveys/__init__.py +5 -3
  295. edsl/surveys/base.py +53 -53
  296. edsl/surveys/descriptors.py +60 -56
  297. edsl/surveys/instructions/ChangeInstruction.py +48 -49
  298. edsl/surveys/instructions/Instruction.py +56 -65
  299. edsl/surveys/instructions/InstructionCollection.py +82 -77
  300. edsl/templates/error_reporting/base.html +23 -23
  301. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  302. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  303. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  304. edsl/templates/error_reporting/interview_details.html +115 -115
  305. edsl/templates/error_reporting/interviews.html +19 -19
  306. edsl/templates/error_reporting/overview.html +4 -4
  307. edsl/templates/error_reporting/performance_plot.html +1 -1
  308. edsl/templates/error_reporting/report.css +73 -73
  309. edsl/templates/error_reporting/report.html +117 -117
  310. edsl/templates/error_reporting/report.js +25 -25
  311. edsl/test_h +1 -0
  312. edsl/tools/__init__.py +1 -1
  313. edsl/tools/clusters.py +192 -192
  314. edsl/tools/embeddings.py +27 -27
  315. edsl/tools/embeddings_plotting.py +118 -118
  316. edsl/tools/plotting.py +112 -112
  317. edsl/tools/summarize.py +18 -18
  318. edsl/utilities/PrettyList.py +56 -0
  319. edsl/utilities/SystemInfo.py +28 -28
  320. edsl/utilities/__init__.py +22 -22
  321. edsl/utilities/ast_utilities.py +25 -25
  322. edsl/utilities/data/Registry.py +6 -6
  323. edsl/utilities/data/__init__.py +1 -1
  324. edsl/utilities/data/scooter_results.json +1 -1
  325. edsl/utilities/decorators.py +77 -77
  326. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  327. edsl/utilities/gcp_bucket/example.py +50 -0
  328. edsl/utilities/interface.py +627 -627
  329. edsl/utilities/is_notebook.py +18 -0
  330. edsl/utilities/is_valid_variable_name.py +11 -0
  331. edsl/utilities/naming_utilities.py +263 -263
  332. edsl/utilities/remove_edsl_version.py +24 -0
  333. edsl/utilities/repair_functions.py +28 -28
  334. edsl/utilities/restricted_python.py +70 -70
  335. edsl/utilities/utilities.py +436 -424
  336. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/LICENSE +21 -21
  337. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/METADATA +13 -11
  338. edsl-0.1.39.dev4.dist-info/RECORD +361 -0
  339. edsl/language_models/KeyLookup.py +0 -30
  340. edsl/language_models/registry.py +0 -190
  341. edsl/language_models/unused/ReplicateBase.py +0 -83
  342. edsl/results/ResultsDBMixin.py +0 -238
  343. edsl-0.1.39.dev3.dist-info/RECORD +0 -277
  344. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/WHEEL +0 -0
edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
@@ -1,242 +1,244 @@
- import asyncio
- from typing import Callable, Union, List
- from collections import UserList, UserDict
-
- from edsl.jobs.buckets import ModelBuckets
- from edsl.exceptions import InterviewErrorPriorTaskCanceled
-
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
- from edsl.jobs.tasks.task_status_enum import TaskStatus, TaskStatusDescriptor
- from edsl.jobs.tasks.TaskStatusLog import TaskStatusLog
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
- from edsl.jobs.tokens.TokenUsage import TokenUsage
- from edsl.jobs.Answers import Answers
- from edsl.questions.QuestionBase import QuestionBase
-
-
- class TokensUsed(UserDict):
-     """ "Container for tokens used by a task."""
-
-     def __init__(self, cached_tokens, new_tokens):
-         d = {"cached_tokens": cached_tokens, "new_tokens": new_tokens}
-         super().__init__(d)
-
-
- class QuestionTaskCreator(UserList):
-     """Class to create and manage a single question and its dependencies.
-     The class is an instance of a UserList of tasks that must be completed before the focal task can be run.
-
-     It is a UserList with all the tasks that must be completed before the focal task can be run.
-     The focal task is the question that we are interested in answering.
-     """
-
-     task_status = TaskStatusDescriptor()
-
-     def __init__(
-         self,
-         *,
-         question: QuestionBase,
-         answer_question_func: Callable,
-         model_buckets: ModelBuckets,
-         token_estimator: Union[Callable, None] = None,
-         iteration: int = 0,
-     ):
-         """Initialize the QuestionTaskCreator instance.
-
-         :param question: the question that we are interested in answering.
-         :param answer_question_func: the function that will answer the question.
-         :param model_buckets: the bucket collection that contains the requests and tokens buckets which control the rate of API calls and token usage.
-         :param token_estimator: a function that estimates the number of tokens required to answer the question.
-         :param iteration: the iteration number of the question.
-
-         """
-         super().__init__([])
-         # answer_question_func is the 'interview.answer_question_and_record_task" method
-         self.answer_question_func = answer_question_func
-         self.question = question
-         self.iteration = iteration
-
-         self.model_buckets = model_buckets
-         self.requests_bucket = self.model_buckets.requests_bucket
-         self.tokens_bucket = self.model_buckets.tokens_bucket
-         self.status_log = TaskStatusLog()
-
-         def fake_token_estimator(question):
-             return 1
-
-         self.token_estimator = token_estimator or fake_token_estimator
-
-         # Assume that the task is *not* from the cache until we know otherwise; the _run_focal_task might flip this bit later.
-         self.from_cache = False
-
-         self.cached_token_usage = TokenUsage(from_cache=True)
-         self.new_token_usage = TokenUsage(from_cache=False)
-         self.task_status = TaskStatus.NOT_STARTED
-
-     def add_dependency(self, task: asyncio.Task) -> None:
-         """Adds a task dependency to the list of dependencies.
-
-         >>> qt1 = QuestionTaskCreator.example()
-         >>> qt2 = QuestionTaskCreator.example()
-         >>> qt2.add_dependency(qt1)
-         >>> len(qt2)
-         1
-         """
-         self.append(task)
-
-     def generate_task(self) -> asyncio.Task:
-         """Create a task that depends on the passed-in dependencies."""
-         task = asyncio.create_task(
-             self._run_task_async(), name=self.question.question_name
-         )
-         task.depends_on = [t.get_name() for t in self]
-         return task
-
-     def estimated_tokens(self) -> int:
-         """Estimates the number of tokens that will be required to run the focal task."""
-         return self.token_estimator(self.question)
-
-     def token_usage(self) -> TokensUsed:
-         """Returns the token usage for the task.
-
-         >>> qt = QuestionTaskCreator.example()
-         >>> answers = asyncio.run(qt._run_focal_task())
-         >>> qt.token_usage()
-         {'cached_tokens': TokenUsage(from_cache=True, prompt_tokens=0, completion_tokens=0), 'new_tokens': TokenUsage(from_cache=False, prompt_tokens=0, completion_tokens=0)}
-         """
-         return TokensUsed(
-             cached_tokens=self.cached_token_usage, new_tokens=self.new_token_usage
-         )
-
-     async def _run_focal_task(self) -> Answers:
-         """Run the focal task i.e., the question that we are interested in answering.
-
-         It is only called after all the dependency tasks are completed.
-
-         >>> qt = QuestionTaskCreator.example()
-         >>> answers = asyncio.run(qt._run_focal_task())
-         >>> answers.answer
-         'This is an example answer'
-         """
-
-         requested_tokens = self.estimated_tokens()
-         if (estimated_wait_time := self.tokens_bucket.wait_time(requested_tokens)) > 0:
-             self.task_status = TaskStatus.WAITING_FOR_TOKEN_CAPACITY
-
-         await self.tokens_bucket.get_tokens(requested_tokens)
-
-         if (estimated_wait_time := self.requests_bucket.wait_time(1)) > 0:
-             self.waiting = True # do we need this?
-             self.task_status = TaskStatus.WAITING_FOR_REQUEST_CAPACITY
-
-         await self.requests_bucket.get_tokens(1, cheat_bucket_capacity=True)
-
-         self.task_status = TaskStatus.API_CALL_IN_PROGRESS
-         try:
-             results = await self.answer_question_func(
-                 question=self.question, task=None # self
-             )
-             self.task_status = TaskStatus.SUCCESS
-         except Exception as e:
-             self.task_status = TaskStatus.FAILED
-             raise e
-
-         if results.cache_used:
-             self.tokens_bucket.add_tokens(requested_tokens)
-             self.requests_bucket.add_tokens(1)
-             self.from_cache = True
-             # Turbo mode means that we don't wait for tokens or requests.
-             self.tokens_bucket.turbo_mode_on()
-             self.requests_bucket.turbo_mode_on()
-         else:
-             self.tokens_bucket.turbo_mode_off()
-             self.requests_bucket.turbo_mode_off()
-
-         return results
-
-     @classmethod
-     def example(cls):
-         """Return an example instance of the class."""
-         from edsl import QuestionFreeText
-         from edsl.jobs.buckets.ModelBuckets import ModelBuckets
-
-         m = ModelBuckets.infinity_bucket()
-
-         from collections import namedtuple
-
-         AnswerDict = namedtuple("AnswerDict", ["answer", "cache_used"])
-         answer = AnswerDict(answer="This is an example answer", cache_used=False)
-
-         async def answer_question_func(question, task):
-             return answer
-
-         return cls(
-             question=QuestionFreeText.example(),
-             answer_question_func=answer_question_func,
-             model_buckets=m,
-             token_estimator=None,
-             iteration=0,
-         )
-
-     async def _run_task_async(self) -> None:
-         """Run the task asynchronously, awaiting the tasks that must be completed before this one can be run.
-
-         >>> qt1 = QuestionTaskCreator.example()
-         >>> qt2 = QuestionTaskCreator.example()
-         >>> qt2.add_dependency(qt1)
-
-         The method follows these steps:
-         1. Set the task_status to TaskStatus.WAITING_FOR_DEPENDENCIES, indicating that the task is waiting for its dependencies to complete.
-         2. Await asyncio.gather(*self, return_exceptions=True) to run all the dependent tasks concurrently.
-
-         - the return_exceptions=True flag ensures that the task does not raise an exception if any of the dependencies fail.
-
-         3. If any of the dependencies raise an exception:
-            - If it is a CancelledError, set the current task's task_status to TaskStatus.CANCELLED, and re-raise the CancelledError,
-              terminating the execution of the current task.
-            - If it is any other exception, set the task_status to TaskStatus.PARENT_FAILED, and raise a custom exception
-              InterviewErrorPriorTaskCanceled with the original exception as the cause, terminating the execution of the current task.
-         4. If all the dependencies complete successfully without raising any exceptions, the code reaches the else block.
-         5. In the else block, run the focal task (self._run_focal_task(debug)).
-
-         If any of the dependencies fail (raise an exception), the focal task will not run. The execution will be terminated,
-         and an exception will be raised to indicate the failure of the dependencies.
-
-         The focal task (self._run_focal_task(debug)) is only executed if all the dependencies complete successfully.
-
-         Args:
-             debug: A boolean value indicating whether to run the task in debug mode.
-
-         Returns:
-             None
-         """
-         try:
-             self.task_status = TaskStatus.WAITING_FOR_DEPENDENCIES
-             # If this were set to 'return_exceptions=False', then the first exception would be raised immediately.
-             # and it would cancel all the other tasks. This is not the behavior we want.
-
-             gather_results = await asyncio.gather(*self, return_exceptions=True)
-
-             for result in gather_results:
-                 if isinstance(result, Exception):
-                     raise result
-
-         except asyncio.CancelledError:
-             self.task_status = TaskStatus.CANCELLED
-             raise
-         except Exception as e:
-             # one of the dependencies failed
-             self.task_status = TaskStatus.PARENT_FAILED
-             # turns the parent exception into a custom exception so the task gets canceled but this InterviewErrorPriorTaskCanceled exception
-             raise InterviewErrorPriorTaskCanceled(
-                 f"Required tasks failed for {self.question.question_name}"
-             ) from e
-
-         # this only runs if all the dependencies are successful
-         return await self._run_focal_task()
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
+ import asyncio
+ from typing import Callable, Union, List, TYPE_CHECKING
+ from collections import UserList, UserDict
+
+ from edsl.exceptions.jobs import InterviewErrorPriorTaskCanceled
+
+ from edsl.jobs.tasks.task_status_enum import TaskStatus, TaskStatusDescriptor
+ from edsl.jobs.tasks.TaskStatusLog import TaskStatusLog
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
+ from edsl.jobs.Answers import Answers
+
+ if TYPE_CHECKING:
+     from edsl.questions.QuestionBase import QuestionBase
+     from edsl.jobs.buckets import ModelBuckets
+
+
+ class TokensUsed(UserDict):
+     """ "Container for tokens used by a task."""
+
+     def __init__(self, cached_tokens, new_tokens):
+         d = {"cached_tokens": cached_tokens, "new_tokens": new_tokens}
+         super().__init__(d)
+
+
+ class QuestionTaskCreator(UserList):
+     """Class to create and manage a single question and its dependencies.
+
+     It is a UserList with all the tasks that must be completed before the focal task can be run.
+     The focal task is the question that we are interested in answering.
+     """
+
+     task_status = TaskStatusDescriptor()
+
+     def __init__(
+         self,
+         *,
+         question: "QuestionBase",
+         answer_question_func: Callable,
+         model_buckets: "ModelBuckets",
+         token_estimator: Union[Callable, None] = None,
+         iteration: int = 0,
+     ):
+         """Initialize the QuestionTaskCreator instance.
+
+         :param question: the question that we are interested in answering.
+         :param answer_question_func: the function that will answer the question.
+         :param model_buckets: the bucket collection that contains the requests and tokens buckets which control the rate of API calls and token usage.
+         :param token_estimator: a function that estimates the number of tokens required to answer the question.
+         :param iteration: the iteration number of the question.
+
+         """
+         super().__init__([])
+         self.answer_question_func = answer_question_func
+         self.question = question
+         self.iteration = iteration
+
+         self.model_buckets = model_buckets
+
+         self.requests_bucket = self.model_buckets.requests_bucket
+         self.tokens_bucket = self.model_buckets.tokens_bucket
+
+         self.status_log = TaskStatusLog()
+
+         def fake_token_estimator(question):
+             return 1
+
+         self.token_estimator = token_estimator or fake_token_estimator
+
+         # Assume that the task is *not* from the cache until we know otherwise; the _run_focal_task might flip this bit later.
+         self.from_cache = False
+
+         self.cached_token_usage = TokenUsage(from_cache=True)
+         self.new_token_usage = TokenUsage(from_cache=False)
+         self.task_status = TaskStatus.NOT_STARTED
+
+     def add_dependency(self, task: asyncio.Task) -> None:
+         """Adds a task dependency to the list of dependencies.
+
+         >>> qt1 = QuestionTaskCreator.example()
+         >>> qt2 = QuestionTaskCreator.example()
+         >>> qt2.add_dependency(qt1)
+         >>> len(qt2)
+         1
+         """
+         self.append(task)
+
+     def generate_task(self) -> asyncio.Task:
+         """Create a task that depends on the passed-in dependencies."""
+         task = asyncio.create_task(
+             self._run_task_async(), name=self.question.question_name
+         )
+         task.depends_on = [t.get_name() for t in self]
+         return task
+
+     def estimated_tokens(self) -> int:
+         """Estimates the number of tokens that will be required to run the focal task."""
+         return self.token_estimator(self.question)
+
+     def token_usage(self) -> TokensUsed:
+         """Returns the token usage for the task.
+
+         >>> qt = QuestionTaskCreator.example()
+         >>> answers = asyncio.run(qt._run_focal_task())
+         >>> qt.token_usage()
+         {'cached_tokens': TokenUsage(from_cache=True, prompt_tokens=0, completion_tokens=0), 'new_tokens': TokenUsage(from_cache=False, prompt_tokens=0, completion_tokens=0)}
+         """
+         return TokensUsed(
+             cached_tokens=self.cached_token_usage, new_tokens=self.new_token_usage
+         )
+
+     async def _run_focal_task(self) -> Answers:
+         """Run the focal task i.e., the question that we are interested in answering.
+
+         It is only called after all the dependency tasks are completed.
+
+         >>> qt = QuestionTaskCreator.example()
+         >>> answers = asyncio.run(qt._run_focal_task())
+         >>> answers.answer
+         'This is an example answer'
+         """
+
+         requested_tokens = self.estimated_tokens()
+         if (estimated_wait_time := self.tokens_bucket.wait_time(requested_tokens)) > 0:
+             self.task_status = TaskStatus.WAITING_FOR_TOKEN_CAPACITY
+
+         await self.tokens_bucket.get_tokens(requested_tokens)
+
+         if (estimated_wait_time := self.model_buckets.requests_bucket.wait_time(1)) > 0:
+             self.waiting = True # do we need this?
+             self.task_status = TaskStatus.WAITING_FOR_REQUEST_CAPACITY
+
+         await self.model_buckets.requests_bucket.get_tokens(
+             1, cheat_bucket_capacity=True
+         )
+
+         self.task_status = TaskStatus.API_CALL_IN_PROGRESS
+         try:
+             results = await self.answer_question_func(
+                 question=self.question, task=None # self
+             )
+             self.task_status = TaskStatus.SUCCESS
+         except Exception as e:
+             self.task_status = TaskStatus.FAILED
+             raise e
+
+         if results.cache_used:
+             self.model_buckets.tokens_bucket.add_tokens(requested_tokens)
+             self.model_buckets.requests_bucket.add_tokens(1)
+             self.from_cache = True
+             # Turbo mode means that we don't wait for tokens or requests.
+             self.model_buckets.tokens_bucket.turbo_mode_on()
+             self.model_buckets.requests_bucket.turbo_mode_on()
+         else:
+             self.model_buckets.tokens_bucket.turbo_mode_off()
+             self.model_buckets.requests_bucket.turbo_mode_off()
+
+         return results
+
+     @classmethod
+     def example(cls):
+         """Return an example instance of the class."""
+         from edsl.questions.QuestionFreeText import QuestionFreeText
+         from edsl.jobs.buckets.ModelBuckets import ModelBuckets
+
+         m = ModelBuckets.infinity_bucket()
+
+         from collections import namedtuple
+
+         AnswerDict = namedtuple("AnswerDict", ["answer", "cache_used"])
+         answer = AnswerDict(answer="This is an example answer", cache_used=False)
+
+         async def answer_question_func(question, task):
+             return answer
+
+         return cls(
+             question=QuestionFreeText.example(),
+             answer_question_func=answer_question_func,
+             model_buckets=m,
+             token_estimator=None,
+             iteration=0,
+         )
+
+     async def _run_task_async(self) -> None:
+         """Run the task asynchronously, awaiting the tasks that must be completed before this one can be run.
+
+         >>> qt1 = QuestionTaskCreator.example()
+         >>> qt2 = QuestionTaskCreator.example()
+         >>> qt2.add_dependency(qt1)
+
+         The method follows these steps:
+         1. Set the task_status to TaskStatus.WAITING_FOR_DEPENDENCIES, indicating that the task is waiting for its dependencies to complete.
+         2. Await asyncio.gather(*self, return_exceptions=True) to run all the dependent tasks concurrently.
+
+         - the return_exceptions=True flag ensures that the task does not raise an exception if any of the dependencies fail.
+
+         3. If any of the dependencies raise an exception:
+            - If it is a CancelledError, set the current task's task_status to TaskStatus.CANCELLED, and re-raise the CancelledError,
+              terminating the execution of the current task.
+            - If it is any other exception, set the task_status to TaskStatus.PARENT_FAILED, and raise a custom exception
+              InterviewErrorPriorTaskCanceled with the original exception as the cause, terminating the execution of the current task.
+         4. If all the dependencies complete successfully without raising any exceptions, the code reaches the else block.
+         5. In the else block, run the focal task (self._run_focal_task(debug)).
+
+         If any of the dependencies fail (raise an exception), the focal task will not run. The execution will be terminated,
+         and an exception will be raised to indicate the failure of the dependencies.
+
+         The focal task (self._run_focal_task(debug)) is only executed if all the dependencies complete successfully.
+
+         Args:
+             debug: A boolean value indicating whether to run the task in debug mode.
+
+         Returns:
+             None
+         """
+         try:
+             self.task_status = TaskStatus.WAITING_FOR_DEPENDENCIES
+             # If this were set to 'return_exceptions=False', then the first exception would be raised immediately.
+             # and it would cancel all the other tasks. This is not the behavior we want.
+
+             gather_results = await asyncio.gather(*self, return_exceptions=True)
+
+             for result in gather_results:
+                 if isinstance(result, Exception):
+                     raise result
+
+         except asyncio.CancelledError:
+             self.task_status = TaskStatus.CANCELLED
+             raise
+         except Exception as e:
+             # one of the dependencies failed
+             self.task_status = TaskStatus.PARENT_FAILED
+             # turns the parent exception into a custom exception so the task gets canceled but this InterviewErrorPriorTaskCanceled exception
+             raise InterviewErrorPriorTaskCanceled(
+                 f"Required tasks failed for {self.question.question_name}"
+             ) from e
+
+         # this only runs if all the dependencies are successful
+         return await self._run_focal_task()
+
+
+ if __name__ == "__main__":
+     import doctest
+
+     doctest.testmod(optionflags=doctest.ELLIPSIS)
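
A minimal usage sketch of the QuestionTaskCreator shown in the hunk above, built from its own example() factory, add_dependency()/generate_task() methods, and doctests. It assumes only that this wheel is installed as edsl; example() supplies infinity buckets and a stub answer function, so no API keys are required, and the names main/upstream/focal are illustrative.

import asyncio

from edsl.jobs.tasks.QuestionTaskCreator import QuestionTaskCreator


async def main():
    # Two example creators: infinity buckets and a stubbed answer function.
    upstream = QuestionTaskCreator.example()
    focal = QuestionTaskCreator.example()

    # The focal question will only run after the upstream task finishes.
    focal.add_dependency(upstream.generate_task())

    # generate_task() wraps _run_task_async(), which gathers the dependencies
    # with return_exceptions=True before running the focal question.
    answer = await focal.generate_task()
    print(answer.answer)      # 'This is an example answer'
    print(focal.task_status)  # TaskStatus.SUCCESS


asyncio.run(main())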
edsl/jobs/tasks/TaskCreators.py +64 -64
@@ -1,64 +1,64 @@
- from typing import Callable, Union, List
- from collections import UserDict
-
- from edsl.jobs.tokens.TokenUsage import TokenUsage
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
-
-
- class TaskCreators(UserDict):
-     """A dictionary of task creators. A task is one question being answered.
-
-     This is used to track the status of the tasks within an interview.
-     """
-
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-
-     @property
-     def token_usage(self) -> InterviewTokenUsage:
-         """Determines how many tokens were used for the interview.
-
-         This is iterates through all tasks that make up an interview.
-         For each task, it determines how many tokens were used and whether they were cached or new.
-         It then sums the total number of cached and new tokens used for the interview.
-
-         """
-         cached_tokens = TokenUsage(from_cache=True)
-         new_tokens = TokenUsage(from_cache=False)
-         for task_creator in self.values():
-             token_usage = task_creator.token_usage()
-             cached_tokens += token_usage["cached_tokens"]
-             new_tokens += token_usage["new_tokens"]
-         return InterviewTokenUsage(
-             new_token_usage=new_tokens, cached_token_usage=cached_tokens
-         )
-
-     def print(self) -> None:
-         from rich import print
-
-         print({task.get_name(): task.task_status for task in self.values()})
-
-     @property
-     def interview_status(self) -> InterviewStatusDictionary:
-         """Returns a dictionary, InterviewStatusDictionary, mapping task status codes to counts of tasks in that state.
-
-         >>> t = TaskCreators()
-         >>> t.interview_status
-         InterviewStatusDictionary({<TaskStatus.NOT_STARTED: 1>: 0, <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>: 0, <TaskStatus.CANCELLED: 3>: 0, <TaskStatus.PARENT_FAILED: 4>: 0, <TaskStatus.WAITING_FOR_REQUEST_CAPACITY: 5>: 0, <TaskStatus.WAITING_FOR_TOKEN_CAPACITY: 6>: 0, <TaskStatus.API_CALL_IN_PROGRESS: 7>: 0, <TaskStatus.SUCCESS: 8>: 0, <TaskStatus.FAILED: 9>: 0, 'number_from_cache': 0})
-         """
-         status_dict = InterviewStatusDictionary()
-         for task_creator in self.values():
-             status_dict[task_creator.task_status] += 1
-             status_dict["number_from_cache"] += task_creator.from_cache
-         return status_dict
-
-     def status_logs(self):
-         """Returns a list of status logs for each task."""
-         return [task_creator.status_log for task_creator in self.values()]
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
+ from typing import Callable, Union, List
+ from collections import UserDict
+
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
+ from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
+
+
+ class TaskCreators(UserDict):
+     """A dictionary of task creators. A task is one question being answered.
+
+     This is used to track the status of the tasks within an interview.
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+     @property
+     def token_usage(self) -> InterviewTokenUsage:
+         """Determines how many tokens were used for the interview.
+
+         This is iterates through all tasks that make up an interview.
+         For each task, it determines how many tokens were used and whether they were cached or new.
+         It then sums the total number of cached and new tokens used for the interview.
+
+         """
+         cached_tokens = TokenUsage(from_cache=True)
+         new_tokens = TokenUsage(from_cache=False)
+         for task_creator in self.values():
+             token_usage = task_creator.token_usage()
+             cached_tokens += token_usage["cached_tokens"]
+             new_tokens += token_usage["new_tokens"]
+         return InterviewTokenUsage(
+             new_token_usage=new_tokens, cached_token_usage=cached_tokens
+         )
+
+     def print(self) -> None:
+         from rich import print
+
+         print({task.get_name(): task.task_status for task in self.values()})
+
+     @property
+     def interview_status(self) -> InterviewStatusDictionary:
+         """Returns a dictionary, InterviewStatusDictionary, mapping task status codes to counts of tasks in that state.
+
+         >>> t = TaskCreators()
+         >>> t.interview_status
+         InterviewStatusDictionary({<TaskStatus.NOT_STARTED: 1>: 0, <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>: 0, <TaskStatus.CANCELLED: 3>: 0, <TaskStatus.PARENT_FAILED: 4>: 0, <TaskStatus.WAITING_FOR_REQUEST_CAPACITY: 5>: 0, <TaskStatus.WAITING_FOR_TOKEN_CAPACITY: 6>: 0, <TaskStatus.API_CALL_IN_PROGRESS: 7>: 0, <TaskStatus.SUCCESS: 8>: 0, <TaskStatus.FAILED: 9>: 0, 'number_from_cache': 0})
+         """
+         status_dict = InterviewStatusDictionary()
+         for task_creator in self.values():
+             status_dict[task_creator.task_status] += 1
+             status_dict["number_from_cache"] += task_creator.from_cache
+         return status_dict
+
+     def status_logs(self):
+         """Returns a list of status logs for each task."""
+         return [task_creator.status_log for task_creator in self.values()]
+
+
+ if __name__ == "__main__":
+     import doctest
+
+     doctest.testmod(optionflags=doctest.ELLIPSIS)
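
A companion sketch for TaskCreators, under the same assumption that the wheel is installed; the key "example_question" is an arbitrary label chosen for illustration, and the printed output depends on the classes' own repr methods.

from edsl.jobs.tasks.QuestionTaskCreator import QuestionTaskCreator
from edsl.jobs.tasks.TaskCreators import TaskCreators

# One entry per question task; the key is an arbitrary label for this sketch.
creators = TaskCreators()
creators["example_question"] = QuestionTaskCreator.example()

# interview_status tallies tasks by TaskStatus; a fresh example counts as NOT_STARTED.
print(creators.interview_status)

# token_usage sums cached versus newly generated TokenUsage across all task creators.
print(creators.token_usage)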