edsl 0.1.39.dev3__py3-none-any.whl → 0.1.39.dev4__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in the public registry.
- edsl/Base.py +413 -332
- edsl/BaseDiff.py +260 -260
- edsl/TemplateLoader.py +24 -24
- edsl/__init__.py +57 -49
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +1071 -867
- edsl/agents/AgentList.py +551 -413
- edsl/agents/Invigilator.py +284 -233
- edsl/agents/InvigilatorBase.py +257 -270
- edsl/agents/PromptConstructor.py +272 -354
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/__init__.py +2 -3
- edsl/agents/descriptors.py +99 -99
- edsl/agents/prompt_helpers.py +129 -129
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -117
- edsl/auto/StageBase.py +243 -230
- edsl/auto/StageGenerateSurvey.py +178 -178
- edsl/auto/StageLabelQuestions.py +125 -125
- edsl/auto/StagePersona.py +61 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
- edsl/auto/StagePersonaDimensionValues.py +74 -74
- edsl/auto/StagePersonaDimensions.py +69 -69
- edsl/auto/StageQuestions.py +74 -73
- edsl/auto/SurveyCreatorPipeline.py +21 -21
- edsl/auto/utilities.py +218 -224
- edsl/base/Base.py +279 -279
- edsl/config.py +177 -157
- edsl/conversation/Conversation.py +290 -290
- edsl/conversation/car_buying.py +59 -58
- edsl/conversation/chips.py +95 -95
- edsl/conversation/mug_negotiation.py +81 -81
- edsl/conversation/next_speaker_utilities.py +93 -93
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -54
- edsl/coop/__init__.py +2 -2
- edsl/coop/coop.py +1106 -1028
- edsl/coop/utils.py +131 -131
- edsl/data/Cache.py +573 -555
- edsl/data/CacheEntry.py +230 -233
- edsl/data/CacheHandler.py +168 -149
- edsl/data/RemoteCacheSync.py +186 -78
- edsl/data/SQLiteDict.py +292 -292
- edsl/data/__init__.py +5 -4
- edsl/data/hack.py +10 -0
- edsl/data/orm.py +10 -10
- edsl/data_transfer_models.py +74 -73
- edsl/enums.py +202 -175
- edsl/exceptions/BaseException.py +21 -21
- edsl/exceptions/__init__.py +54 -54
- edsl/exceptions/agents.py +54 -42
- edsl/exceptions/cache.py +5 -5
- edsl/exceptions/configuration.py +16 -16
- edsl/exceptions/coop.py +10 -10
- edsl/exceptions/data.py +14 -14
- edsl/exceptions/general.py +34 -34
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +33 -33
- edsl/exceptions/language_models.py +63 -63
- edsl/exceptions/prompts.py +15 -15
- edsl/exceptions/questions.py +109 -91
- edsl/exceptions/results.py +29 -29
- edsl/exceptions/scenarios.py +29 -22
- edsl/exceptions/surveys.py +37 -37
- edsl/inference_services/AnthropicService.py +106 -87
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -120
- edsl/inference_services/AzureAI.py +215 -217
- edsl/inference_services/DeepInfraService.py +18 -18
- edsl/inference_services/GoogleService.py +143 -148
- edsl/inference_services/GroqService.py +20 -20
- edsl/inference_services/InferenceServiceABC.py +80 -147
- edsl/inference_services/InferenceServicesCollection.py +138 -97
- edsl/inference_services/MistralAIService.py +120 -123
- edsl/inference_services/OllamaService.py +18 -18
- edsl/inference_services/OpenAIService.py +236 -224
- edsl/inference_services/PerplexityService.py +160 -163
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -89
- edsl/inference_services/TogetherAIService.py +172 -170
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -118
- edsl/inference_services/rate_limits_cache.py +25 -25
- edsl/inference_services/registry.py +41 -41
- edsl/inference_services/write_available.py +10 -10
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +43 -56
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +823 -898
- edsl/jobs/JobsChecks.py +172 -147
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -268
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -239
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/__init__.py +1 -1
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -63
- edsl/jobs/buckets/ModelBuckets.py +65 -65
- edsl/jobs/buckets/TokenBucket.py +283 -251
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +396 -661
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
- edsl/jobs/interviews/InterviewStatistic.py +63 -63
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
- edsl/jobs/interviews/InterviewStatusLog.py +92 -92
- edsl/jobs/interviews/ReportErrors.py +66 -66
- edsl/jobs/interviews/interview_status_enum.py +9 -9
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -466
- edsl/jobs/runners/JobsRunnerStatus.py +297 -330
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
- edsl/jobs/tasks/TaskCreators.py +64 -64
- edsl/jobs/tasks/TaskHistory.py +470 -450
- edsl/jobs/tasks/TaskStatusLog.py +23 -23
- edsl/jobs/tasks/task_status_enum.py +161 -163
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
- edsl/jobs/tokens/TokenUsage.py +34 -34
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +626 -668
- edsl/language_models/ModelList.py +164 -155
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/__init__.py +2 -3
- edsl/language_models/fake_openai_call.py +15 -15
- edsl/language_models/fake_openai_service.py +61 -61
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +156 -156
- edsl/language_models/utilities.py +65 -64
- edsl/notebooks/Notebook.py +263 -258
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -1
- edsl/prompts/Prompt.py +352 -362
- edsl/prompts/__init__.py +2 -2
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -664
- edsl/questions/QuestionBasePromptsMixin.py +221 -217
- edsl/questions/QuestionBudget.py +227 -227
- edsl/questions/QuestionCheckBox.py +359 -359
- edsl/questions/QuestionExtract.py +180 -182
- edsl/questions/QuestionFreeText.py +113 -114
- edsl/questions/QuestionFunctional.py +166 -166
- edsl/questions/QuestionList.py +223 -231
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +330 -286
- edsl/questions/QuestionNumerical.py +151 -153
- edsl/questions/QuestionRank.py +314 -324
- edsl/questions/Quick.py +41 -41
- edsl/questions/SimpleAskMixin.py +74 -73
- edsl/questions/__init__.py +27 -26
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +334 -289
- edsl/questions/compose_questions.py +98 -98
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -21
- edsl/questions/derived/QuestionLikertFive.py +76 -76
- edsl/questions/derived/QuestionLinearScale.py +90 -87
- edsl/questions/derived/QuestionTopK.py +93 -93
- edsl/questions/derived/QuestionYesNo.py +82 -82
- edsl/questions/descriptors.py +427 -413
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -13
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
- edsl/questions/prompt_templates/question_extract.jinja +11 -11
- edsl/questions/prompt_templates/question_free_text.jinja +3 -3
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
- edsl/questions/prompt_templates/question_list.jinja +17 -17
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
- edsl/questions/prompt_templates/question_numerical.jinja +36 -36
- edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +168 -161
- edsl/questions/question_registry.py +177 -177
- edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +71 -71
- edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +188 -174
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +12 -12
- edsl/questions/templates/budget/answering_instructions.jinja +7 -7
- edsl/questions/templates/budget/question_presentation.jinja +7 -7
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
- edsl/questions/templates/extract/answering_instructions.jinja +7 -7
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
- edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
- edsl/questions/templates/list/answering_instructions.jinja +3 -3
- edsl/questions/templates/list/question_presentation.jinja +5 -5
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
- edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
- edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
- edsl/questions/templates/numerical/question_presentation.jinja +6 -6
- edsl/questions/templates/rank/answering_instructions.jinja +11 -11
- edsl/questions/templates/rank/question_presentation.jinja +15 -15
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
- edsl/questions/templates/top_k/question_presentation.jinja +22 -22
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
- edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
- edsl/results/CSSParameterizer.py +108 -108
- edsl/results/Dataset.py +587 -424
- edsl/results/DatasetExportMixin.py +594 -731
- edsl/results/DatasetTree.py +295 -275
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +557 -465
- edsl/results/Results.py +1183 -1165
- edsl/results/ResultsExportMixin.py +45 -43
- edsl/results/ResultsGGMixin.py +121 -121
- edsl/results/TableDisplay.py +125 -198
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +2 -2
- edsl/results/file_exports.py +252 -0
- edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +33 -33
- edsl/results/{Selector.py → results_selector.py} +145 -135
- edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +98 -98
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +77 -77
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -115
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +511 -632
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +498 -601
- edsl/scenarios/ScenarioHtmlMixin.py +65 -64
- edsl/scenarios/ScenarioList.py +1458 -1287
- edsl/scenarios/ScenarioListExportMixin.py +45 -52
- edsl/scenarios/ScenarioListPdfMixin.py +239 -261
- edsl/scenarios/__init__.py +3 -4
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +38 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +131 -127
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -1
- edsl/study/ObjectEntry.py +173 -173
- edsl/study/ProofOfWork.py +113 -113
- edsl/study/SnapShot.py +80 -80
- edsl/study/Study.py +521 -528
- edsl/study/__init__.py +4 -4
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +148 -148
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +31 -31
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +244 -244
- edsl/surveys/Rule.py +327 -326
- edsl/surveys/RuleCollection.py +385 -387
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1280 -1801
- edsl/surveys/SurveyCSS.py +273 -261
- edsl/surveys/SurveyExportMixin.py +259 -259
- edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +181 -179
- edsl/surveys/SurveyQualtricsImport.py +284 -284
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +5 -3
- edsl/surveys/base.py +53 -53
- edsl/surveys/descriptors.py +60 -56
- edsl/surveys/instructions/ChangeInstruction.py +48 -49
- edsl/surveys/instructions/Instruction.py +56 -65
- edsl/surveys/instructions/InstructionCollection.py +82 -77
- edsl/templates/error_reporting/base.html +23 -23
- edsl/templates/error_reporting/exceptions_by_model.html +34 -34
- edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
- edsl/templates/error_reporting/exceptions_by_type.html +16 -16
- edsl/templates/error_reporting/interview_details.html +115 -115
- edsl/templates/error_reporting/interviews.html +19 -19
- edsl/templates/error_reporting/overview.html +4 -4
- edsl/templates/error_reporting/performance_plot.html +1 -1
- edsl/templates/error_reporting/report.css +73 -73
- edsl/templates/error_reporting/report.html +117 -117
- edsl/templates/error_reporting/report.js +25 -25
- edsl/test_h +1 -0
- edsl/tools/__init__.py +1 -1
- edsl/tools/clusters.py +192 -192
- edsl/tools/embeddings.py +27 -27
- edsl/tools/embeddings_plotting.py +118 -118
- edsl/tools/plotting.py +112 -112
- edsl/tools/summarize.py +18 -18
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +28 -28
- edsl/utilities/__init__.py +22 -22
- edsl/utilities/ast_utilities.py +25 -25
- edsl/utilities/data/Registry.py +6 -6
- edsl/utilities/data/__init__.py +1 -1
- edsl/utilities/data/scooter_results.json +1 -1
- edsl/utilities/decorators.py +77 -77
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
- edsl/utilities/gcp_bucket/example.py +50 -0
- edsl/utilities/interface.py +627 -627
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -263
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -28
- edsl/utilities/restricted_python.py +70 -70
- edsl/utilities/utilities.py +436 -424
- {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/LICENSE +21 -21
- {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/METADATA +13 -11
- edsl-0.1.39.dev4.dist-info/RECORD +361 -0
- edsl/language_models/KeyLookup.py +0 -30
- edsl/language_models/registry.py +0 -190
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/results/ResultsDBMixin.py +0 -238
- edsl-0.1.39.dev3.dist-info/RECORD +0 -277
- {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/WHEEL +0 -0
edsl/jobs/interviews/Interview.py
@@ -1,661 +1,396 @@
-"""This module contains the Interview class, which is responsible for conducting an interview asynchronously."""
-
-from __future__ import annotations
-import asyncio
-from typing import Any, Type, List, Generator, Optional, Union
-import copy
- [removed lines 7-396 are truncated in this rendering; only fragments survive (import stubs such as "from edsl import", "from edsl.jobs.", attribute assignments such as "self.", "@property", "exceptions ="), so that part of the removed file is not reproduced here]
-                question.question_name in self.exceptions
-                and had_language_model_no_response_error
-            ):
-                self.exceptions.record_fixed_question(question.question_name)
-
-            return response
-
-        try:
-            return await attempt_answer()
-        except RetryError as retry_error:
-            # All retries have failed for LanguageModelNoResponseError
-            original_error = retry_error.last_attempt.exception()
-            self._handle_exception(
-                original_error, self._get_invigilator(question), task
-            )
-            raise original_error  # Re-raise the original error after handling
-
-    def _get_invigilator(self, question: QuestionBase) -> InvigilatorBase:
-        """Return an invigilator for the given question.
-
-        :param question: the question to be answered
-        :param debug: whether to use debug mode, in which case `InvigilatorDebug` is used.
-        """
-        invigilator = self.agent.create_invigilator(
-            question=question,
-            scenario=self.scenario,
-            model=self.model,
-            debug=False,
-            survey=self.survey,
-            memory_plan=self.survey.memory_plan,
-            current_answers=self.answers,
-            iteration=self.iteration,
-            cache=self.cache,
-            sidecar_model=self.sidecar_model,
-            raise_validation_errors=self.raise_validation_errors,
-        )
-        """Return an invigilator for the given question."""
-        return invigilator
-
-    def _skip_this_question(self, current_question: "QuestionBase") -> bool:
-        """Determine if the current question should be skipped.
-
-        :param current_question: the question to be answered.
-        """
-        current_question_index = self.to_index[current_question.question_name]
-
-        answers = self.answers | self.scenario | self.agent["traits"]
-        skip = self.survey.rule_collection.skip_question_before_running(
-            current_question_index, answers
-        )
-        return skip
-
-    def _handle_exception(
-        self, e: Exception, invigilator: "InvigilatorBase", task=None
-    ):
-        import copy
-
-        # breakpoint()
-
-        answers = copy.copy(self.answers)
-        exception_entry = InterviewExceptionEntry(
-            exception=e,
-            invigilator=invigilator,
-            answers=answers,
-        )
-        if task:
-            task.task_status = TaskStatus.FAILED
-        self.exceptions.add(invigilator.question.question_name, exception_entry)
-
-        if self.raise_validation_errors:
-            if isinstance(e, QuestionAnswerValidationError):
-                raise e
-
-        if hasattr(self, "stop_on_exception"):
-            stop_on_exception = self.stop_on_exception
-        else:
-            stop_on_exception = False
-
-        if stop_on_exception:
-            raise e
-
-    def _cancel_skipped_questions(self, current_question: QuestionBase) -> None:
-        """Cancel the tasks for questions that are skipped.
-
-        :param current_question: the question that was just answered.
-
-        It first determines the next question, given the current question and the current answers.
-        If the next question is the end of the survey, it cancels all remaining tasks.
-        If the next question is after the current question, it cancels all tasks between the current question and the next question.
-        """
-        current_question_index: int = self.to_index[current_question.question_name]
-
-        next_question: Union[
-            int, EndOfSurvey
-        ] = self.survey.rule_collection.next_question(
-            q_now=current_question_index,
-            answers=self.answers | self.scenario | self.agent["traits"],
-        )
-
-        next_question_index = next_question.next_q
-
-        def cancel_between(start, end):
-            """Cancel the tasks between the start and end indices."""
-            for i in range(start, end):
-                self.tasks[i].cancel()
-
-        if next_question_index == EndOfSurvey:
-            cancel_between(current_question_index + 1, len(self.survey.questions))
-            return
-
-        if next_question_index > (current_question_index + 1):
-            cancel_between(current_question_index + 1, next_question_index)
-
-    # endregion
-
-    # region: Conducting the interview
-    async def async_conduct_interview(
-        self,
-        model_buckets: Optional[ModelBuckets] = None,
-        stop_on_exception: bool = False,
-        sidecar_model: Optional["LanguageModel"] = None,
-        raise_validation_errors: bool = True,
-    ) -> tuple["Answers", List[dict[str, Any]]]:
-        """
-        Conduct an Interview asynchronously.
-        It returns a tuple with the answers and a list of valid results.
-
-        :param model_buckets: a dictionary of token buckets for the model.
-        :param debug: run without calls to LLM.
-        :param stop_on_exception: if True, stops the interview if an exception is raised.
-        :param sidecar_model: a sidecar model used to answer questions.
-
-        Example usage:
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> result['q0']
-        'yes'
-
-        >>> i = Interview.example(throw_exception = True)
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> i.exceptions
-        {'q0': ...
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview(stop_on_exception = True))
-        Traceback (most recent call last):
-        ...
-        asyncio.exceptions.CancelledError
-        """
-        self.sidecar_model = sidecar_model
-        self.stop_on_exception = stop_on_exception
-
-        # if no model bucket is passed, create an 'infinity' bucket with no rate limits
-        if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
-            model_buckets = ModelBuckets.infinity_bucket()
-
-        ## This is the key part---it creates a task for each question,
-        ## with dependencies on the questions that must be answered before this one can be answered.
-        self.tasks = self._build_question_tasks(model_buckets=model_buckets)
-
-        ## 'Invigilators' are used to administer the survey
-        self.invigilators = [
-            self._get_invigilator(question) for question in self.survey.questions
-        ]
-        await asyncio.gather(
-            *self.tasks, return_exceptions=not stop_on_exception
-        )  # not stop_on_exception)
-        self.answers.replace_missing_answers_with_none(self.survey)
-        valid_results = list(self._extract_valid_results())
-        return self.answers, valid_results
-
-    # endregion
-
-    # region: Extracting results and recording errors
-    def _extract_valid_results(self) -> Generator["Answers", None, None]:
-        """Extract the valid results from the list of results.
-
-        It iterates through the tasks and invigilators, and yields the results of the tasks that are done.
-        If a task is not done, it raises a ValueError.
-        If an exception is raised in the task, it records the exception in the Interview instance except if the task was cancelled, which is expected behavior.
-
-        >>> i = Interview.example()
-        >>> result, _ = asyncio.run(i.async_conduct_interview())
-        >>> results = list(i._extract_valid_results())
-        >>> len(results) == len(i.survey)
-        True
-        """
-        assert len(self.tasks) == len(self.invigilators)
-
-        for task, invigilator in zip(self.tasks, self.invigilators):
-            if not task.done():
-                raise ValueError(f"Task {task.get_name()} is not done.")
-
-            try:
-                result = task.result()
-            except asyncio.CancelledError as e:  # task was cancelled
-                result = invigilator.get_failed_task_result(
-                    failure_reason="Task was cancelled."
-                )
-            except Exception as e:  # any other kind of exception in the task
-                result = invigilator.get_failed_task_result(
-                    failure_reason=f"Task failed with exception: {str(e)}."
-                )
-                exception_entry = InterviewExceptionEntry(
-                    exception=e,
-                    invigilator=invigilator,
-                )
-                self.exceptions.add(task.get_name(), exception_entry)
-
-            yield result
-
-    # endregion
-
-    # region: Magic methods
-    def __repr__(self) -> str:
-        """Return a string representation of the Interview instance."""
-        return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"
-
-    def duplicate(self, iteration: int, cache: "Cache") -> Interview:
-        """Duplicate the interview, but with a new iteration number and cache.
-
-        >>> i = Interview.example()
-        >>> i2 = i.duplicate(1, None)
-        >>> i.iteration + 1 == i2.iteration
-        True
-
-        """
-        return Interview(
-            agent=self.agent,
-            survey=self.survey,
-            scenario=self.scenario,
-            model=self.model,
-            iteration=iteration,
-            cache=cache,
-            skip_retry=self.skip_retry,
-        )
-
-    @classmethod
-    def example(self, throw_exception: bool = False) -> Interview:
-        """Return an example Interview instance."""
-        from edsl.agents import Agent
-        from edsl.surveys import Survey
-        from edsl.scenarios import Scenario
-        from edsl.language_models import LanguageModel
-
-        def f(self, question, scenario):
-            return "yes"
-
-        agent = Agent.example()
-        agent.add_direct_question_answering_method(f)
-        survey = Survey.example()
-        scenario = Scenario.example()
-        model = LanguageModel.example()
-        if throw_exception:
-            model = LanguageModel.example(test_model=True, throw_exception=True)
-            agent = Agent.example()
-            return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
-        return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
-
-
-if __name__ == "__main__":
-    import doctest
-
-    # add ellipsis
-    doctest.testmod(optionflags=doctest.ELLIPSIS)
+"""This module contains the Interview class, which is responsible for conducting an interview asynchronously."""
+
+from __future__ import annotations
+import asyncio
+from typing import Any, Type, List, Generator, Optional, Union, TYPE_CHECKING
+import copy
+from dataclasses import dataclass
+
+# from edsl.jobs.Answers import Answers
+from edsl.jobs.data_structures import Answers
+from edsl.jobs.interviews.InterviewStatusLog import InterviewStatusLog
+from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
+from edsl.jobs.interviews.InterviewExceptionCollection import (
+    InterviewExceptionCollection,
+)
+from edsl.jobs.interviews.InterviewExceptionEntry import InterviewExceptionEntry
+from edsl.jobs.buckets.ModelBuckets import ModelBuckets
+from edsl.jobs.AnswerQuestionFunctionConstructor import (
+    AnswerQuestionFunctionConstructor,
+)
+from edsl.jobs.InterviewTaskManager import InterviewTaskManager
+from edsl.jobs.FetchInvigilator import FetchInvigilator
+from edsl.jobs.RequestTokenEstimator import RequestTokenEstimator
+
+
+if TYPE_CHECKING:
+    from edsl.agents.Agent import Agent
+    from edsl.surveys.Survey import Survey
+    from edsl.scenarios.Scenario import Scenario
+    from edsl.data.Cache import Cache
+    from edsl.language_models.LanguageModel import LanguageModel
+    from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
+    from edsl.agents.InvigilatorBase import InvigilatorBase
+    from edsl.language_models.key_management.KeyLookup import KeyLookup
+
+
+@dataclass
+class InterviewRunningConfig:
+    cache: Optional["Cache"] = (None,)
+    skip_retry: bool = (False,)  # COULD BE SET WITH CONFIG
+    raise_validation_errors: bool = (True,)
+    stop_on_exception: bool = (False,)
+
+
+class Interview:
+    """
+    An 'interview' is one agent answering one survey, with one language model, for a given scenario.
+
+    The main method is `async_conduct_interview`, which conducts the interview asynchronously.
+    Most of the class is dedicated to creating the tasks for each question in the survey, and then running them.
+    """
+
+    def __init__(
+        self,
+        agent: Agent,
+        survey: Survey,
+        scenario: Scenario,
+        model: Type["LanguageModel"],
+        iteration: int = 0,
+        indices: dict = None,  # explain?
+        cache: Optional["Cache"] = None,
+        skip_retry: bool = False,  # COULD BE SET WITH CONFIG
+        raise_validation_errors: bool = True,
+    ):
+        """Initialize the Interview instance.
+
+        :param agent: the agent being interviewed.
+        :param survey: the survey being administered to the agent.
+        :param scenario: the scenario that populates the survey questions.
+        :param model: the language model used to answer the questions.
+        # :param debug: if True, run without calls to the language model.
+        :param iteration: the iteration number of the interview.
+        :param cache: the cache used to store the answers.
+
+        >>> i = Interview.example()
+        >>> i.task_manager.task_creators
+        {}
+
+        >>> i.exceptions
+        {}
+
+        >>> _ = asyncio.run(i.async_conduct_interview())
+        >>> i.task_status_logs['q0']
+        [{'log_time': ..., 'value': <TaskStatus.NOT_STARTED: 1>}, {'log_time': ..., 'value': <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>}, {'log_time': ..., 'value': <TaskStatus.API_CALL_IN_PROGRESS: 7>}, {'log_time': ..., 'value': <TaskStatus.SUCCESS: 8>}]
+
+        >>> i.to_index
+        {'q0': 0, 'q1': 1, 'q2': 2}
+
+        """
+        self.agent = agent
+        self.survey = copy.deepcopy(survey)  # why do we need to deepcopy the survey?
+        self.scenario = scenario
+        self.model = model
+        self.iteration = iteration
+
+        self.answers = Answers()  # will get filled in as interview progresses
+
+        self.task_manager = InterviewTaskManager(
+            survey=self.survey,
+            iteration=iteration,
+        )
+
+        self.exceptions = InterviewExceptionCollection()
+
+        self.running_config = InterviewRunningConfig(
+            cache=cache,
+            skip_retry=skip_retry,
+            raise_validation_errors=raise_validation_errors,
+        )
+
+        self.cache = cache
+        self.skip_retry = skip_retry
+        self.raise_validation_errors = raise_validation_errors
+
+        # dictionary mapping question names to their index in the survey.
+        self.to_index = {
+            question_name: index
+            for index, question_name in enumerate(self.survey.question_names)
+        }
+
+        self.failed_questions = []
+
+        self.indices = indices
+        self.initial_hash = hash(self)
+
+    @property
+    def has_exceptions(self) -> bool:
+        """Return True if there are exceptions."""
+        return len(self.exceptions) > 0
+
+    @property
+    def task_status_logs(self) -> InterviewStatusLog:
+        """Return the task status logs for the interview.
+
+        The keys are the question names; the values are the lists of status log changes for each task.
+        """
+        return self.task_manager.task_status_logs
+
+    @property
+    def token_usage(self) -> InterviewTokenUsage:
+        """Determine how many tokens were used for the interview."""
+        return self.task_manager.token_usage  # task_creators.token_usage
+
+    @property
+    def interview_status(self) -> InterviewStatusDictionary:
+        """Return a dictionary mapping task status codes to counts."""
+        # return self.task_creators.interview_status
+        return self.task_manager.interview_status
+
+    def to_dict(self, include_exceptions=True, add_edsl_version=True) -> dict[str, Any]:
+        """Return a dictionary representation of the Interview instance.
+        This is just for hashing purposes.
+
+        >>> i = Interview.example()
+        >>> hash(i)
+        193593189022259693
+        """
+        d = {
+            "agent": self.agent.to_dict(add_edsl_version=add_edsl_version),
+            "survey": self.survey.to_dict(add_edsl_version=add_edsl_version),
+            "scenario": self.scenario.to_dict(add_edsl_version=add_edsl_version),
+            "model": self.model.to_dict(add_edsl_version=add_edsl_version),
+            "iteration": self.iteration,
+            "exceptions": {},
+        }
+        if include_exceptions:
+            d["exceptions"] = self.exceptions.to_dict()
+        if hasattr(self, "indices"):
+            d["indices"] = self.indices
+        return d
+
+    @classmethod
+    def from_dict(cls, d: dict[str, Any]) -> "Interview":
+        """Return an Interview instance from a dictionary."""
+
+        from edsl.agents.Agent import Agent
+        from edsl.surveys.Survey import Survey
+        from edsl.scenarios.Scenario import Scenario
+        from edsl.language_models.LanguageModel import LanguageModel
+
+        agent = Agent.from_dict(d["agent"])
+        survey = Survey.from_dict(d["survey"])
+        scenario = Scenario.from_dict(d["scenario"])
+        model = LanguageModel.from_dict(d["model"])
+        iteration = d["iteration"]
+        params = {
+            "agent": agent,
+            "survey": survey,
+            "scenario": scenario,
+            "model": model,
+            "iteration": iteration,
+        }
+        if "indices" in d:
+            params["indices"] = d["indices"]
+        interview = cls(**params)
+        if "exceptions" in d:
+            exceptions = InterviewExceptionCollection.from_dict(d["exceptions"])
+            interview.exceptions = exceptions
+        return interview
+
+    def __hash__(self) -> int:
+        from edsl.utilities.utilities import dict_hash
+
+        return dict_hash(self.to_dict(include_exceptions=False, add_edsl_version=False))
+
+    def __eq__(self, other: "Interview") -> bool:
+        """
+        >>> from edsl.jobs.interviews.Interview import Interview; i = Interview.example(); d = i.to_dict(); i2 = Interview.from_dict(d); i == i2
+        True
+        """
+        return hash(self) == hash(other)
+
+    async def async_conduct_interview(
+        self,
+        run_config: Optional["RunConfig"] = None,
+        # model_buckets: Optional[ModelBuckets] = None,
+        # stop_on_exception: bool = False,
+        # raise_validation_errors: bool = True,
+        # key_lookup: Optional[KeyLookup] = None,
+    ) -> tuple["Answers", List[dict[str, Any]]]:
+        """
+        Conduct an Interview asynchronously.
+        It returns a tuple with the answers and a list of valid results.
+
+        :param model_buckets: a dictionary of token buckets for the model.
+        :param debug: run without calls to LLM.
+        :param stop_on_exception: if True, stops the interview if an exception is raised.
+
+        Example usage:
+
+        >>> i = Interview.example()
+        >>> result, _ = asyncio.run(i.async_conduct_interview())
+        >>> result['q0']
+        'yes'
+
+        >>> i = Interview.example(throw_exception = True)
+        >>> result, _ = asyncio.run(i.async_conduct_interview())
+        >>> i.exceptions
+        {'q0': ...
+        >>> i = Interview.example()
+        >>> from edsl.jobs.Jobs import RunConfig, RunParameters, RunEnvironment
+        >>> run_config = RunConfig(parameters = RunParameters(), environment = RunEnvironment())
+        >>> run_config.parameters.stop_on_exception = True
+        >>> result, _ = asyncio.run(i.async_conduct_interview(run_config))
+        Traceback (most recent call last):
+        ...
+        asyncio.exceptions.CancelledError
+        """
+        from edsl.jobs.Jobs import RunConfig, RunParameters, RunEnvironment
+
+        if run_config is None:
+            run_config = RunConfig(
+                parameters=RunParameters(),
+                environment=RunEnvironment(),
+            )
+        self.stop_on_exception = run_config.parameters.stop_on_exception
+
+        # if no model bucket is passed, create an 'infinity' bucket with no rate limits
+        bucket_collection = run_config.environment.bucket_collection
+
+        if bucket_collection:
+            model_buckets = bucket_collection.get(self.model)
+        else:
+            model_buckets = None
+
+        if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
+            model_buckets = ModelBuckets.infinity_bucket()
+
+        # was "self.tasks" - is that necessary?
+        self.tasks = self.task_manager.build_question_tasks(
+            answer_func=AnswerQuestionFunctionConstructor(
+                self, key_lookup=run_config.environment.key_lookup
+            )(),
+            token_estimator=RequestTokenEstimator(self),
+            model_buckets=model_buckets,
+        )
+
+        ## This is the key part---it creates a task for each question,
+        ## with dependencies on the questions that must be answered before this one can be answered.
+
+        ## 'Invigilators' are used to administer the survey.
+        fetcher = FetchInvigilator(
+            interview=self,
+            current_answers=self.answers,
+            key_lookup=run_config.environment.key_lookup,
+        )
+        self.invigilators = [fetcher(question) for question in self.survey.questions]
+        await asyncio.gather(
+            *self.tasks, return_exceptions=not run_config.parameters.stop_on_exception
+        )
+        self.answers.replace_missing_answers_with_none(self.survey)
+        valid_results = list(
+            self._extract_valid_results(self.tasks, self.invigilators, self.exceptions)
+        )
+        return self.answers, valid_results
+
+    @staticmethod
+    def _extract_valid_results(
+        tasks: List["asyncio.Task"],
+        invigilators: List["InvigilatorBase"],
+        exceptions: InterviewExceptionCollection,
+    ) -> Generator["Answers", None, None]:
+        """Extract the valid results from the list of results.
+
+        It iterates through the tasks and invigilators, and yields the results of the tasks that are done.
+        If a task is not done, it raises a ValueError.
+        If an exception is raised in the task, it records the exception in the Interview instance except if the task was cancelled, which is expected behavior.
+
+        >>> i = Interview.example()
+        >>> result, _ = asyncio.run(i.async_conduct_interview())
+        """
+        assert len(tasks) == len(invigilators)
+
+        def handle_task(task, invigilator):
+            try:
+                result = task.result()
+            except asyncio.CancelledError as e:  # task was cancelled
+                result = invigilator.get_failed_task_result(
+                    failure_reason="Task was cancelled."
+                )
+            except Exception as e:  # any other kind of exception in the task
+                result = invigilator.get_failed_task_result(
+                    failure_reason=f"Task failed with exception: {str(e)}."
+                )
+                exception_entry = InterviewExceptionEntry(
+                    exception=e,
+                    invigilator=invigilator,
+                )
+                exceptions.add(task.get_name(), exception_entry)
+            return result
+
+        for task, invigilator in zip(tasks, invigilators):
+            if not task.done():
+                raise ValueError(f"Task {task.get_name()} is not done.")
+
+            yield handle_task(task, invigilator)
+
+    def __repr__(self) -> str:
+        """Return a string representation of the Interview instance."""
+        return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"
+
+    def duplicate(
+        self, iteration: int, cache: "Cache", randomize_survey: Optional[bool] = True
+    ) -> Interview:
+        """Duplicate the interview, but with a new iteration number and cache.
+
+        >>> i = Interview.example()
+        >>> i2 = i.duplicate(1, None)
+        >>> i.iteration + 1 == i2.iteration
+        True
+
+        """
+        if randomize_survey:
+            new_survey = self.survey.draw()
+        else:
+            new_survey = self.survey
+
+        return Interview(
+            agent=self.agent,
+            survey=new_survey,
+            scenario=self.scenario,
+            model=self.model,
+            iteration=iteration,
+            cache=self.running_config.cache,
+            skip_retry=self.running_config.skip_retry,
+            indices=self.indices,
+        )
+
+    @classmethod
+    def example(self, throw_exception: bool = False) -> Interview:
+        """Return an example Interview instance."""
+        from edsl.agents import Agent
+        from edsl.surveys import Survey
+        from edsl.scenarios import Scenario
+        from edsl.language_models import LanguageModel
+
+        def f(self, question, scenario):
+            return "yes"
+
+        agent = Agent.example()
+        agent.add_direct_question_answering_method(f)
+        survey = Survey.example()
+        scenario = Scenario.example()
+        model = LanguageModel.example()
+        if throw_exception:
+            model = LanguageModel.example(test_model=True, throw_exception=True)
+            agent = Agent.example()
+            return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
+        return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
+
+
+if __name__ == "__main__":
+    import doctest
+
+    # add ellipsis
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
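
The doctests in the new `Interview.py` above show the main API change in this hunk: `async_conduct_interview` no longer accepts `model_buckets` / `stop_on_exception` / `sidecar_model` keyword arguments and instead takes a single optional `RunConfig`. The following is a minimal sketch of the new calling pattern assembled only from those doctests; the `RunConfig`, `RunParameters`, and `RunEnvironment` names and their import path come from the new code itself, while the surrounding script is illustrative and not part of the diff.

```python
# Sketch based on the doctests in the new Interview.py shown above (not part of the diff).
import asyncio

from edsl.jobs.interviews.Interview import Interview
from edsl.jobs.Jobs import RunConfig, RunParameters, RunEnvironment

i = Interview.example()

# dev4: async_conduct_interview takes a single optional RunConfig
# (dev3 took model_buckets / stop_on_exception / sidecar_model keyword arguments).
answers, valid_results = asyncio.run(i.async_conduct_interview())
print(answers["q0"])  # the example agent answers every question with 'yes'

# Per the new doctests, run-time behaviour is now configured on the RunConfig:
run_config = RunConfig(parameters=RunParameters(), environment=RunEnvironment())
run_config.parameters.stop_on_exception = True
# asyncio.run(i.async_conduct_interview(run_config))  # the doctest shows this raising CancelledError
```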