edsl 0.1.39.dev3__py3-none-any.whl → 0.1.39.dev5__py3-none-any.whl
This diff compares the contents of two package versions publicly released to one of the supported registries, as they appear in those registries. It is provided for informational purposes only.
- edsl/Base.py +413 -332
- edsl/BaseDiff.py +260 -260
- edsl/TemplateLoader.py +24 -24
- edsl/__init__.py +57 -49
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +1071 -867
- edsl/agents/AgentList.py +551 -413
- edsl/agents/Invigilator.py +284 -233
- edsl/agents/InvigilatorBase.py +257 -270
- edsl/agents/PromptConstructor.py +272 -354
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/__init__.py +2 -3
- edsl/agents/descriptors.py +99 -99
- edsl/agents/prompt_helpers.py +129 -129
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -117
- edsl/auto/StageBase.py +243 -230
- edsl/auto/StageGenerateSurvey.py +178 -178
- edsl/auto/StageLabelQuestions.py +125 -125
- edsl/auto/StagePersona.py +61 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
- edsl/auto/StagePersonaDimensionValues.py +74 -74
- edsl/auto/StagePersonaDimensions.py +69 -69
- edsl/auto/StageQuestions.py +74 -73
- edsl/auto/SurveyCreatorPipeline.py +21 -21
- edsl/auto/utilities.py +218 -224
- edsl/base/Base.py +279 -279
- edsl/config.py +177 -157
- edsl/conversation/Conversation.py +290 -290
- edsl/conversation/car_buying.py +59 -58
- edsl/conversation/chips.py +95 -95
- edsl/conversation/mug_negotiation.py +81 -81
- edsl/conversation/next_speaker_utilities.py +93 -93
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -54
- edsl/coop/__init__.py +2 -2
- edsl/coop/coop.py +1106 -1028
- edsl/coop/utils.py +131 -131
- edsl/data/Cache.py +573 -555
- edsl/data/CacheEntry.py +230 -233
- edsl/data/CacheHandler.py +168 -149
- edsl/data/RemoteCacheSync.py +186 -78
- edsl/data/SQLiteDict.py +292 -292
- edsl/data/__init__.py +5 -4
- edsl/data/orm.py +10 -10
- edsl/data_transfer_models.py +74 -73
- edsl/enums.py +202 -175
- edsl/exceptions/BaseException.py +21 -21
- edsl/exceptions/__init__.py +54 -54
- edsl/exceptions/agents.py +54 -42
- edsl/exceptions/cache.py +5 -5
- edsl/exceptions/configuration.py +16 -16
- edsl/exceptions/coop.py +10 -10
- edsl/exceptions/data.py +14 -14
- edsl/exceptions/general.py +34 -34
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +33 -33
- edsl/exceptions/language_models.py +63 -63
- edsl/exceptions/prompts.py +15 -15
- edsl/exceptions/questions.py +109 -91
- edsl/exceptions/results.py +29 -29
- edsl/exceptions/scenarios.py +29 -22
- edsl/exceptions/surveys.py +37 -37
- edsl/inference_services/AnthropicService.py +106 -87
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -120
- edsl/inference_services/AzureAI.py +215 -217
- edsl/inference_services/DeepInfraService.py +18 -18
- edsl/inference_services/GoogleService.py +143 -148
- edsl/inference_services/GroqService.py +20 -20
- edsl/inference_services/InferenceServiceABC.py +80 -147
- edsl/inference_services/InferenceServicesCollection.py +138 -97
- edsl/inference_services/MistralAIService.py +120 -123
- edsl/inference_services/OllamaService.py +18 -18
- edsl/inference_services/OpenAIService.py +236 -224
- edsl/inference_services/PerplexityService.py +160 -163
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -89
- edsl/inference_services/TogetherAIService.py +172 -170
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -118
- edsl/inference_services/rate_limits_cache.py +25 -25
- edsl/inference_services/registry.py +41 -41
- edsl/inference_services/write_available.py +10 -10
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +43 -56
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +823 -898
- edsl/jobs/JobsChecks.py +172 -147
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -268
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -239
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/__init__.py +1 -1
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -63
- edsl/jobs/buckets/ModelBuckets.py +65 -65
- edsl/jobs/buckets/TokenBucket.py +283 -251
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +396 -661
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
- edsl/jobs/interviews/InterviewStatistic.py +63 -63
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
- edsl/jobs/interviews/InterviewStatusLog.py +92 -92
- edsl/jobs/interviews/ReportErrors.py +66 -66
- edsl/jobs/interviews/interview_status_enum.py +9 -9
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -466
- edsl/jobs/runners/JobsRunnerStatus.py +297 -330
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
- edsl/jobs/tasks/TaskCreators.py +64 -64
- edsl/jobs/tasks/TaskHistory.py +470 -450
- edsl/jobs/tasks/TaskStatusLog.py +23 -23
- edsl/jobs/tasks/task_status_enum.py +161 -163
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
- edsl/jobs/tokens/TokenUsage.py +34 -34
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +626 -668
- edsl/language_models/ModelList.py +164 -155
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/__init__.py +2 -3
- edsl/language_models/fake_openai_call.py +15 -15
- edsl/language_models/fake_openai_service.py +61 -61
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +156 -156
- edsl/language_models/utilities.py +65 -64
- edsl/notebooks/Notebook.py +263 -258
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -1
- edsl/prompts/Prompt.py +352 -362
- edsl/prompts/__init__.py +2 -2
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -664
- edsl/questions/QuestionBasePromptsMixin.py +221 -217
- edsl/questions/QuestionBudget.py +227 -227
- edsl/questions/QuestionCheckBox.py +359 -359
- edsl/questions/QuestionExtract.py +180 -182
- edsl/questions/QuestionFreeText.py +113 -114
- edsl/questions/QuestionFunctional.py +166 -166
- edsl/questions/QuestionList.py +223 -231
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +330 -286
- edsl/questions/QuestionNumerical.py +151 -153
- edsl/questions/QuestionRank.py +314 -324
- edsl/questions/Quick.py +41 -41
- edsl/questions/SimpleAskMixin.py +74 -73
- edsl/questions/__init__.py +27 -26
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +334 -289
- edsl/questions/compose_questions.py +98 -98
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -21
- edsl/questions/derived/QuestionLikertFive.py +76 -76
- edsl/questions/derived/QuestionLinearScale.py +90 -87
- edsl/questions/derived/QuestionTopK.py +93 -93
- edsl/questions/derived/QuestionYesNo.py +82 -82
- edsl/questions/descriptors.py +427 -413
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -13
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
- edsl/questions/prompt_templates/question_extract.jinja +11 -11
- edsl/questions/prompt_templates/question_free_text.jinja +3 -3
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
- edsl/questions/prompt_templates/question_list.jinja +17 -17
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
- edsl/questions/prompt_templates/question_numerical.jinja +36 -36
- edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +168 -161
- edsl/questions/question_registry.py +177 -177
- edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +71 -71
- edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +188 -174
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +12 -12
- edsl/questions/templates/budget/answering_instructions.jinja +7 -7
- edsl/questions/templates/budget/question_presentation.jinja +7 -7
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
- edsl/questions/templates/extract/answering_instructions.jinja +7 -7
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
- edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
- edsl/questions/templates/list/answering_instructions.jinja +3 -3
- edsl/questions/templates/list/question_presentation.jinja +5 -5
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
- edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
- edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
- edsl/questions/templates/numerical/question_presentation.jinja +6 -6
- edsl/questions/templates/rank/answering_instructions.jinja +11 -11
- edsl/questions/templates/rank/question_presentation.jinja +15 -15
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
- edsl/questions/templates/top_k/question_presentation.jinja +22 -22
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
- edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
- edsl/results/CSSParameterizer.py +108 -108
- edsl/results/Dataset.py +587 -424
- edsl/results/DatasetExportMixin.py +594 -731
- edsl/results/DatasetTree.py +295 -275
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +557 -465
- edsl/results/Results.py +1183 -1165
- edsl/results/ResultsExportMixin.py +45 -43
- edsl/results/ResultsGGMixin.py +121 -121
- edsl/results/TableDisplay.py +125 -198
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +2 -2
- edsl/results/file_exports.py +252 -0
- edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +33 -33
- edsl/results/{Selector.py → results_selector.py} +145 -135
- edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +98 -98
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +77 -77
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -115
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +511 -632
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +498 -601
- edsl/scenarios/ScenarioHtmlMixin.py +65 -64
- edsl/scenarios/ScenarioList.py +1458 -1287
- edsl/scenarios/ScenarioListExportMixin.py +45 -52
- edsl/scenarios/ScenarioListPdfMixin.py +239 -261
- edsl/scenarios/__init__.py +3 -4
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +38 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +131 -127
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -1
- edsl/study/ObjectEntry.py +173 -173
- edsl/study/ProofOfWork.py +113 -113
- edsl/study/SnapShot.py +80 -80
- edsl/study/Study.py +521 -528
- edsl/study/__init__.py +4 -4
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +148 -148
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +31 -31
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +244 -244
- edsl/surveys/Rule.py +327 -326
- edsl/surveys/RuleCollection.py +385 -387
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1280 -1801
- edsl/surveys/SurveyCSS.py +273 -261
- edsl/surveys/SurveyExportMixin.py +259 -259
- edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +181 -179
- edsl/surveys/SurveyQualtricsImport.py +284 -284
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +5 -3
- edsl/surveys/base.py +53 -53
- edsl/surveys/descriptors.py +60 -56
- edsl/surveys/instructions/ChangeInstruction.py +48 -49
- edsl/surveys/instructions/Instruction.py +56 -65
- edsl/surveys/instructions/InstructionCollection.py +82 -77
- edsl/templates/error_reporting/base.html +23 -23
- edsl/templates/error_reporting/exceptions_by_model.html +34 -34
- edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
- edsl/templates/error_reporting/exceptions_by_type.html +16 -16
- edsl/templates/error_reporting/interview_details.html +115 -115
- edsl/templates/error_reporting/interviews.html +19 -19
- edsl/templates/error_reporting/overview.html +4 -4
- edsl/templates/error_reporting/performance_plot.html +1 -1
- edsl/templates/error_reporting/report.css +73 -73
- edsl/templates/error_reporting/report.html +117 -117
- edsl/templates/error_reporting/report.js +25 -25
- edsl/tools/__init__.py +1 -1
- edsl/tools/clusters.py +192 -192
- edsl/tools/embeddings.py +27 -27
- edsl/tools/embeddings_plotting.py +118 -118
- edsl/tools/plotting.py +112 -112
- edsl/tools/summarize.py +18 -18
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +28 -28
- edsl/utilities/__init__.py +22 -22
- edsl/utilities/ast_utilities.py +25 -25
- edsl/utilities/data/Registry.py +6 -6
- edsl/utilities/data/__init__.py +1 -1
- edsl/utilities/data/scooter_results.json +1 -1
- edsl/utilities/decorators.py +77 -77
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
- edsl/utilities/interface.py +627 -627
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -263
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -28
- edsl/utilities/restricted_python.py +70 -70
- edsl/utilities/utilities.py +436 -424
- {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev5.dist-info}/LICENSE +21 -21
- {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev5.dist-info}/METADATA +13 -11
- edsl-0.1.39.dev5.dist-info/RECORD +358 -0
- {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev5.dist-info}/WHEEL +1 -1
- edsl/language_models/KeyLookup.py +0 -30
- edsl/language_models/registry.py +0 -190
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/results/ResultsDBMixin.py +0 -238
- edsl-0.1.39.dev3.dist-info/RECORD +0 -277
edsl/inference_services/models_available_cache.py
@@ -1,118 +1,118 @@

Every line of this file is marked as removed and then re-added, but the two sides are identical as rendered. The same whole-file rewrite pattern applies to the next three hunks, so each is shown once rather than twice. File content:

models_available = {
    "openai": [
        "gpt-3.5-turbo-1106",
        "gpt-4-0125-preview",
        "gpt-4-turbo-preview",
        "gpt-3.5-turbo-16k",
        "gpt-4-1106-preview",
        "gpt-4-turbo-2024-04-09",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4o-2024-05-13",
        "gpt-4-turbo",
        "gpt-3.5-turbo-0613",
        "gpt-4",
        "gpt-4-0613",
        "gpt-3.5-turbo-0125",
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-instruct",
        "gpt-3.5-turbo-instruct-0914",
        "gpt-3.5-turbo-0301",
        "gpt-4-vision-preview",
        "gpt-4-1106-vision-preview",
        "gpt-4o",
    ],
    "anthropic": [
        "claude-3-5-sonnet-20240620",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
    ],
    "deep_infra": [
        "meta-llama/Llama-2-13b-chat-hf",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "Gryphe/MythoMax-L2-13b-turbo",
        "mistralai/Mistral-7B-Instruct-v0.1",
        "Austism/chronos-hermes-13b-v2",
        "meta-llama/Llama-2-70b-chat-hf",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "meta-llama/Llama-2-7b-chat-hf",
        "Qwen/Qwen2-72B-Instruct",
        "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
        "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
        "bigcode/starcoder2-15b",
        "microsoft/WizardLM-2-8x22B",
        "codellama/CodeLlama-70b-Instruct-hf",
        "Gryphe/MythoMax-L2-13b",
        "microsoft/WizardLM-2-7B",
        "01-ai/Yi-34B-Chat",
        "bigcode/starcoder2-15b-instruct-v0.1",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "openchat/openchat-3.6-8b",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "microsoft/Phi-3-medium-4k-instruct",
        "Phind/Phind-CodeLlama-34B-v2",
        "google/codegemma-7b-it",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "deepinfra/airoboros-70b",
        "mistralai/Mixtral-8x22B-v0.1",
        "llava-hf/llava-1.5-7b-hf",
        "codellama/CodeLlama-34b-Instruct-hf",
        "google/gemma-1.1-7b-it",
        "lizpreciatior/lzlv_70b_fp16_hf",
        "databricks/dbrx-instruct",
        "nvidia/Nemotron-4-340B-Instruct",
        "Qwen/Qwen2-7B-Instruct",
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "openchat/openchat_3.5",
    ],
    "google": [
        "gemini-1.0-pro",
        "gemini-1.0-pro-001",
        "gemini-1.0-pro-latest",
        "gemini-1.0-pro-vision-latest",
        "gemini-1.5-flash",
        "gemini-1.5-flash-001",
        "gemini-1.5-flash-001-tuning",
        "gemini-1.5-flash-002",
        "gemini-1.5-flash-8b",
        "gemini-1.5-flash-8b-001",
        "gemini-1.5-flash-8b-exp-0827",
        "gemini-1.5-flash-8b-exp-0924",
        "gemini-1.5-flash-8b-latest",
        "gemini-1.5-flash-exp-0827",
        "gemini-1.5-flash-latest",
        "gemini-1.5-pro",
        "gemini-1.5-pro-001",
        "gemini-1.5-pro-002",
        "gemini-1.5-pro-exp-0801",
        "gemini-1.5-pro-exp-0827",
        "gemini-1.5-pro-latest",
        "gemini-pro",
        "gemini-pro-vision",
    ],
    "bedrock": [
        "amazon.titan-tg1-large",
        "amazon.titan-text-lite-v1",
        "amazon.titan-text-express-v1",
        "anthropic.claude-instant-v1",
        "anthropic.claude-v2:1",
        "anthropic.claude-v2",
        "anthropic.claude-3-sonnet-20240229-v1:0",
        "anthropic.claude-3-haiku-20240307-v1:0",
        "anthropic.claude-3-opus-20240229-v1:0",
        "anthropic.claude-3-5-sonnet-20240620-v1:0",
        "cohere.command-text-v14",
        "cohere.command-r-v1:0",
        "cohere.command-r-plus-v1:0",
        "cohere.command-light-text-v14",
        "meta.llama3-8b-instruct-v1:0",
        "meta.llama3-70b-instruct-v1:0",
        "meta.llama3-1-8b-instruct-v1:0",
        "meta.llama3-1-70b-instruct-v1:0",
        "meta.llama3-1-405b-instruct-v1:0",
        "mistral.mistral-7b-instruct-v0:2",
        "mistral.mixtral-8x7b-instruct-v0:1",
        "mistral.mistral-large-2402-v1:0",
        "mistral.mistral-large-2407-v1:0",
    ],
}
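models_available_cache.py is a plain module-level dict keyed by service name, so consumers can read it with an ordinary import and dict lookup. A minimal sketch, assuming only that the module is importable under the path shown in the file list above:

```python
# Sketch: reading the cached model listing.
from edsl.inference_services.models_available_cache import models_available

# Services that have a cached model list.
print(sorted(models_available))

# Cached model identifiers for one service.
for model_name in models_available["anthropic"]:
    print(model_name)
```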
edsl/inference_services/rate_limits_cache.py
@@ -1,25 +1,25 @@

rate_limits = {
    "openai": {
        "date": "Tue, 02 Jul 2024 15:25:28 GMT",
        "content-type": "application/json",
        "transfer-encoding": "chunked",
        "connection": "keep-alive",
        "openai-organization": "user-wmu32omw8ulzzutk6mjhtqgk",
        "openai-processing-ms": "760",
        "openai-version": "2020-10-01",
        "strict-transport-security": "max-age=31536000; includeSubDomains",
        "x-ratelimit-limit-requests": "5000",
        "x-ratelimit-limit-tokens": "600000",
        "x-ratelimit-remaining-requests": "4999",
        "x-ratelimit-remaining-tokens": "599978",
        "x-ratelimit-reset-requests": "12ms",
        "x-ratelimit-reset-tokens": "2ms",
        "x-request-id": "req_971608f3647f660a0cd6537fbe21f69c",
        "cf-cache-status": "DYNAMIC",
        "set-cookie": "__cf_bm=MJfUk.0TXdjtiNkUUqlUO2gaN3wzm0iHsRQRWExy52o-1719933928-1.0.1.1-0xk9gFxy_mD1KzAsKQ_HpL2pdQJ90D4B5frt65xU.c9k9QwD0oTBILqXB0rykXNh04Pm1UB1.H_W9sFJVOcSaw; path=/; expires=Tue, 02-Jul-24 15:55:28 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=GbheRct_iw9_I8iLWmt5ZRcLYZ_QVnroCrAt8QMVsUg-1719933928399-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
        "server": "cloudflare",
        "cf-ray": "89cfa6059bb9b68f-OTP",
        "content-encoding": "gzip",
        "alt-svc": 'h3=":443"; ma=86400',
    }
}
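The cached entry is a snapshot of the HTTP response headers from a single OpenAI call, including the x-ratelimit-* headers, and every value is stored as a string. A minimal sketch of reading it, under the same import-path assumption as above:

```python
# Sketch: inspecting the cached OpenAI rate-limit headers.
from edsl.inference_services.rate_limits_cache import rate_limits

headers = rate_limits["openai"]

# Header values are strings, so cast before doing arithmetic.
limit_requests = int(headers["x-ratelimit-limit-requests"])
remaining_requests = int(headers["x-ratelimit-remaining-requests"])
limit_tokens = int(headers["x-ratelimit-limit-tokens"])
remaining_tokens = int(headers["x-ratelimit-remaining-tokens"])

print(f"requests used: {limit_requests - remaining_requests}/{limit_requests}")
print(f"tokens used:   {limit_tokens - remaining_tokens}/{limit_tokens}")
```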
edsl/inference_services/registry.py
@@ -1,41 +1,41 @@

from edsl.inference_services.InferenceServicesCollection import (
    InferenceServicesCollection,
)

from edsl.inference_services.OpenAIService import OpenAIService
from edsl.inference_services.AnthropicService import AnthropicService
from edsl.inference_services.DeepInfraService import DeepInfraService
from edsl.inference_services.GoogleService import GoogleService
from edsl.inference_services.GroqService import GroqService
from edsl.inference_services.AwsBedrock import AwsBedrockService
from edsl.inference_services.AzureAI import AzureAIService
from edsl.inference_services.OllamaService import OllamaService
from edsl.inference_services.TestService import TestService
from edsl.inference_services.TogetherAIService import TogetherAIService
from edsl.inference_services.PerplexityService import PerplexityService

try:
    from edsl.inference_services.MistralAIService import MistralAIService

    mistral_available = True
except Exception as e:
    mistral_available = False

services = [
    OpenAIService,
    AnthropicService,
    DeepInfraService,
    GoogleService,
    GroqService,
    AwsBedrockService,
    AzureAIService,
    OllamaService,
    TestService,
    TogetherAIService,
    PerplexityService,
]

if mistral_available:
    services.append(MistralAIService)

default = InferenceServicesCollection(services)
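The registry builds a single `default` collection at import time, adding MistralAIService only when its import succeeds. A quick way to see which service classes were actually registered is to iterate `default.services`, the same attribute write_available.py below relies on; this sketch assumes only the attributes that file already uses:

```python
# Sketch: listing the service classes the registry registered.
from edsl.inference_services.registry import default

for service in default.services:
    # _inference_service_ is the short name each service class carries,
    # the same key write_available.py uses when building its cache.
    print(service._inference_service_, service.__name__)
```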
edsl/inference_services/write_available.py
@@ -1,10 +1,10 @@

from edsl.inference_services.registry import default


def write_available():
    d = {}
    for service in default.services:
        d[service._inference_service_] = service.available()

    with open("models_available_cache.py", "w") as f:
        f.write(f"models_available = {d}")
edsl/jobs/AnswerQuestionFunctionConstructor.py
@@ -0,0 +1,223 @@

This file is new in 0.1.39.dev5; all 223 lines are additions:

import copy
import asyncio

from typing import Union, Type, Callable, TYPE_CHECKING

if TYPE_CHECKING:
    from edsl.questions.QuestionBase import QuestionBase
    from edsl.jobs.interviews.Interview import Interview
    from edsl.language_models.key_management.KeyLookup import KeyLookup

from edsl.surveys.base import EndOfSurvey
from edsl.jobs.tasks.task_status_enum import TaskStatus

from edsl.jobs.FetchInvigilator import FetchInvigilator
from edsl.exceptions.language_models import LanguageModelNoResponseError
from edsl.exceptions.questions import QuestionAnswerValidationError
from edsl.data_transfer_models import AgentResponseDict, EDSLResultObjectInput

from edsl.jobs.Answers import Answers


class RetryConfig:
    from edsl.config import CONFIG

    EDSL_BACKOFF_START_SEC = float(CONFIG.get("EDSL_BACKOFF_START_SEC"))
    EDSL_BACKOFF_MAX_SEC = float(CONFIG.get("EDSL_BACKOFF_MAX_SEC"))
    EDSL_MAX_ATTEMPTS = int(CONFIG.get("EDSL_MAX_ATTEMPTS"))


class SkipHandler:

    def __init__(self, interview: "Interview"):
        self.interview = interview
        self.question_index = self.interview.to_index

        self.skip_function: Callable = (
            self.interview.survey.rule_collection.skip_question_before_running
        )

    def should_skip(self, current_question: "QuestionBase") -> bool:
        """Determine if the current question should be skipped."""
        current_question_index = self.question_index[current_question.question_name]
        combined_answers = (
            self.interview.answers
            | self.interview.scenario
            | self.interview.agent["traits"]
        )
        return self.skip_function(current_question_index, combined_answers)

    def cancel_skipped_questions(self, current_question: "QuestionBase") -> None:
        """Cancel the tasks for questions that should be skipped."""
        current_question_index: int = self.interview.to_index[
            current_question.question_name
        ]
        answers = (
            self.interview.answers
            | self.interview.scenario
            | self.interview.agent["traits"]
        )

        # Get the index of the next question, which could also be the end of the survey
        next_question: Union[int, EndOfSurvey] = (
            self.interview.survey.rule_collection.next_question(
                q_now=current_question_index,
                answers=answers,
            )
        )

        def cancel_between(start, end):
            """Cancel the tasks for questions between the start and end indices."""
            for i in range(start, end):
                self.interview.tasks[i].cancel()

        if (next_question_index := next_question.next_q) == EndOfSurvey:
            cancel_between(
                current_question_index + 1, len(self.interview.survey.questions)
            )
            return

        if next_question_index > (current_question_index + 1):
            cancel_between(current_question_index + 1, next_question_index)


class AnswerQuestionFunctionConstructor:
    """Constructs a function that answers a question and records the answer."""

    def __init__(self, interview: "Interview", key_lookup: "KeyLookup"):
        self.interview = interview
        self.key_lookup = key_lookup

        self.had_language_model_no_response_error: bool = False
        self.question_index = self.interview.to_index

        self.skip_function: Callable = (
            self.interview.survey.rule_collection.skip_question_before_running
        )

        self.invigilator_fetcher = FetchInvigilator(
            self.interview, key_lookup=self.key_lookup
        )
        self.skip_handler = SkipHandler(self.interview)

    def _handle_exception(
        self, e: Exception, invigilator: "InvigilatorBase", task=None
    ):
        """Handle an exception that occurred while answering a question."""

        from edsl.jobs.interviews.InterviewExceptionEntry import InterviewExceptionEntry

        answers = copy.copy(
            self.interview.answers
        )  # copy to freeze the answers here for logging
        exception_entry = InterviewExceptionEntry(
            exception=e,
            invigilator=invigilator,
            answers=answers,
        )
        if task:
            task.task_status = TaskStatus.FAILED

        self.interview.exceptions.add(
            invigilator.question.question_name, exception_entry
        )

        if self.interview.raise_validation_errors and isinstance(
            e, QuestionAnswerValidationError
        ):
            raise e

        stop_on_exception = getattr(self.interview, "stop_on_exception", False)
        if stop_on_exception:
            raise e

    def __call__(self):
        return self.answer_question_and_record_task

    async def answer_question_and_record_task(
        self,
        *,
        question: "QuestionBase",
        task=None,
    ) -> "AgentResponseDict":

        from tenacity import (
            retry,
            stop_after_attempt,
            wait_exponential,
            retry_if_exception_type,
            RetryError,
        )

        @retry(
            stop=stop_after_attempt(RetryConfig.EDSL_MAX_ATTEMPTS),
            wait=wait_exponential(
                multiplier=RetryConfig.EDSL_BACKOFF_START_SEC,
                max=RetryConfig.EDSL_BACKOFF_MAX_SEC,
            ),
            retry=retry_if_exception_type(LanguageModelNoResponseError),
            reraise=True,
        )
        async def attempt_answer():
            invigilator = self.invigilator_fetcher(question)

            if self.skip_handler.should_skip(question):
                return invigilator.get_failed_task_result(
                    failure_reason="Question skipped."
                )

            try:
                response: EDSLResultObjectInput = (
                    await invigilator.async_answer_question()
                )
                if response.validated:
                    self.interview.answers.add_answer(
                        response=response, question=question
                    )

                    self.skip_handler.cancel_skipped_questions(question)
                else:
                    if (
                        hasattr(response, "exception_occurred")
                        and response.exception_occurred
                    ):
                        raise response.exception_occurred

            except QuestionAnswerValidationError as e:
                self._handle_exception(e, invigilator, task)
                return invigilator.get_failed_task_result(
                    failure_reason="Question answer validation failed."
                )

            except asyncio.TimeoutError as e:
                self._handle_exception(e, invigilator, task)
                had_language_model_no_response_error = True
                raise LanguageModelNoResponseError(
                    f"Language model timed out for question '{question.question_name}.'"
                )

            except Exception as e:
                self._handle_exception(e, invigilator, task)

            if "response" not in locals():
                had_language_model_no_response_error = True
                raise LanguageModelNoResponseError(
                    f"Language model did not return a response for question '{question.question_name}.'"
                )

            if (
                question.question_name in self.interview.exceptions
                and had_language_model_no_response_error
            ):
                self.interview.exceptions.record_fixed_question(question.question_name)

            return response

        try:
            return await attempt_answer()
        except RetryError as retry_error:
            original_error = retry_error.last_attempt.exception()
            self._handle_exception(
                original_error, self.invigilator_fetcher(question), task
            )
            raise original_error
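The retry behaviour in attempt_answer comes straight from tenacity: exponential backoff between a configured start and cap, a capped number of attempts, retries only on LanguageModelNoResponseError, and reraise=True. A standalone sketch of the same pattern follows; the NoResponseError class, the flaky_call coroutine, and the hard-coded constants are illustrative stand-ins, not EDSL code:

```python
# Standalone sketch of the tenacity retry/backoff pattern used above.
# NoResponseError, flaky_call, and the constants are illustrative stand-ins;
# EDSL reads the real values from CONFIG and retries LanguageModelNoResponseError.
import asyncio
import random

from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


class NoResponseError(Exception):
    """Stand-in for LanguageModelNoResponseError."""


BACKOFF_START_SEC = 1.0  # plays the role of EDSL_BACKOFF_START_SEC
BACKOFF_MAX_SEC = 60.0   # plays the role of EDSL_BACKOFF_MAX_SEC
MAX_ATTEMPTS = 5         # plays the role of EDSL_MAX_ATTEMPTS


@retry(
    stop=stop_after_attempt(MAX_ATTEMPTS),
    wait=wait_exponential(multiplier=BACKOFF_START_SEC, max=BACKOFF_MAX_SEC),
    retry=retry_if_exception_type(NoResponseError),
    reraise=True,  # surface the original exception, not tenacity's RetryError
)
async def flaky_call() -> str:
    """Stand-in for invigilator.async_answer_question(): fails most of the time."""
    if random.random() < 0.7:
        raise NoResponseError("no response from model")
    return "answer"


async def main() -> None:
    try:
        print(await flaky_call())
    except NoResponseError as exc:
        print(f"gave up after {MAX_ATTEMPTS} attempts: {exc}")


if __name__ == "__main__":
    asyncio.run(main())
```

With reraise=True, tenacity re-raises the last attempt's own exception instead of wrapping it in RetryError, which is why the sketch catches NoResponseError directly after the retries are exhausted.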