edsl 0.1.38.dev3__py3-none-any.whl → 0.1.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +413 -303
- edsl/BaseDiff.py +260 -260
- edsl/TemplateLoader.py +24 -24
- edsl/__init__.py +57 -49
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +1071 -858
- edsl/agents/AgentList.py +551 -362
- edsl/agents/Invigilator.py +284 -222
- edsl/agents/InvigilatorBase.py +257 -284
- edsl/agents/PromptConstructor.py +272 -353
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/__init__.py +2 -3
- edsl/agents/descriptors.py +99 -99
- edsl/agents/prompt_helpers.py +129 -129
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -117
- edsl/auto/StageBase.py +243 -230
- edsl/auto/StageGenerateSurvey.py +178 -178
- edsl/auto/StageLabelQuestions.py +125 -125
- edsl/auto/StagePersona.py +61 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
- edsl/auto/StagePersonaDimensionValues.py +74 -74
- edsl/auto/StagePersonaDimensions.py +69 -69
- edsl/auto/StageQuestions.py +74 -73
- edsl/auto/SurveyCreatorPipeline.py +21 -21
- edsl/auto/utilities.py +218 -224
- edsl/base/Base.py +279 -279
- edsl/config.py +177 -149
- edsl/conversation/Conversation.py +290 -290
- edsl/conversation/car_buying.py +59 -58
- edsl/conversation/chips.py +95 -95
- edsl/conversation/mug_negotiation.py +81 -81
- edsl/conversation/next_speaker_utilities.py +93 -93
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -54
- edsl/coop/__init__.py +2 -2
- edsl/coop/coop.py +1106 -961
- edsl/coop/utils.py +131 -131
- edsl/data/Cache.py +573 -530
- edsl/data/CacheEntry.py +230 -228
- edsl/data/CacheHandler.py +168 -149
- edsl/data/RemoteCacheSync.py +186 -97
- edsl/data/SQLiteDict.py +292 -292
- edsl/data/__init__.py +5 -4
- edsl/data/orm.py +10 -10
- edsl/data_transfer_models.py +74 -73
- edsl/enums.py +202 -173
- edsl/exceptions/BaseException.py +21 -21
- edsl/exceptions/__init__.py +54 -54
- edsl/exceptions/agents.py +54 -42
- edsl/exceptions/cache.py +5 -5
- edsl/exceptions/configuration.py +16 -16
- edsl/exceptions/coop.py +10 -10
- edsl/exceptions/data.py +14 -14
- edsl/exceptions/general.py +34 -34
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +33 -33
- edsl/exceptions/language_models.py +63 -63
- edsl/exceptions/prompts.py +15 -15
- edsl/exceptions/questions.py +109 -91
- edsl/exceptions/results.py +29 -29
- edsl/exceptions/scenarios.py +29 -22
- edsl/exceptions/surveys.py +37 -37
- edsl/inference_services/AnthropicService.py +106 -87
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -120
- edsl/inference_services/AzureAI.py +215 -217
- edsl/inference_services/DeepInfraService.py +18 -18
- edsl/inference_services/GoogleService.py +143 -156
- edsl/inference_services/GroqService.py +20 -20
- edsl/inference_services/InferenceServiceABC.py +80 -147
- edsl/inference_services/InferenceServicesCollection.py +138 -97
- edsl/inference_services/MistralAIService.py +120 -123
- edsl/inference_services/OllamaService.py +18 -18
- edsl/inference_services/OpenAIService.py +236 -224
- edsl/inference_services/PerplexityService.py +160 -0
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -89
- edsl/inference_services/TogetherAIService.py +172 -170
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -118
- edsl/inference_services/rate_limits_cache.py +25 -25
- edsl/inference_services/registry.py +41 -39
- edsl/inference_services/write_available.py +10 -10
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +43 -56
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +823 -1358
- edsl/jobs/JobsChecks.py +172 -0
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -0
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/__init__.py +1 -1
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -63
- edsl/jobs/buckets/ModelBuckets.py +65 -65
- edsl/jobs/buckets/TokenBucket.py +283 -251
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +396 -661
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
- edsl/jobs/interviews/InterviewStatistic.py +63 -63
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
- edsl/jobs/interviews/InterviewStatusLog.py +92 -92
- edsl/jobs/interviews/ReportErrors.py +66 -66
- edsl/jobs/interviews/interview_status_enum.py +9 -9
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -361
- edsl/jobs/runners/JobsRunnerStatus.py +298 -332
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
- edsl/jobs/tasks/TaskCreators.py +64 -64
- edsl/jobs/tasks/TaskHistory.py +470 -451
- edsl/jobs/tasks/TaskStatusLog.py +23 -23
- edsl/jobs/tasks/task_status_enum.py +161 -163
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
- edsl/jobs/tokens/TokenUsage.py +34 -34
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +626 -708
- edsl/language_models/ModelList.py +164 -109
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/__init__.py +2 -3
- edsl/language_models/fake_openai_call.py +15 -15
- edsl/language_models/fake_openai_service.py +61 -61
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +156 -156
- edsl/language_models/utilities.py +65 -64
- edsl/notebooks/Notebook.py +263 -258
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -1
- edsl/prompts/Prompt.py +352 -357
- edsl/prompts/__init__.py +2 -2
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -660
- edsl/questions/QuestionBasePromptsMixin.py +221 -217
- edsl/questions/QuestionBudget.py +227 -227
- edsl/questions/QuestionCheckBox.py +359 -359
- edsl/questions/QuestionExtract.py +180 -183
- edsl/questions/QuestionFreeText.py +113 -114
- edsl/questions/QuestionFunctional.py +166 -166
- edsl/questions/QuestionList.py +223 -231
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +330 -286
- edsl/questions/QuestionNumerical.py +151 -153
- edsl/questions/QuestionRank.py +314 -324
- edsl/questions/Quick.py +41 -41
- edsl/questions/SimpleAskMixin.py +74 -73
- edsl/questions/__init__.py +27 -26
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +334 -289
- edsl/questions/compose_questions.py +98 -98
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -21
- edsl/questions/derived/QuestionLikertFive.py +76 -76
- edsl/questions/derived/QuestionLinearScale.py +90 -87
- edsl/questions/derived/QuestionTopK.py +93 -93
- edsl/questions/derived/QuestionYesNo.py +82 -82
- edsl/questions/descriptors.py +427 -413
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -13
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
- edsl/questions/prompt_templates/question_extract.jinja +11 -11
- edsl/questions/prompt_templates/question_free_text.jinja +3 -3
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
- edsl/questions/prompt_templates/question_list.jinja +17 -17
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
- edsl/questions/prompt_templates/question_numerical.jinja +36 -36
- edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +168 -161
- edsl/questions/question_registry.py +177 -147
- edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +71 -71
- edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +188 -174
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +12 -12
- edsl/questions/templates/budget/answering_instructions.jinja +7 -7
- edsl/questions/templates/budget/question_presentation.jinja +7 -7
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
- edsl/questions/templates/extract/answering_instructions.jinja +7 -7
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
- edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
- edsl/questions/templates/list/answering_instructions.jinja +3 -3
- edsl/questions/templates/list/question_presentation.jinja +5 -5
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
- edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
- edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
- edsl/questions/templates/numerical/question_presentation.jinja +6 -6
- edsl/questions/templates/rank/answering_instructions.jinja +11 -11
- edsl/questions/templates/rank/question_presentation.jinja +15 -15
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
- edsl/questions/templates/top_k/question_presentation.jinja +22 -22
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
- edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
- edsl/results/CSSParameterizer.py +108 -0
- edsl/results/Dataset.py +587 -293
- edsl/results/DatasetExportMixin.py +594 -717
- edsl/results/DatasetTree.py +295 -145
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +557 -456
- edsl/results/Results.py +1183 -1071
- edsl/results/ResultsExportMixin.py +45 -43
- edsl/results/ResultsGGMixin.py +121 -121
- edsl/results/TableDisplay.py +125 -0
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +2 -2
- edsl/results/file_exports.py +252 -0
- edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +33 -33
- edsl/results/{Selector.py → results_selector.py} +145 -135
- edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +98 -98
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +78 -0
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -115
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +543 -458
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +498 -544
- edsl/scenarios/ScenarioHtmlMixin.py +65 -64
- edsl/scenarios/ScenarioList.py +1458 -1112
- edsl/scenarios/ScenarioListExportMixin.py +45 -52
- edsl/scenarios/ScenarioListPdfMixin.py +239 -261
- edsl/scenarios/__init__.py +3 -4
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +49 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/scenario_join.py +131 -0
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -1
- edsl/study/ObjectEntry.py +173 -173
- edsl/study/ProofOfWork.py +113 -113
- edsl/study/SnapShot.py +80 -80
- edsl/study/Study.py +521 -528
- edsl/study/__init__.py +4 -4
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +148 -148
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +31 -31
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +244 -244
- edsl/surveys/Rule.py +327 -326
- edsl/surveys/RuleCollection.py +385 -387
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1280 -1787
- edsl/surveys/SurveyCSS.py +273 -261
- edsl/surveys/SurveyExportMixin.py +259 -259
- edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +181 -121
- edsl/surveys/SurveyQualtricsImport.py +284 -284
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +5 -3
- edsl/surveys/base.py +53 -53
- edsl/surveys/descriptors.py +60 -56
- edsl/surveys/instructions/ChangeInstruction.py +48 -49
- edsl/surveys/instructions/Instruction.py +56 -53
- edsl/surveys/instructions/InstructionCollection.py +82 -77
- edsl/templates/error_reporting/base.html +23 -23
- edsl/templates/error_reporting/exceptions_by_model.html +34 -34
- edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
- edsl/templates/error_reporting/exceptions_by_type.html +16 -16
- edsl/templates/error_reporting/interview_details.html +115 -115
- edsl/templates/error_reporting/interviews.html +19 -10
- edsl/templates/error_reporting/overview.html +4 -4
- edsl/templates/error_reporting/performance_plot.html +1 -1
- edsl/templates/error_reporting/report.css +73 -73
- edsl/templates/error_reporting/report.html +117 -117
- edsl/templates/error_reporting/report.js +25 -25
- edsl/tools/__init__.py +1 -1
- edsl/tools/clusters.py +192 -192
- edsl/tools/embeddings.py +27 -27
- edsl/tools/embeddings_plotting.py +118 -118
- edsl/tools/plotting.py +112 -112
- edsl/tools/summarize.py +18 -18
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +28 -28
- edsl/utilities/__init__.py +22 -22
- edsl/utilities/ast_utilities.py +25 -25
- edsl/utilities/data/Registry.py +6 -6
- edsl/utilities/data/__init__.py +1 -1
- edsl/utilities/data/scooter_results.json +1 -1
- edsl/utilities/decorators.py +77 -77
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
- edsl/utilities/interface.py +627 -627
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -263
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -28
- edsl/utilities/restricted_python.py +70 -70
- edsl/utilities/utilities.py +436 -409
- {edsl-0.1.38.dev3.dist-info → edsl-0.1.39.dist-info}/LICENSE +21 -21
- {edsl-0.1.38.dev3.dist-info → edsl-0.1.39.dist-info}/METADATA +13 -10
- edsl-0.1.39.dist-info/RECORD +358 -0
- {edsl-0.1.38.dev3.dist-info → edsl-0.1.39.dist-info}/WHEEL +1 -1
- edsl/language_models/KeyLookup.py +0 -30
- edsl/language_models/registry.py +0 -137
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/results/ResultsDBMixin.py +0 -238
- edsl-0.1.38.dev3.dist-info/RECORD +0 -269
edsl/jobs/Jobs.py
CHANGED
@@ -1,1358 +1,823 @@
|
|
1
|
-
# """The Jobs class is a collection of agents, scenarios and models and one survey."""
|
2
|
-
from __future__ import annotations
|
3
|
-
import
|
4
|
-
import
|
5
|
-
from
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
|
12
|
-
|
13
|
-
|
14
|
-
|
15
|
-
|
16
|
-
from edsl.
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
29
|
-
|
30
|
-
|
31
|
-
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
-
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
66
|
-
|
67
|
-
|
68
|
-
|
69
|
-
|
70
|
-
|
71
|
-
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
|
78
|
-
|
79
|
-
|
80
|
-
|
81
|
-
|
82
|
-
|
83
|
-
|
84
|
-
return
|
85
|
-
|
86
|
-
|
87
|
-
|
88
|
-
|
89
|
-
|
90
|
-
|
91
|
-
|
92
|
-
|
93
|
-
|
94
|
-
|
95
|
-
|
96
|
-
|
97
|
-
|
98
|
-
|
99
|
-
|
100
|
-
|
101
|
-
|
102
|
-
|
103
|
-
|
104
|
-
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
|
109
|
-
|
110
|
-
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
115
|
-
|
116
|
-
|
117
|
-
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
130
|
-
|
131
|
-
|
132
|
-
)
|
133
|
-
|
134
|
-
|
135
|
-
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
from edsl.
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
|
266
|
-
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
274
|
-
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
290
|
-
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
|
356
|
-
|
357
|
-
|
358
|
-
|
359
|
-
|
360
|
-
|
361
|
-
|
362
|
-
|
363
|
-
|
364
|
-
|
365
|
-
|
366
|
-
|
367
|
-
|
368
|
-
|
369
|
-
|
370
|
-
|
371
|
-
|
372
|
-
|
373
|
-
|
374
|
-
)
|
375
|
-
|
376
|
-
|
377
|
-
)
|
378
|
-
|
379
|
-
|
380
|
-
|
381
|
-
|
382
|
-
|
383
|
-
|
384
|
-
|
385
|
-
|
386
|
-
|
387
|
-
|
388
|
-
|
389
|
-
|
390
|
-
|
391
|
-
|
392
|
-
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
|
400
|
-
|
401
|
-
|
402
|
-
|
403
|
-
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
|
408
|
-
|
409
|
-
|
410
|
-
|
411
|
-
|
412
|
-
|
413
|
-
|
414
|
-
|
415
|
-
|
416
|
-
|
417
|
-
|
418
|
-
|
419
|
-
|
420
|
-
|
421
|
-
|
422
|
-
|
423
|
-
|
424
|
-
|
425
|
-
|
426
|
-
|
427
|
-
|
428
|
-
|
429
|
-
|
430
|
-
|
431
|
-
from edsl.
|
432
|
-
|
433
|
-
|
434
|
-
|
435
|
-
|
436
|
-
|
437
|
-
|
438
|
-
|
439
|
-
|
440
|
-
|
441
|
-
|
442
|
-
|
443
|
-
|
444
|
-
|
445
|
-
|
446
|
-
|
447
|
-
|
448
|
-
|
449
|
-
|
450
|
-
|
451
|
-
|
452
|
-
|
453
|
-
|
454
|
-
|
455
|
-
|
456
|
-
|
457
|
-
|
458
|
-
|
459
|
-
|
460
|
-
|
461
|
-
|
462
|
-
|
463
|
-
|
464
|
-
|
465
|
-
|
466
|
-
|
467
|
-
|
468
|
-
|
469
|
-
|
470
|
-
|
471
|
-
|
472
|
-
|
473
|
-
|
474
|
-
|
475
|
-
|
476
|
-
|
477
|
-
|
478
|
-
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
483
|
-
|
484
|
-
|
485
|
-
|
486
|
-
|
487
|
-
|
488
|
-
|
489
|
-
|
490
|
-
|
491
|
-
|
492
|
-
|
493
|
-
|
494
|
-
|
495
|
-
|
496
|
-
|
497
|
-
|
498
|
-
|
499
|
-
|
500
|
-
|
501
|
-
|
502
|
-
|
503
|
-
|
504
|
-
|
505
|
-
|
506
|
-
|
507
|
-
from edsl import
|
508
|
-
|
509
|
-
|
510
|
-
|
511
|
-
|
512
|
-
|
513
|
-
|
514
|
-
|
515
|
-
|
516
|
-
|
517
|
-
|
518
|
-
|
519
|
-
|
520
|
-
|
521
|
-
|
522
|
-
|
523
|
-
|
524
|
-
|
525
|
-
|
526
|
-
|
527
|
-
|
528
|
-
|
529
|
-
|
530
|
-
|
531
|
-
|
532
|
-
|
533
|
-
|
534
|
-
|
535
|
-
|
536
|
-
|
537
|
-
|
538
|
-
|
539
|
-
|
540
|
-
|
541
|
-
|
542
|
-
|
543
|
-
|
544
|
-
|
545
|
-
|
546
|
-
|
547
|
-
|
548
|
-
|
549
|
-
|
550
|
-
|
551
|
-
|
552
|
-
|
553
|
-
|
554
|
-
|
555
|
-
|
556
|
-
|
557
|
-
|
558
|
-
|
559
|
-
|
560
|
-
|
561
|
-
|
562
|
-
|
563
|
-
|
564
|
-
|
565
|
-
|
566
|
-
|
567
|
-
|
568
|
-
|
569
|
-
|
570
|
-
|
571
|
-
|
572
|
-
|
573
|
-
|
574
|
-
|
575
|
-
|
576
|
-
|
577
|
-
|
578
|
-
|
579
|
-
|
580
|
-
|
581
|
-
|
582
|
-
|
583
|
-
|
584
|
-
|
585
|
-
|
586
|
-
|
587
|
-
|
588
|
-
|
589
|
-
|
590
|
-
|
591
|
-
|
592
|
-
|
593
|
-
|
594
|
-
|
595
|
-
|
596
|
-
|
597
|
-
|
598
|
-
|
599
|
-
|
600
|
-
|
601
|
-
|
602
|
-
|
603
|
-
|
604
|
-
|
605
|
-
|
606
|
-
|
607
|
-
|
608
|
-
|
609
|
-
|
610
|
-
|
611
|
-
|
612
|
-
|
613
|
-
|
614
|
-
|
615
|
-
|
616
|
-
|
617
|
-
|
618
|
-
|
619
|
-
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
627
|
-
|
628
|
-
|
629
|
-
|
630
|
-
|
631
|
-
|
632
|
-
|
633
|
-
|
634
|
-
|
635
|
-
|
636
|
-
|
637
|
-
|
638
|
-
|
639
|
-
|
640
|
-
|
641
|
-
|
642
|
-
|
643
|
-
|
644
|
-
|
645
|
-
|
646
|
-
|
647
|
-
|
648
|
-
|
649
|
-
|
650
|
-
|
651
|
-
|
652
|
-
|
653
|
-
|
654
|
-
|
655
|
-
|
656
|
-
|
657
|
-
|
658
|
-
|
659
|
-
|
660
|
-
|
661
|
-
|
662
|
-
|
663
|
-
|
664
|
-
|
665
|
-
|
666
|
-
|
667
|
-
|
668
|
-
|
669
|
-
|
670
|
-
|
671
|
-
|
672
|
-
|
673
|
-
|
674
|
-
|
675
|
-
|
676
|
-
|
677
|
-
|
678
|
-
|
679
|
-
|
680
|
-
|
681
|
-
|
682
|
-
|
683
|
-
|
684
|
-
|
685
|
-
|
686
|
-
|
687
|
-
|
688
|
-
|
689
|
-
)
|
690
|
-
|
691
|
-
|
692
|
-
|
693
|
-
|
694
|
-
|
695
|
-
|
696
|
-
|
697
|
-
|
698
|
-
|
699
|
-
|
700
|
-
|
701
|
-
|
702
|
-
|
703
|
-
|
704
|
-
|
705
|
-
|
706
|
-
|
707
|
-
|
708
|
-
|
709
|
-
|
710
|
-
|
711
|
-
|
712
|
-
|
713
|
-
|
714
|
-
|
715
|
-
|
716
|
-
|
717
|
-
|
718
|
-
|
719
|
-
|
720
|
-
|
721
|
-
|
722
|
-
|
723
|
-
|
724
|
-
|
725
|
-
|
726
|
-
|
727
|
-
|
728
|
-
|
729
|
-
|
730
|
-
|
731
|
-
|
732
|
-
|
733
|
-
|
734
|
-
|
735
|
-
|
736
|
-
|
737
|
-
|
738
|
-
|
739
|
-
|
740
|
-
|
741
|
-
|
742
|
-
|
743
|
-
|
744
|
-
|
745
|
-
|
746
|
-
|
747
|
-
|
748
|
-
|
749
|
-
|
750
|
-
|
751
|
-
|
752
|
-
|
753
|
-
|
754
|
-
|
755
|
-
|
756
|
-
|
757
|
-
|
758
|
-
|
759
|
-
|
760
|
-
|
761
|
-
|
762
|
-
|
763
|
-
|
764
|
-
|
765
|
-
|
766
|
-
|
767
|
-
|
768
|
-
|
769
|
-
|
770
|
-
|
771
|
-
|
772
|
-
|
773
|
-
|
774
|
-
|
775
|
-
|
776
|
-
|
777
|
-
|
778
|
-
|
779
|
-
|
780
|
-
|
781
|
-
|
782
|
-
|
783
|
-
|
784
|
-
|
785
|
-
|
786
|
-
|
787
|
-
|
788
|
-
|
789
|
-
|
790
|
-
|
791
|
-
|
792
|
-
|
793
|
-
|
794
|
-
|
795
|
-
|
796
|
-
|
797
|
-
|
798
|
-
|
799
|
-
|
800
|
-
|
801
|
-
|
802
|
-
|
803
|
-
|
804
|
-
|
805
|
-
|
806
|
-
|
807
|
-
|
808
|
-
|
809
|
-
|
810
|
-
|
811
|
-
|
812
|
-
|
813
|
-
|
814
|
-
|
815
|
-
|
816
|
-
|
817
|
-
|
818
|
-
|
819
|
-
|
820
|
-
|
821
|
-
|
822
|
-
|
823
|
-
|
824
|
-
except CoopServerResponseError as e:
|
825
|
-
pass
|
826
|
-
|
827
|
-
return False
|
828
|
-
|
829
|
-
def use_remote_cache(self, disable_remote_cache: bool) -> bool:
|
830
|
-
if disable_remote_cache:
|
831
|
-
return False
|
832
|
-
if not disable_remote_cache:
|
833
|
-
try:
|
834
|
-
from edsl import Coop
|
835
|
-
|
836
|
-
user_edsl_settings = Coop().edsl_settings
|
837
|
-
return user_edsl_settings.get("remote_caching", False)
|
838
|
-
except requests.ConnectionError:
|
839
|
-
pass
|
840
|
-
except CoopServerResponseError as e:
|
841
|
-
pass
|
842
|
-
|
843
|
-
return False
|
844
|
-
|
845
|
-
def check_api_keys(self) -> None:
|
846
|
-
from edsl import Model
|
847
|
-
|
848
|
-
for model in self.models + [Model()]:
|
849
|
-
if not model.has_valid_api_key():
|
850
|
-
raise MissingAPIKeyError(
|
851
|
-
model_name=str(model.model),
|
852
|
-
inference_service=model._inference_service_,
|
853
|
-
)
|
854
|
-
|
855
|
-
def get_missing_api_keys(self) -> set:
|
856
|
-
"""
|
857
|
-
Returns a list of the api keys that a user needs to run this job, but does not currently have in their .env file.
|
858
|
-
"""
|
859
|
-
|
860
|
-
missing_api_keys = set()
|
861
|
-
|
862
|
-
from edsl import Model
|
863
|
-
from edsl.enums import service_to_api_keyname
|
864
|
-
|
865
|
-
for model in self.models + [Model()]:
|
866
|
-
if not model.has_valid_api_key():
|
867
|
-
key_name = service_to_api_keyname.get(
|
868
|
-
model._inference_service_, "NOT FOUND"
|
869
|
-
)
|
870
|
-
missing_api_keys.add(key_name)
|
871
|
-
|
872
|
-
return missing_api_keys
|
873
|
-
|
874
|
-
def user_has_all_model_keys(self):
|
875
|
-
"""
|
876
|
-
Returns True if the user has all model keys required to run their job.
|
877
|
-
|
878
|
-
Otherwise, returns False.
|
879
|
-
"""
|
880
|
-
|
881
|
-
try:
|
882
|
-
self.check_api_keys()
|
883
|
-
return True
|
884
|
-
except MissingAPIKeyError:
|
885
|
-
return False
|
886
|
-
except Exception:
|
887
|
-
raise
|
888
|
-
|
889
|
-
def user_has_ep_api_key(self) -> bool:
|
890
|
-
"""
|
891
|
-
Returns True if the user has an EXPECTED_PARROT_API_KEY in their env.
|
892
|
-
|
893
|
-
Otherwise, returns False.
|
894
|
-
"""
|
895
|
-
|
896
|
-
import os
|
897
|
-
|
898
|
-
coop_api_key = os.getenv("EXPECTED_PARROT_API_KEY")
|
899
|
-
|
900
|
-
if coop_api_key is not None:
|
901
|
-
return True
|
902
|
-
else:
|
903
|
-
return False
|
904
|
-
|
905
|
-
def needs_external_llms(self) -> bool:
|
906
|
-
"""
|
907
|
-
Returns True if the job needs external LLMs to run.
|
908
|
-
|
909
|
-
Otherwise, returns False.
|
910
|
-
"""
|
911
|
-
# These cases are necessary to skip the API key check during doctests
|
912
|
-
|
913
|
-
# Accounts for Results.example()
|
914
|
-
all_agents_answer_questions_directly = len(self.agents) > 0 and all(
|
915
|
-
[hasattr(a, "answer_question_directly") for a in self.agents]
|
916
|
-
)
|
917
|
-
|
918
|
-
# Accounts for InterviewExceptionEntry.example()
|
919
|
-
only_model_is_test = set([m.model for m in self.models]) == set(["test"])
|
920
|
-
|
921
|
-
# Accounts for Survey.__call__
|
922
|
-
all_questions_are_functional = set(
|
923
|
-
[q.question_type for q in self.survey.questions]
|
924
|
-
) == set(["functional"])
|
925
|
-
|
926
|
-
if (
|
927
|
-
all_agents_answer_questions_directly
|
928
|
-
or only_model_is_test
|
929
|
-
or all_questions_are_functional
|
930
|
-
):
|
931
|
-
return False
|
932
|
-
else:
|
933
|
-
return True
|
934
|
-
|
935
|
-
def run(
|
936
|
-
self,
|
937
|
-
n: int = 1,
|
938
|
-
progress_bar: bool = False,
|
939
|
-
stop_on_exception: bool = False,
|
940
|
-
cache: Union[Cache, bool] = None,
|
941
|
-
check_api_keys: bool = False,
|
942
|
-
sidecar_model: Optional[LanguageModel] = None,
|
943
|
-
verbose: bool = False,
|
944
|
-
print_exceptions=True,
|
945
|
-
remote_cache_description: Optional[str] = None,
|
946
|
-
remote_inference_description: Optional[str] = None,
|
947
|
-
remote_inference_results_visibility: Optional[
|
948
|
-
Literal["private", "public", "unlisted"]
|
949
|
-
] = "unlisted",
|
950
|
-
skip_retry: bool = False,
|
951
|
-
raise_validation_errors: bool = False,
|
952
|
-
disable_remote_cache: bool = False,
|
953
|
-
disable_remote_inference: bool = False,
|
954
|
-
) -> Results:
|
955
|
-
"""
|
956
|
-
Runs the Job: conducts Interviews and returns their results.
|
957
|
-
|
958
|
-
:param n: How many times to run each interview
|
959
|
-
:param progress_bar: Whether to show a progress bar
|
960
|
-
:param stop_on_exception: Stops the job if an exception is raised
|
961
|
-
:param cache: A Cache object to store results
|
962
|
-
:param check_api_keys: Raises an error if API keys are invalid
|
963
|
-
:param verbose: Prints extra messages
|
964
|
-
:param remote_cache_description: Specifies a description for this group of entries in the remote cache
|
965
|
-
:param remote_inference_description: Specifies a description for the remote inference job
|
966
|
-
:param remote_inference_results_visibility: The initial visibility of the Results object on Coop. This will only be used for remote jobs!
|
967
|
-
:param disable_remote_cache: If True, the job will not use remote cache. This only works for local jobs!
|
968
|
-
:param disable_remote_inference: If True, the job will not use remote inference
|
969
|
-
"""
|
970
|
-
from edsl.coop.coop import Coop
|
971
|
-
|
972
|
-
self._check_parameters()
|
973
|
-
self._skip_retry = skip_retry
|
974
|
-
self._raise_validation_errors = raise_validation_errors
|
975
|
-
|
976
|
-
self.verbose = verbose
|
977
|
-
|
978
|
-
if (
|
979
|
-
not self.user_has_all_model_keys()
|
980
|
-
and not self.user_has_ep_api_key()
|
981
|
-
and self.needs_external_llms()
|
982
|
-
):
|
983
|
-
import secrets
|
984
|
-
from dotenv import load_dotenv
|
985
|
-
from edsl import CONFIG
|
986
|
-
from edsl.coop.coop import Coop
|
987
|
-
from edsl.utilities.utilities import write_api_key_to_env
|
988
|
-
|
989
|
-
missing_api_keys = self.get_missing_api_keys()
|
990
|
-
|
991
|
-
edsl_auth_token = secrets.token_urlsafe(16)
|
992
|
-
|
993
|
-
print("You're missing some of the API keys needed to run this job:")
|
994
|
-
for api_key in missing_api_keys:
|
995
|
-
print(f" 🔑 {api_key}")
|
996
|
-
print(
|
997
|
-
"\nYou can either add the missing keys to your .env file, or use remote inference."
|
998
|
-
)
|
999
|
-
print("Remote inference allows you to run jobs on our server.")
|
1000
|
-
print("\n🚀 To use remote inference, sign up at the following link:")
|
1001
|
-
|
1002
|
-
coop = Coop()
|
1003
|
-
coop._display_login_url(edsl_auth_token=edsl_auth_token)
|
1004
|
-
|
1005
|
-
print(
|
1006
|
-
"\nOnce you log in, we will automatically retrieve your Expected Parrot API key and continue your job remotely."
|
1007
|
-
)
|
1008
|
-
|
1009
|
-
api_key = coop._poll_for_api_key(edsl_auth_token)
|
1010
|
-
|
1011
|
-
if api_key is None:
|
1012
|
-
print("\nTimed out waiting for login. Please try again.")
|
1013
|
-
return
|
1014
|
-
|
1015
|
-
write_api_key_to_env(api_key)
|
1016
|
-
print("✨ API key retrieved and written to .env file.\n")
|
1017
|
-
|
1018
|
-
# Retrieve API key so we can continue running the job
|
1019
|
-
load_dotenv()
|
1020
|
-
|
1021
|
-
if remote_inference := self.use_remote_inference(disable_remote_inference):
|
1022
|
-
remote_job_creation_data = self.create_remote_inference_job(
|
1023
|
-
iterations=n,
|
1024
|
-
remote_inference_description=remote_inference_description,
|
1025
|
-
remote_inference_results_visibility=remote_inference_results_visibility,
|
1026
|
-
)
|
1027
|
-
results = self.poll_remote_inference_job(remote_job_creation_data)
|
1028
|
-
if results is None:
|
1029
|
-
self._output("Job failed.")
|
1030
|
-
return results
|
1031
|
-
|
1032
|
-
if check_api_keys:
|
1033
|
-
self.check_api_keys()
|
1034
|
-
|
1035
|
-
# handle cache
|
1036
|
-
if cache is None or cache is True:
|
1037
|
-
from edsl.data.CacheHandler import CacheHandler
|
1038
|
-
|
1039
|
-
cache = CacheHandler().get_cache()
|
1040
|
-
if cache is False:
|
1041
|
-
from edsl.data.Cache import Cache
|
1042
|
-
|
1043
|
-
cache = Cache()
|
1044
|
-
|
1045
|
-
remote_cache = self.use_remote_cache(disable_remote_cache)
|
1046
|
-
with RemoteCacheSync(
|
1047
|
-
coop=Coop(),
|
1048
|
-
cache=cache,
|
1049
|
-
output_func=self._output,
|
1050
|
-
remote_cache=remote_cache,
|
1051
|
-
remote_cache_description=remote_cache_description,
|
1052
|
-
) as r:
|
1053
|
-
results = self._run_local(
|
1054
|
-
n=n,
|
1055
|
-
progress_bar=progress_bar,
|
1056
|
-
cache=cache,
|
1057
|
-
stop_on_exception=stop_on_exception,
|
1058
|
-
sidecar_model=sidecar_model,
|
1059
|
-
print_exceptions=print_exceptions,
|
1060
|
-
raise_validation_errors=raise_validation_errors,
|
1061
|
-
)
|
1062
|
-
|
1063
|
-
results.cache = cache.new_entries_cache()
|
1064
|
-
return results
|
1065
|
-
|
1066
|
-
async def create_and_poll_remote_job(
|
1067
|
-
self,
|
1068
|
-
iterations: int = 1,
|
1069
|
-
remote_inference_description: Optional[str] = None,
|
1070
|
-
remote_inference_results_visibility: Optional[
|
1071
|
-
Literal["private", "public", "unlisted"]
|
1072
|
-
] = "unlisted",
|
1073
|
-
) -> Union[Results, None]:
|
1074
|
-
"""
|
1075
|
-
Creates and polls a remote inference job asynchronously.
|
1076
|
-
Reuses existing synchronous methods but runs them in an async context.
|
1077
|
-
|
1078
|
-
:param iterations: Number of times to run each interview
|
1079
|
-
:param remote_inference_description: Optional description for the remote job
|
1080
|
-
:param remote_inference_results_visibility: Visibility setting for results
|
1081
|
-
:return: Results object if successful, None if job fails or is cancelled
|
1082
|
-
"""
|
1083
|
-
import asyncio
|
1084
|
-
from functools import partial
|
1085
|
-
|
1086
|
-
# Create job using existing method
|
1087
|
-
loop = asyncio.get_event_loop()
|
1088
|
-
remote_job_creation_data = await loop.run_in_executor(
|
1089
|
-
None,
|
1090
|
-
partial(
|
1091
|
-
self.create_remote_inference_job,
|
1092
|
-
iterations=iterations,
|
1093
|
-
remote_inference_description=remote_inference_description,
|
1094
|
-
remote_inference_results_visibility=remote_inference_results_visibility,
|
1095
|
-
),
|
1096
|
-
)
|
1097
|
-
|
1098
|
-
# Poll using existing method but with async sleep
|
1099
|
-
return await loop.run_in_executor(
|
1100
|
-
None, partial(self.poll_remote_inference_job, remote_job_creation_data)
|
1101
|
-
)
|
1102
|
-
|
1103
|
-
async def run_async(
|
1104
|
-
self,
|
1105
|
-
cache=None,
|
1106
|
-
n=1,
|
1107
|
-
disable_remote_inference: bool = False,
|
1108
|
-
remote_inference_description: Optional[str] = None,
|
1109
|
-
remote_inference_results_visibility: Optional[
|
1110
|
-
Literal["private", "public", "unlisted"]
|
1111
|
-
] = "unlisted",
|
1112
|
-
**kwargs,
|
1113
|
-
):
|
1114
|
-
"""Run the job asynchronously, either locally or remotely.
|
1115
|
-
|
1116
|
-
:param cache: Cache object or boolean
|
1117
|
-
:param n: Number of iterations
|
1118
|
-
:param disable_remote_inference: If True, forces local execution
|
1119
|
-
:param remote_inference_description: Description for remote jobs
|
1120
|
-
:param remote_inference_results_visibility: Visibility setting for remote results
|
1121
|
-
:param kwargs: Additional arguments passed to local execution
|
1122
|
-
:return: Results object
|
1123
|
-
"""
|
1124
|
-
# Check if we should use remote inference
|
1125
|
-
if remote_inference := self.use_remote_inference(disable_remote_inference):
|
1126
|
-
results = await self.create_and_poll_remote_job(
|
1127
|
-
iterations=n,
|
1128
|
-
remote_inference_description=remote_inference_description,
|
1129
|
-
remote_inference_results_visibility=remote_inference_results_visibility,
|
1130
|
-
)
|
1131
|
-
if results is None:
|
1132
|
-
self._output("Job failed.")
|
1133
|
-
return results
|
1134
|
-
|
1135
|
-
# If not using remote inference, run locally with async
|
1136
|
-
return await JobsRunnerAsyncio(self).run_async(cache=cache, n=n, **kwargs)
|
1137
|
-
|
1138
|
-
def _run_local(self, *args, **kwargs):
|
1139
|
-
"""Run the job locally."""
|
1140
|
-
|
1141
|
-
results = JobsRunnerAsyncio(self).run(*args, **kwargs)
|
1142
|
-
return results
|
1143
|
-
|
1144
|
-
def all_question_parameters(self):
|
1145
|
-
"""Return all the fields in the questions in the survey.
|
1146
|
-
>>> from edsl.jobs import Jobs
|
1147
|
-
>>> Jobs.example().all_question_parameters()
|
1148
|
-
{'period'}
|
1149
|
-
"""
|
1150
|
-
return set.union(*[question.parameters for question in self.survey.questions])
|
1151
|
-
|
1152
|
-
#######################
|
1153
|
-
# Dunder methods
|
1154
|
-
#######################
|
1155
|
-
def print(self):
|
1156
|
-
from rich import print_json
|
1157
|
-
import json
|
1158
|
-
|
1159
|
-
print_json(json.dumps(self.to_dict()))
|
1160
|
-
|
1161
|
-
def __repr__(self) -> str:
|
1162
|
-
"""Return an eval-able string representation of the Jobs instance."""
|
1163
|
-
return f"Jobs(survey={repr(self.survey)}, agents={repr(self.agents)}, models={repr(self.models)}, scenarios={repr(self.scenarios)})"
|
1164
|
-
|
1165
|
-
def _repr_html_(self) -> str:
|
1166
|
-
from rich import print_json
|
1167
|
-
import json
|
1168
|
-
|
1169
|
-
print_json(json.dumps(self.to_dict()))
|
1170
|
-
|
1171
|
-
def __len__(self) -> int:
|
1172
|
-
"""Return the maximum number of questions that will be asked while running this job.
|
1173
|
-
Note that this is the maximum number of questions, not the actual number of questions that will be asked, as some questions may be skipped.
|
1174
|
-
|
1175
|
-
>>> from edsl.jobs import Jobs
|
1176
|
-
>>> len(Jobs.example())
|
1177
|
-
8
|
1178
|
-
"""
|
1179
|
-
number_of_questions = (
|
1180
|
-
len(self.agents or [1])
|
1181
|
-
* len(self.scenarios or [1])
|
1182
|
-
* len(self.models or [1])
|
1183
|
-
* len(self.survey)
|
1184
|
-
)
|
1185
|
-
return number_of_questions
|
1186
|
-
|
1187
|
-
#######################
|
1188
|
-
# Serialization methods
|
1189
|
-
#######################
|
1190
|
-
|
1191
|
-
def to_dict(self, add_edsl_version=True):
|
1192
|
-
d = {
|
1193
|
-
"survey": self.survey.to_dict(add_edsl_version=add_edsl_version),
|
1194
|
-
"agents": [
|
1195
|
-
agent.to_dict(add_edsl_version=add_edsl_version)
|
1196
|
-
for agent in self.agents
|
1197
|
-
],
|
1198
|
-
"models": [
|
1199
|
-
model.to_dict(add_edsl_version=add_edsl_version)
|
1200
|
-
for model in self.models
|
1201
|
-
],
|
1202
|
-
"scenarios": [
|
1203
|
-
scenario.to_dict(add_edsl_version=add_edsl_version)
|
1204
|
-
for scenario in self.scenarios
|
1205
|
-
],
|
1206
|
-
}
|
1207
|
-
if add_edsl_version:
|
1208
|
-
from edsl import __version__
|
1209
|
-
|
1210
|
-
d["edsl_version"] = __version__
|
1211
|
-
d["edsl_class_name"] = "Jobs"
|
1212
|
-
|
1213
|
-
return d
|
1214
|
-
|
1215
|
-
@classmethod
|
1216
|
-
@remove_edsl_version
|
1217
|
-
def from_dict(cls, data: dict) -> Jobs:
|
1218
|
-
"""Creates a Jobs instance from a dictionary."""
|
1219
|
-
from edsl import Survey
|
1220
|
-
from edsl.agents.Agent import Agent
|
1221
|
-
from edsl.language_models.LanguageModel import LanguageModel
|
1222
|
-
from edsl.scenarios.Scenario import Scenario
|
1223
|
-
|
1224
|
-
return cls(
|
1225
|
-
survey=Survey.from_dict(data["survey"]),
|
1226
|
-
agents=[Agent.from_dict(agent) for agent in data["agents"]],
|
1227
|
-
models=[LanguageModel.from_dict(model) for model in data["models"]],
|
1228
|
-
scenarios=[Scenario.from_dict(scenario) for scenario in data["scenarios"]],
|
1229
|
-
)
|
1230
|
-
|
1231
|
-
def __eq__(self, other: Jobs) -> bool:
|
1232
|
-
"""Return True if the Jobs instance is equal to another Jobs instance.
|
1233
|
-
|
1234
|
-
>>> from edsl.jobs import Jobs
|
1235
|
-
>>> Jobs.example() == Jobs.example()
|
1236
|
-
True
|
1237
|
-
|
1238
|
-
"""
|
1239
|
-
return self.to_dict() == other.to_dict()
|
1240
|
-
|
1241
|
-
#######################
|
1242
|
-
# Example methods
|
1243
|
-
#######################
|
1244
|
-
@classmethod
|
1245
|
-
def example(
|
1246
|
-
cls,
|
1247
|
-
throw_exception_probability: float = 0.0,
|
1248
|
-
randomize: bool = False,
|
1249
|
-
test_model=False,
|
1250
|
-
) -> Jobs:
|
1251
|
-
"""Return an example Jobs instance.
|
1252
|
-
|
1253
|
-
:param throw_exception_probability: the probability that an exception will be thrown when answering a question. This is useful for testing error handling.
|
1254
|
-
:param randomize: whether to randomize the job by adding a random string to the period
|
1255
|
-
:param test_model: whether to use a test model
|
1256
|
-
|
1257
|
-
>>> Jobs.example()
|
1258
|
-
Jobs(...)
|
1259
|
-
|
1260
|
-
"""
|
1261
|
-
import random
|
1262
|
-
from uuid import uuid4
|
1263
|
-
from edsl.questions import QuestionMultipleChoice
|
1264
|
-
from edsl.agents.Agent import Agent
|
1265
|
-
from edsl.scenarios.Scenario import Scenario
|
1266
|
-
|
1267
|
-
addition = "" if not randomize else str(uuid4())
|
1268
|
-
|
1269
|
-
if test_model:
|
1270
|
-
from edsl.language_models import LanguageModel
|
1271
|
-
|
1272
|
-
m = LanguageModel.example(test_model=True)
|
1273
|
-
|
1274
|
-
# (status, question, period)
|
1275
|
-
agent_answers = {
|
1276
|
-
("Joyful", "how_feeling", "morning"): "OK",
|
1277
|
-
("Joyful", "how_feeling", "afternoon"): "Great",
|
1278
|
-
("Joyful", "how_feeling_yesterday", "morning"): "Great",
|
1279
|
-
("Joyful", "how_feeling_yesterday", "afternoon"): "Good",
|
1280
|
-
("Sad", "how_feeling", "morning"): "Terrible",
|
1281
|
-
("Sad", "how_feeling", "afternoon"): "OK",
|
1282
|
-
("Sad", "how_feeling_yesterday", "morning"): "OK",
|
1283
|
-
("Sad", "how_feeling_yesterday", "afternoon"): "Terrible",
|
1284
|
-
}
|
1285
|
-
|
1286
|
-
def answer_question_directly(self, question, scenario):
|
1287
|
-
"""Return the answer to a question. This is a method that can be added to an agent."""
|
1288
|
-
|
1289
|
-
if random.random() < throw_exception_probability:
|
1290
|
-
raise Exception("Error!")
|
1291
|
-
return agent_answers[
|
1292
|
-
(self.traits["status"], question.question_name, scenario["period"])
|
1293
|
-
]
|
1294
|
-
|
1295
|
-
sad_agent = Agent(traits={"status": "Sad"})
|
1296
|
-
joy_agent = Agent(traits={"status": "Joyful"})
|
1297
|
-
|
1298
|
-
sad_agent.add_direct_question_answering_method(answer_question_directly)
|
1299
|
-
joy_agent.add_direct_question_answering_method(answer_question_directly)
|
1300
|
-
|
1301
|
-
q1 = QuestionMultipleChoice(
|
1302
|
-
question_text="How are you this {{ period }}?",
|
1303
|
-
question_options=["Good", "Great", "OK", "Terrible"],
|
1304
|
-
question_name="how_feeling",
|
1305
|
-
)
|
1306
|
-
q2 = QuestionMultipleChoice(
|
1307
|
-
question_text="How were you feeling yesterday {{ period }}?",
|
1308
|
-
question_options=["Good", "Great", "OK", "Terrible"],
|
1309
|
-
question_name="how_feeling_yesterday",
|
1310
|
-
)
|
1311
|
-
from edsl import Survey, ScenarioList
|
1312
|
-
|
1313
|
-
base_survey = Survey(questions=[q1, q2])
|
1314
|
-
|
1315
|
-
scenario_list = ScenarioList(
|
1316
|
-
[
|
1317
|
-
Scenario({"period": f"morning{addition}"}),
|
1318
|
-
Scenario({"period": "afternoon"}),
|
1319
|
-
]
|
1320
|
-
)
|
1321
|
-
if test_model:
|
1322
|
-
job = base_survey.by(m).by(scenario_list).by(joy_agent, sad_agent)
|
1323
|
-
else:
|
1324
|
-
job = base_survey.by(scenario_list).by(joy_agent, sad_agent)
|
1325
|
-
|
1326
|
-
return job
|
1327
|
-
|
1328
|
-
def rich_print(self):
|
1329
|
-
"""Print a rich representation of the Jobs instance."""
|
1330
|
-
from rich.table import Table
|
1331
|
-
|
1332
|
-
table = Table(title="Jobs")
|
1333
|
-
table.add_column("Jobs")
|
1334
|
-
table.add_row(self.survey.rich_print())
|
1335
|
-
return table
|
1336
|
-
|
1337
|
-
def code(self):
|
1338
|
-
"""Return the code to create this instance."""
|
1339
|
-
raise NotImplementedError
|
1340
|
-
|
1341
|
-
|
1342
|
-
def main():
|
1343
|
-
"""Run the module's doctests."""
|
1344
|
-
from edsl.jobs import Jobs
|
1345
|
-
from edsl.data.Cache import Cache
|
1346
|
-
|
1347
|
-
job = Jobs.example()
|
1348
|
-
len(job) == 8
|
1349
|
-
results = job.run(cache=Cache())
|
1350
|
-
len(results) == 8
|
1351
|
-
results
|
1352
|
-
|
1353
|
-
|
1354
|
-
if __name__ == "__main__":
|
1355
|
-
"""Run the module's doctests."""
|
1356
|
-
import doctest
|
1357
|
-
|
1358
|
-
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
1
|
+
# """The Jobs class is a collection of agents, scenarios and models and one survey."""
|
2
|
+
from __future__ import annotations
|
3
|
+
import asyncio
|
4
|
+
from inspect import signature
|
5
|
+
from typing import (
|
6
|
+
Literal,
|
7
|
+
Optional,
|
8
|
+
Union,
|
9
|
+
Sequence,
|
10
|
+
Generator,
|
11
|
+
TYPE_CHECKING,
|
12
|
+
Callable,
|
13
|
+
Tuple,
|
14
|
+
)
|
15
|
+
|
16
|
+
from edsl.Base import Base
|
17
|
+
|
18
|
+
from edsl.jobs.buckets.BucketCollection import BucketCollection
|
19
|
+
from edsl.jobs.JobsPrompts import JobsPrompts
|
20
|
+
from edsl.jobs.interviews.Interview import Interview
|
21
|
+
from edsl.utilities.remove_edsl_version import remove_edsl_version
|
22
|
+
from edsl.jobs.runners.JobsRunnerAsyncio import JobsRunnerAsyncio
|
23
|
+
from edsl.data.RemoteCacheSync import RemoteCacheSync
|
24
|
+
from edsl.exceptions.coop import CoopServerResponseError
|
25
|
+
|
26
|
+
from edsl.jobs.JobsChecks import JobsChecks
|
27
|
+
from edsl.jobs.data_structures import RunEnvironment, RunParameters, RunConfig
|
28
|
+
|
29
|
+
if TYPE_CHECKING:
|
30
|
+
from edsl.agents.Agent import Agent
|
31
|
+
from edsl.agents.AgentList import AgentList
|
32
|
+
from edsl.language_models.LanguageModel import LanguageModel
|
33
|
+
from edsl.scenarios.Scenario import Scenario
|
34
|
+
from edsl.scenarios.ScenarioList import ScenarioList
|
35
|
+
from edsl.surveys.Survey import Survey
|
36
|
+
from edsl.results.Results import Results
|
37
|
+
from edsl.results.Dataset import Dataset
|
38
|
+
from edsl.language_models.ModelList import ModelList
|
39
|
+
from edsl.data.Cache import Cache
|
40
|
+
from edsl.language_models.key_management.KeyLookup import KeyLookup
|
41
|
+
|
42
|
+
VisibilityType = Literal["private", "public", "unlisted"]
|
43
|
+
|
44
|
+
from dataclasses import dataclass
|
45
|
+
from typing import Optional, Union, TypeVar, Callable, cast
|
46
|
+
from functools import wraps
|
47
|
+
|
48
|
+
try:
|
49
|
+
from typing import ParamSpec
|
50
|
+
except ImportError:
|
51
|
+
from typing_extensions import ParamSpec
|
52
|
+
|
53
|
+
|
54
|
+
P = ParamSpec("P")
|
55
|
+
T = TypeVar("T")
|
56
|
+
|
57
|
+
|
58
|
+
from edsl.jobs.check_survey_scenario_compatibility import (
|
59
|
+
CheckSurveyScenarioCompatibility,
|
60
|
+
)
|
61
|
+
|
62
|
+
|
63
|
+
def with_config(f: Callable[P, T]) -> Callable[P, T]:
|
64
|
+
"This decorator make it so that the run function parameters match the RunConfig dataclass."
|
65
|
+
parameter_fields = {
|
66
|
+
name: field.default
|
67
|
+
for name, field in RunParameters.__dataclass_fields__.items()
|
68
|
+
}
|
69
|
+
environment_fields = {
|
70
|
+
name: field.default
|
71
|
+
for name, field in RunEnvironment.__dataclass_fields__.items()
|
72
|
+
}
|
73
|
+
combined = {**parameter_fields, **environment_fields}
|
74
|
+
|
75
|
+
@wraps(f)
|
76
|
+
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
|
77
|
+
environment = RunEnvironment(
|
78
|
+
**{k: v for k, v in kwargs.items() if k in environment_fields}
|
79
|
+
)
|
80
|
+
parameters = RunParameters(
|
81
|
+
**{k: v for k, v in kwargs.items() if k in parameter_fields}
|
82
|
+
)
|
83
|
+
config = RunConfig(environment=environment, parameters=parameters)
|
84
|
+
return f(*args, config=config)
|
85
|
+
|
86
|
+
# Update the wrapper's signature to include all RunConfig parameters
|
87
|
+
# old_sig = signature(f)
|
88
|
+
# wrapper.__signature__ = old_sig.replace(
|
89
|
+
# parameters=list(old_sig.parameters.values())[:-1]
|
90
|
+
# + [
|
91
|
+
# old_sig.parameters["config"].replace(
|
92
|
+
# default=parameter_fields[name], name=name
|
93
|
+
# )
|
94
|
+
# for name in combined
|
95
|
+
# ]
|
96
|
+
# )
|
97
|
+
|
98
|
+
return cast(Callable[P, T], wrapper)
|
99
|
+
|
100
|
+
|
101
|
+
class Jobs(Base):
|
102
|
+
"""
|
103
|
+
A collection of agents, scenarios and models and one survey that creates 'interviews'
|
104
|
+
"""
|
105
|
+
|
106
|
+
__documentation__ = "https://docs.expectedparrot.com/en/latest/jobs.html"
|
107
|
+
|
108
|
+
def __init__(
|
109
|
+
self,
|
110
|
+
survey: "Survey",
|
111
|
+
agents: Optional[Union[list[Agent], AgentList]] = None,
|
112
|
+
models: Optional[Union[ModelList, list[LanguageModel]]] = None,
|
113
|
+
scenarios: Optional[Union[ScenarioList, list[Scenario]]] = None,
|
114
|
+
):
|
115
|
+
"""Initialize a Jobs instance.
|
116
|
+
|
117
|
+
:param survey: the survey to be used in the job
|
118
|
+
:param agents: a list of agents
|
119
|
+
:param models: a list of models
|
120
|
+
:param scenarios: a list of scenarios
|
121
|
+
"""
|
122
|
+
self.run_config = RunConfig(
|
123
|
+
environment=RunEnvironment(), parameters=RunParameters()
|
124
|
+
)
|
125
|
+
|
126
|
+
self.survey = survey
|
127
|
+
self.agents: AgentList = agents
|
128
|
+
self.scenarios: ScenarioList = scenarios
|
129
|
+
self.models: ModelList = models
|
130
|
+
|
131
|
+
def add_running_env(self, running_env: RunEnvironment):
|
132
|
+
self.run_config.add_environment(running_env)
|
133
|
+
return self
|
134
|
+
|
135
|
+
def using_cache(self, cache: "Cache") -> Jobs:
|
136
|
+
"""
|
137
|
+
Add a Cache to the job.
|
138
|
+
|
139
|
+
:param cache: the cache to add
|
140
|
+
"""
|
141
|
+
self.run_config.add_cache(cache)
|
142
|
+
return self
|
143
|
+
|
144
|
+
def using_bucket_collection(self, bucket_collection: BucketCollection) -> Jobs:
|
145
|
+
"""
|
146
|
+
Add a BucketCollection to the job.
|
147
|
+
|
148
|
+
:param bucket_collection: the bucket collection to add
|
149
|
+
"""
|
150
|
+
self.run_config.add_bucket_collection(bucket_collection)
|
151
|
+
return self
|
152
|
+
|
153
|
+
def using_key_lookup(self, key_lookup: KeyLookup) -> Jobs:
|
154
|
+
"""
|
155
|
+
Add a KeyLookup to the job.
|
156
|
+
|
157
|
+
:param key_lookup: the key lookup to add
|
158
|
+
"""
|
159
|
+
self.run_config.add_key_lookup(key_lookup)
|
160
|
+
return self
|
161
|
+
|
162
|
+
def using(self, obj: Union[Cache, BucketCollection, KeyLookup]) -> Jobs:
|
163
|
+
"""
|
164
|
+
Add a Cache, BucketCollection, or KeyLookup to the job.
|
165
|
+
|
166
|
+
:param obj: the object to add
|
167
|
+
"""
|
168
|
+
from edsl.data.Cache import Cache
|
169
|
+
from edsl.language_models.key_management.KeyLookup import KeyLookup
|
170
|
+
|
171
|
+
if isinstance(obj, Cache):
|
172
|
+
self.using_cache(obj)
|
173
|
+
elif isinstance(obj, BucketCollection):
|
174
|
+
self.using_bucket_collection(obj)
|
175
|
+
elif isinstance(obj, KeyLookup):
|
176
|
+
self.using_key_lookup(obj)
|
177
|
+
return self
|
178
|
+
|
179
|
+
@property
|
180
|
+
def models(self):
|
181
|
+
return self._models
|
182
|
+
|
183
|
+
@models.setter
|
184
|
+
def models(self, value):
|
185
|
+
from edsl.language_models.ModelList import ModelList
|
186
|
+
|
187
|
+
if value:
|
188
|
+
if not isinstance(value, ModelList):
|
189
|
+
self._models = ModelList(value)
|
190
|
+
else:
|
191
|
+
self._models = value
|
192
|
+
else:
|
193
|
+
self._models = ModelList([])
|
194
|
+
|
195
|
+
# update the bucket collection if it exists
|
196
|
+
if self.run_config.environment.bucket_collection is None:
|
197
|
+
self.run_config.environment.bucket_collection = (
|
198
|
+
self.create_bucket_collection()
|
199
|
+
)
|
200
|
+
|
201
|
+
@property
|
202
|
+
def agents(self):
|
203
|
+
return self._agents
|
204
|
+
|
205
|
+
@agents.setter
|
206
|
+
def agents(self, value):
|
207
|
+
from edsl.agents.AgentList import AgentList
|
208
|
+
|
209
|
+
if value:
|
210
|
+
if not isinstance(value, AgentList):
|
211
|
+
self._agents = AgentList(value)
|
212
|
+
else:
|
213
|
+
self._agents = value
|
214
|
+
else:
|
215
|
+
self._agents = AgentList([])
|
216
|
+
|
217
|
+
@property
|
218
|
+
def scenarios(self):
|
219
|
+
return self._scenarios
|
220
|
+
|
221
|
+
@scenarios.setter
|
222
|
+
def scenarios(self, value):
|
223
|
+
from edsl.scenarios.ScenarioList import ScenarioList
|
224
|
+
from edsl.results.Dataset import Dataset
|
225
|
+
|
226
|
+
if value:
|
227
|
+
if isinstance(
|
228
|
+
value, Dataset
|
229
|
+
): # if the user passes in a Dataset, convert it to a ScenarioList
|
230
|
+
value = value.to_scenario_list()
|
231
|
+
|
232
|
+
if not isinstance(value, ScenarioList):
|
233
|
+
self._scenarios = ScenarioList(value)
|
234
|
+
else:
|
235
|
+
self._scenarios = value
|
236
|
+
else:
|
237
|
+
self._scenarios = ScenarioList([])
|
238
|
+
|
239
|
+
def by(
|
240
|
+
self,
|
241
|
+
*args: Union[
|
242
|
+
Agent,
|
243
|
+
Scenario,
|
244
|
+
LanguageModel,
|
245
|
+
Sequence[Union["Agent", "Scenario", "LanguageModel"]],
|
246
|
+
],
|
247
|
+
) -> Jobs:
|
248
|
+
"""
|
249
|
+
Add Agents, Scenarios and LanguageModels to a job.
|
250
|
+
|
251
|
+
:param args: objects or a sequence (list, tuple, ...) of objects of the same type
|
252
|
+
|
253
|
+
If no objects of this type exist in the Jobs instance, it stores the new objects as a list in the corresponding attribute.
|
254
|
+
Otherwise, it combines the new objects with existing objects using the object's `__add__` method.
|
255
|
+
|
256
|
+
This 'by' is intended to create a fluent interface.
|
257
|
+
|
258
|
+
>>> from edsl.surveys.Survey import Survey
|
259
|
+
>>> from edsl.questions.QuestionFreeText import QuestionFreeText
|
260
|
+
>>> q = QuestionFreeText(question_name="name", question_text="What is your name?")
|
261
|
+
>>> j = Jobs(survey = Survey(questions=[q]))
|
262
|
+
>>> j
|
263
|
+
Jobs(survey=Survey(...), agents=AgentList([]), models=ModelList([]), scenarios=ScenarioList([]))
|
264
|
+
>>> from edsl.agents.Agent import Agent; a = Agent(traits = {"status": "Sad"})
|
265
|
+
>>> j.by(a).agents
|
266
|
+
AgentList([Agent(traits = {'status': 'Sad'})])
|
267
|
+
|
268
|
+
|
269
|
+
Notes:
|
270
|
+
- all objects must implement the 'get_value', 'set_value', and `__add__` methods
|
271
|
+
- agents: traits of new agents are combined with traits of existing agents. New and existing agents should not have overlapping traits, and do not increase the # agents in the instance
|
272
|
+
- scenarios: traits of new scenarios are combined with traits of old existing. New scenarios will overwrite overlapping traits, and do not increase the number of scenarios in the instance
|
273
|
+
- models: new models overwrite old models.
|
274
|
+
"""
|
275
|
+
from edsl.jobs.JobsComponentConstructor import JobsComponentConstructor
|
276
|
+
|
277
|
+
return JobsComponentConstructor(self).by(*args)
|
278
|
+
|
279
|
+
def prompts(self) -> "Dataset":
|
280
|
+
"""Return a Dataset of prompts that will be used.
|
281
|
+
|
282
|
+
|
283
|
+
>>> from edsl.jobs import Jobs
|
284
|
+
>>> Jobs.example().prompts()
|
285
|
+
Dataset(...)
|
286
|
+
"""
|
287
|
+
return JobsPrompts(self).prompts()
|
288
|
+
|
289
|
+
def show_prompts(self, all: bool = False) -> None:
|
290
|
+
"""Print the prompts."""
|
291
|
+
if all:
|
292
|
+
return self.prompts().to_scenario_list().table()
|
293
|
+
else:
|
294
|
+
return (
|
295
|
+
self.prompts().to_scenario_list().table("user_prompt", "system_prompt")
|
296
|
+
)
|
297
|
+
|
298
|
+
@staticmethod
|
299
|
+
def estimate_prompt_cost(
|
300
|
+
system_prompt: str,
|
301
|
+
user_prompt: str,
|
302
|
+
price_lookup: dict,
|
303
|
+
inference_service: str,
|
304
|
+
model: str,
|
305
|
+
) -> dict:
|
306
|
+
"""
|
307
|
+
Estimate the cost of running the prompts.
|
308
|
+
:param iterations: the number of iterations to run
|
309
|
+
:param system_prompt: the system prompt
|
310
|
+
:param user_prompt: the user prompt
|
311
|
+
:param price_lookup: the price lookup
|
312
|
+
:param inference_service: the inference service
|
313
|
+
:param model: the model name
|
314
|
+
"""
|
315
|
+
return JobsPrompts.estimate_prompt_cost(
|
316
|
+
system_prompt, user_prompt, price_lookup, inference_service, model
|
317
|
+
)
|
318
|
+
|
319
|
+
def estimate_job_cost(self, iterations: int = 1) -> dict:
|
320
|
+
"""
|
321
|
+
Estimate the cost of running the job.
|
322
|
+
|
323
|
+
:param iterations: the number of iterations to run
|
324
|
+
"""
|
325
|
+
return JobsPrompts(self).estimate_job_cost(iterations)
|
326
|
+
|
327
|
+
def estimate_job_cost_from_external_prices(
|
328
|
+
self, price_lookup: dict, iterations: int = 1
|
329
|
+
) -> dict:
|
330
|
+
return JobsPrompts(self).estimate_job_cost_from_external_prices(
|
331
|
+
price_lookup, iterations
|
332
|
+
)
|
333
|
+
|
334
|
+
@staticmethod
|
335
|
+
def compute_job_cost(job_results: Results) -> float:
|
336
|
+
"""
|
337
|
+
Computes the cost of a completed job in USD.
|
338
|
+
"""
|
339
|
+
return job_results.compute_job_cost()
|
340
|
+
|
341
|
+
def replace_missing_objects(self) -> None:
|
342
|
+
from edsl.agents.Agent import Agent
|
343
|
+
from edsl.language_models.model import Model
|
344
|
+
from edsl.scenarios.Scenario import Scenario
|
345
|
+
|
346
|
+
self.agents = self.agents or [Agent()]
|
347
|
+
self.models = self.models or [Model()]
|
348
|
+
self.scenarios = self.scenarios or [Scenario()]
|
349
|
+
|
350
|
+
def generate_interviews(self) -> Generator[Interview, None, None]:
|
351
|
+
"""
|
352
|
+
Generate interviews.
|
353
|
+
|
354
|
+
Note that this sets the agents, model and scenarios if they have not been set. This is a side effect of the method.
|
355
|
+
This is useful because a user can create a job without setting the agents, models, or scenarios, and the job will still run,
|
356
|
+
with us filling in defaults.
|
357
|
+
|
358
|
+
"""
|
359
|
+
from edsl.jobs.InterviewsConstructor import InterviewsConstructor
|
360
|
+
|
361
|
+
self.replace_missing_objects()
|
362
|
+
yield from InterviewsConstructor(
|
363
|
+
self, cache=self.run_config.environment.cache
|
364
|
+
).create_interviews()
|
365
|
+
|
366
|
+
def interviews(self) -> list[Interview]:
|
367
|
+
"""
|
368
|
+
Return a list of :class:`edsl.jobs.interviews.Interview` objects.
|
369
|
+
|
370
|
+
It returns one Interview for each combination of Agent, Scenario, and LanguageModel.
|
371
|
+
If any of Agents, Scenarios, or LanguageModels are missing, it fills in with defaults.
|
372
|
+
|
373
|
+
>>> from edsl.jobs import Jobs
|
374
|
+
>>> j = Jobs.example()
|
375
|
+
>>> len(j.interviews())
|
376
|
+
4
|
377
|
+
>>> j.interviews()[0]
|
378
|
+
Interview(agent = Agent(traits = {'status': 'Joyful'}), survey = Survey(...), scenario = Scenario({'period': 'morning'}), model = Model(...))
|
379
|
+
"""
|
380
|
+
return list(self.generate_interviews())
|
381
|
+
|
382
|
+
@classmethod
|
383
|
+
def from_interviews(cls, interview_list) -> "Jobs":
|
384
|
+
"""Return a Jobs instance from a list of interviews.
|
385
|
+
|
386
|
+
This is useful when you have, say, a list of failed interviews and you want to create
|
387
|
+
a new job with only those interviews.
|
388
|
+
"""
|
389
|
+
survey = interview_list[0].survey
|
390
|
+
# get all the models
|
391
|
+
models = list(set([interview.model for interview in interview_list]))
|
392
|
+
jobs = cls(survey)
|
393
|
+
jobs.models = models
|
394
|
+
jobs._interviews = interview_list
|
395
|
+
return jobs
|
396
|
+
|
397
|
+
def create_bucket_collection(self) -> BucketCollection:
|
398
|
+
"""
|
399
|
+
Create a collection of buckets for each model.
|
400
|
+
|
401
|
+
These buckets are used to track API calls and token usage.
|
402
|
+
|
403
|
+
>>> from edsl.jobs import Jobs
|
404
|
+
>>> from edsl import Model
|
405
|
+
>>> j = Jobs.example().by(Model(temperature = 1), Model(temperature = 0.5))
|
406
|
+
>>> bc = j.create_bucket_collection()
|
407
|
+
>>> bc
|
408
|
+
BucketCollection(...)
|
409
|
+
"""
|
410
|
+
return BucketCollection.from_models(self.models)
|
411
|
+
|
412
|
+
def html(self):
|
413
|
+
"""Return the HTML representations for each scenario"""
|
414
|
+
links = []
|
415
|
+
for index, scenario in enumerate(self.scenarios):
|
416
|
+
links.append(
|
417
|
+
self.survey.html(
|
418
|
+
scenario=scenario, return_link=True, cta=f"Scenario {index}"
|
419
|
+
)
|
420
|
+
)
|
421
|
+
return links
|
422
|
+
|
423
|
+
def __hash__(self):
|
424
|
+
"""Allow the model to be used as a key in a dictionary.
|
425
|
+
|
426
|
+
>>> from edsl.jobs import Jobs
|
427
|
+
>>> hash(Jobs.example())
|
428
|
+
846655441787442972
|
429
|
+
|
430
|
+
"""
|
431
|
+
from edsl.utilities.utilities import dict_hash
|
432
|
+
|
433
|
+
return dict_hash(self.to_dict(add_edsl_version=False))
|
434
|
+
|
435
|
+
def _output(self, message) -> None:
|
436
|
+
"""Check if a Job is verbose. If so, print the message."""
|
437
|
+
if self.run_config.parameters.verbose:
|
438
|
+
print(message)
|
439
|
+
# if hasattr(self, "verbose") and self.verbose:
|
440
|
+
# print(message)
|
441
|
+
|
442
|
+
def all_question_parameters(self) -> set:
|
443
|
+
"""Return all the fields in the questions in the survey.
|
444
|
+
>>> from edsl.jobs import Jobs
|
445
|
+
>>> Jobs.example().all_question_parameters()
|
446
|
+
{'period'}
|
447
|
+
"""
|
448
|
+
return set.union(*[question.parameters for question in self.survey.questions])
|
449
|
+
|
450
|
+
def use_remote_cache(self) -> bool:
|
451
|
+
import requests
|
452
|
+
|
453
|
+
if self.run_config.parameters.disable_remote_cache:
|
454
|
+
return False
|
455
|
+
if not self.run_config.parameters.disable_remote_cache:
|
456
|
+
try:
|
457
|
+
from edsl.coop.coop import Coop
|
458
|
+
|
459
|
+
user_edsl_settings = Coop().edsl_settings
|
460
|
+
return user_edsl_settings.get("remote_caching", False)
|
461
|
+
except requests.ConnectionError:
|
462
|
+
pass
|
463
|
+
except CoopServerResponseError as e:
|
464
|
+
pass
|
465
|
+
|
466
|
+
return False
|
467
|
+
|
468
|
+
def _remote_results(
|
469
|
+
self,
|
470
|
+
) -> Union["Results", None]:
|
471
|
+
from edsl.jobs.JobsRemoteInferenceHandler import JobsRemoteInferenceHandler
|
472
|
+
|
473
|
+
jh = JobsRemoteInferenceHandler(
|
474
|
+
self, verbose=self.run_config.parameters.verbose
|
475
|
+
)
|
476
|
+
if jh.use_remote_inference(self.run_config.parameters.disable_remote_inference):
|
477
|
+
job_info = jh.create_remote_inference_job(
|
478
|
+
iterations=self.run_config.parameters.n,
|
479
|
+
remote_inference_description=self.run_config.parameters.remote_inference_description,
|
480
|
+
remote_inference_results_visibility=self.run_config.parameters.remote_inference_results_visibility,
|
481
|
+
)
|
482
|
+
results = jh.poll_remote_inference_job(job_info)
|
483
|
+
return results
|
484
|
+
else:
|
485
|
+
return None
|
486
|
+
|
487
|
+
def _prepare_to_run(self) -> None:
|
488
|
+
"This makes sure that the job is ready to run and that keys are in place for a remote job."
|
489
|
+
CheckSurveyScenarioCompatibility(self.survey, self.scenarios).check()
|
490
|
+
|
491
|
+
def _check_if_remote_keys_ok(self):
|
492
|
+
jc = JobsChecks(self)
|
493
|
+
if jc.needs_key_process():
|
494
|
+
jc.key_process()
|
495
|
+
|
496
|
+
def _check_if_local_keys_ok(self):
|
497
|
+
jc = JobsChecks(self)
|
498
|
+
if self.run_config.parameters.check_api_keys:
|
499
|
+
jc.check_api_keys()
|
500
|
+
|
501
|
+
async def _execute_with_remote_cache(self, run_job_async: bool) -> Results:
    """Execute the job locally inside a remote-cache synchronization context.

    :param run_job_async: when True, await the async runner; otherwise use
        the blocking runner.
    """
    remote_cache_enabled = self.use_remote_cache()

    from edsl.coop.coop import Coop
    from edsl.jobs.runners.JobsRunnerAsyncio import JobsRunnerAsyncio
    from edsl.data.Cache import Cache

    environment = self.run_config.environment
    parameters = self.run_config.parameters

    # A concrete Cache instance must already be in place at this point.
    assert isinstance(environment.cache, Cache)

    with RemoteCacheSync(
        coop=Coop(),
        cache=environment.cache,
        output_func=self._output,
        remote_cache=remote_cache_enabled,
        remote_cache_description=parameters.remote_cache_description,
    ):
        runner = JobsRunnerAsyncio(self, environment=environment)
        if run_job_async:
            outcome = await runner.run_async(parameters)
        else:
            outcome = runner.run(parameters)
    return outcome
|
524
|
+
|
525
|
+
def _setup_and_check(self) -> Optional["Results"]:
    """Prepare the job and attempt remote execution first.

    :returns: the remote ``Results`` when the job completed remotely, or
        ``None`` when the caller should proceed with local execution
        (local keys are verified before returning in that case).

    Fix: the previous annotation ``Tuple[RunConfig, Optional[Results]]``
    did not match the actual return values (``Results`` or ``None``).
    """
    self._prepare_to_run()
    self._check_if_remote_keys_ok()

    # first try to run the job remotely
    if results := self._remote_results():
        return results

    self._check_if_local_keys_ok()
    return None
|
536
|
+
|
537
|
+
@property
def num_interviews(self):
    """Return the interview count, scaled by the repetition parameter ``n``.

    ``len(self)`` is the base count; when ``n`` is set, the total scales
    by ``n``.

    Fix: the ``else`` branch previously computed ``len(self) * n`` but
    never returned it, so the property yielded ``None`` whenever ``n``
    was provided.
    """
    if self.run_config.parameters.n is None:
        return len(self)
    return len(self) * self.run_config.parameters.n
|
543
|
+
|
544
|
+
def _run(self, config: RunConfig):
    """Shared code for run and run_async.

    Copies any caller-supplied environment pieces and parameters from
    ``config`` into ``self.run_config``, resolves the cache, then attempts
    remote execution. Returns the remote ``Results`` when the job ran
    remotely, otherwise returns ``None`` after finishing local setup.
    """
    # Only override environment pieces the caller actually supplied.
    if config.environment.cache is not None:
        self.run_config.environment.cache = config.environment.cache

    if config.environment.bucket_collection is not None:
        self.run_config.environment.bucket_collection = (
            config.environment.bucket_collection
        )

    if config.environment.key_lookup is not None:
        self.run_config.environment.key_lookup = config.environment.key_lookup

    # replace the parameters with the ones from the config
    self.run_config.parameters = config.parameters

    self.replace_missing_objects()

    # try to run remotely first
    self._prepare_to_run()
    self._check_if_remote_keys_ok()

    # cache is None/True -> use the default cache from the handler;
    # cache is False -> use a throwaway, non-persisting cache.
    if (
        self.run_config.environment.cache is None
        or self.run_config.environment.cache is True
    ):
        from edsl.data.CacheHandler import CacheHandler

        self.run_config.environment.cache = CacheHandler().get_cache()

    if self.run_config.environment.cache is False:
        from edsl.data.Cache import Cache

        self.run_config.environment.cache = Cache(immediate_write=False)

    # first try to run the job remotely
    if results := self._remote_results():
        return results

    self._check_if_local_keys_ok()

    # Create a bucket collection only when the caller did not provide one.
    if config.environment.bucket_collection is None:
        self.run_config.environment.bucket_collection = (
            self.create_bucket_collection()
        )
|
589
|
+
|
590
|
+
@with_config
def run(self, *, config: RunConfig) -> "Results":
    """
    Runs the Job: conducts Interviews and returns their results.

    :param n: How many times to run each interview
    :param progress_bar: Whether to show a progress bar
    :param stop_on_exception: Stops the job if an exception is raised
    :param check_api_keys: Raises an error if API keys are invalid
    :param verbose: Prints extra messages
    :param remote_cache_description: Specifies a description for this group of entries in the remote cache
    :param remote_inference_description: Specifies a description for the remote inference job
    :param remote_inference_results_visibility: The initial visibility of the Results object on Coop. This will only be used for remote jobs!
    :param disable_remote_cache: If True, the job will not use remote cache. This only works for local jobs!
    :param disable_remote_inference: If True, the job will not use remote inference
    :param cache: A Cache object to store results
    :param bucket_collection: A BucketCollection object to track API calls
    :param key_lookup: A KeyLookup object to manage API keys
    """
    # Fix: _run returns Results when the job completed via remote
    # inference; previously that value was discarded and the job was
    # executed locally a second time.
    if (results := self._run(config)) is not None:
        return results

    return asyncio.run(self._execute_with_remote_cache(run_job_async=False))
|
612
|
+
|
613
|
+
@with_config
async def run_async(self, *, config: RunConfig) -> "Results":
    """
    Runs the Job: conducts Interviews and returns their results.

    :param n: How many times to run each interview
    :param progress_bar: Whether to show a progress bar
    :param stop_on_exception: Stops the job if an exception is raised
    :param check_api_keys: Raises an error if API keys are invalid
    :param verbose: Prints extra messages
    :param remote_cache_description: Specifies a description for this group of entries in the remote cache
    :param remote_inference_description: Specifies a description for the remote inference job
    :param remote_inference_results_visibility: The initial visibility of the Results object on Coop. This will only be used for remote jobs!
    :param disable_remote_cache: If True, the job will not use remote cache. This only works for local jobs!
    :param disable_remote_inference: If True, the job will not use remote inference
    :param cache: A Cache object to store results
    :param bucket_collection: A BucketCollection object to track API calls
    :param key_lookup: A KeyLookup object to manage API keys
    """
    # Fix: _run returns Results when the job completed via remote
    # inference; previously that value was discarded and the job was
    # executed locally a second time.
    if (results := self._run(config)) is not None:
        return results

    return await self._execute_with_remote_cache(run_job_async=True)
|
635
|
+
|
636
|
+
def __repr__(self) -> str:
|
637
|
+
"""Return an eval-able string representation of the Jobs instance."""
|
638
|
+
return f"Jobs(survey={repr(self.survey)}, agents={repr(self.agents)}, models={repr(self.models)}, scenarios={repr(self.scenarios)})"
|
639
|
+
|
640
|
+
def _summary(self):
|
641
|
+
return {
|
642
|
+
"questions": len(self.survey),
|
643
|
+
"agents": len(self.agents or [1]),
|
644
|
+
"models": len(self.models or [1]),
|
645
|
+
"scenarios": len(self.scenarios or [1]),
|
646
|
+
}
|
647
|
+
|
648
|
+
def __len__(self) -> int:
|
649
|
+
"""Return the maximum number of questions that will be asked while running this job.
|
650
|
+
Note that this is the maximum number of questions, not the actual number of questions that will be asked, as some questions may be skipped.
|
651
|
+
|
652
|
+
>>> from edsl.jobs import Jobs
|
653
|
+
>>> len(Jobs.example())
|
654
|
+
8
|
655
|
+
"""
|
656
|
+
number_of_questions = (
|
657
|
+
len(self.agents or [1])
|
658
|
+
* len(self.scenarios or [1])
|
659
|
+
* len(self.models or [1])
|
660
|
+
* len(self.survey)
|
661
|
+
)
|
662
|
+
return number_of_questions
|
663
|
+
|
664
|
+
def to_dict(self, add_edsl_version=True):
    """Serialize this Jobs instance into a plain dictionary.

    :param add_edsl_version: when True, forward version tagging to every
        component and stamp the top-level dict with version metadata.
    """
    def _serialized(component):
        # One serialization rule shared by every component.
        return component.to_dict(add_edsl_version=add_edsl_version)

    out = {
        "survey": _serialized(self.survey),
        "agents": [_serialized(a) for a in self.agents],
        "models": [_serialized(m) for m in self.models],
        "scenarios": [_serialized(s) for s in self.scenarios],
    }
    if add_edsl_version:
        from edsl import __version__

        out["edsl_version"] = __version__
        out["edsl_class_name"] = "Jobs"

    return out
|
687
|
+
|
688
|
+
def table(self):
    """Return a table view built from this job's prompts (via a scenario list)."""
    return self.prompts().to_scenario_list().table()
|
690
|
+
|
691
|
+
@classmethod
@remove_edsl_version
def from_dict(cls, data: dict) -> Jobs:
    """Creates a Jobs instance from a dictionary."""
    from edsl.surveys.Survey import Survey
    from edsl.agents.Agent import Agent
    from edsl.language_models.LanguageModel import LanguageModel
    from edsl.scenarios.Scenario import Scenario

    survey = Survey.from_dict(data["survey"])
    agents = [Agent.from_dict(entry) for entry in data["agents"]]
    models = [LanguageModel.from_dict(entry) for entry in data["models"]]
    scenarios = [Scenario.from_dict(entry) for entry in data["scenarios"]]
    return cls(survey=survey, agents=agents, models=models, scenarios=scenarios)
|
706
|
+
|
707
|
+
def __eq__(self, other: Jobs) -> bool:
    """Return True if the Jobs instance is equal to another Jobs instance.

    Equality is delegated entirely to the two instances' hashes.

    >>> from edsl.jobs import Jobs
    >>> Jobs.example() == Jobs.example()
    True

    """
    mine, theirs = hash(self), hash(other)
    return mine == theirs
|
716
|
+
|
717
|
+
@classmethod
def example(
    cls,
    throw_exception_probability: float = 0.0,
    randomize: bool = False,
    test_model=False,
) -> Jobs:
    """Return an example Jobs instance.

    :param throw_exception_probability: the probability that an exception will be thrown when answering a question. This is useful for testing error handling.
    :param randomize: whether to randomize the job by adding a random string to the period
    :param test_model: whether to use a test model

    >>> Jobs.example()
    Jobs(...)

    """
    import random
    from uuid import uuid4
    from edsl.questions.QuestionMultipleChoice import QuestionMultipleChoice
    from edsl.agents.Agent import Agent
    from edsl.scenarios.Scenario import Scenario

    # A random suffix makes the scenario unique so cache entries don't collide.
    addition = "" if not randomize else str(uuid4())

    if test_model:
        from edsl.language_models.LanguageModel import LanguageModel

        m = LanguageModel.example(test_model=True)

    # Canned answers keyed by (status, question, period) so the example
    # runs deterministically without calling a real model.
    agent_answers = {
        ("Joyful", "how_feeling", "morning"): "OK",
        ("Joyful", "how_feeling", "afternoon"): "Great",
        ("Joyful", "how_feeling_yesterday", "morning"): "Great",
        ("Joyful", "how_feeling_yesterday", "afternoon"): "Good",
        ("Sad", "how_feeling", "morning"): "Terrible",
        ("Sad", "how_feeling", "afternoon"): "OK",
        ("Sad", "how_feeling_yesterday", "morning"): "OK",
        ("Sad", "how_feeling_yesterday", "afternoon"): "Terrible",
    }

    def answer_question_directly(self, question, scenario):
        """Return the answer to a question. This is a method that can be added to an agent."""
        # Optionally fail at random, for exercising error-handling paths.
        if random.random() < throw_exception_probability:
            raise Exception("Error!")
        return agent_answers[
            (self.traits["status"], question.question_name, scenario["period"])
        ]

    sad_agent = Agent(traits={"status": "Sad"})
    joy_agent = Agent(traits={"status": "Joyful"})

    # Both agents answer from the canned table above instead of a model.
    sad_agent.add_direct_question_answering_method(answer_question_directly)
    joy_agent.add_direct_question_answering_method(answer_question_directly)

    q1 = QuestionMultipleChoice(
        question_text="How are you this {{ period }}?",
        question_options=["Good", "Great", "OK", "Terrible"],
        question_name="how_feeling",
    )
    q2 = QuestionMultipleChoice(
        question_text="How were you feeling yesterday {{ period }}?",
        question_options=["Good", "Great", "OK", "Terrible"],
        question_name="how_feeling_yesterday",
    )
    from edsl.surveys.Survey import Survey
    from edsl.scenarios.ScenarioList import ScenarioList

    base_survey = Survey(questions=[q1, q2])

    scenario_list = ScenarioList(
        [
            Scenario({"period": f"morning{addition}"}),
            Scenario({"period": "afternoon"}),
        ]
    )
    # The test model is only attached when explicitly requested.
    if test_model:
        job = base_survey.by(m).by(scenario_list).by(joy_agent, sad_agent)
    else:
        job = base_survey.by(scenario_list).by(joy_agent, sad_agent)

    return job
|
801
|
+
|
802
|
+
def code(self):
    """Return the code to create this instance.

    Not implemented for Jobs; callers should expect NotImplementedError.
    """
    raise NotImplementedError
|
805
|
+
|
806
|
+
|
807
|
+
def main():
    """Smoke-test the Jobs workflow: build the example job and run it with a fresh cache.

    NOTE(review): the bare ``== 8`` comparisons below are no-ops, not
    assertions — their results are discarded.
    """
    from edsl.jobs.Jobs import Jobs
    from edsl.data.Cache import Cache

    job = Jobs.example()
    len(job) == 8  # no-op comparison; result discarded
    results = job.run(cache=Cache())
    len(results) == 8  # no-op comparison; result discarded
    results
|
817
|
+
|
818
|
+
|
819
|
+
if __name__ == "__main__":
    """Run the module's doctests."""
    import doctest

    # ELLIPSIS lets doctest outputs like Jobs(...) match full reprs.
    doctest.testmod(optionflags=doctest.ELLIPSIS)