edsl 0.1.14__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +348 -38
- edsl/BaseDiff.py +260 -0
- edsl/TemplateLoader.py +24 -0
- edsl/__init__.py +46 -10
- edsl/__version__.py +1 -0
- edsl/agents/Agent.py +842 -144
- edsl/agents/AgentList.py +521 -25
- edsl/agents/Invigilator.py +250 -374
- edsl/agents/InvigilatorBase.py +257 -0
- edsl/agents/PromptConstructor.py +272 -0
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/descriptors.py +43 -13
- edsl/agents/prompt_helpers.py +129 -0
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -0
- edsl/auto/StageBase.py +243 -0
- edsl/auto/StageGenerateSurvey.py +178 -0
- edsl/auto/StageLabelQuestions.py +125 -0
- edsl/auto/StagePersona.py +61 -0
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
- edsl/auto/StagePersonaDimensionValues.py +74 -0
- edsl/auto/StagePersonaDimensions.py +69 -0
- edsl/auto/StageQuestions.py +74 -0
- edsl/auto/SurveyCreatorPipeline.py +21 -0
- edsl/auto/utilities.py +218 -0
- edsl/base/Base.py +279 -0
- edsl/config.py +121 -104
- edsl/conversation/Conversation.py +290 -0
- edsl/conversation/car_buying.py +59 -0
- edsl/conversation/chips.py +95 -0
- edsl/conversation/mug_negotiation.py +81 -0
- edsl/conversation/next_speaker_utilities.py +93 -0
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -0
- edsl/coop/__init__.py +1 -0
- edsl/coop/coop.py +1029 -134
- edsl/coop/utils.py +131 -0
- edsl/data/Cache.py +560 -89
- edsl/data/CacheEntry.py +230 -0
- edsl/data/CacheHandler.py +168 -0
- edsl/data/RemoteCacheSync.py +186 -0
- edsl/data/SQLiteDict.py +292 -0
- edsl/data/__init__.py +5 -3
- edsl/data/orm.py +6 -33
- edsl/data_transfer_models.py +74 -27
- edsl/enums.py +165 -8
- edsl/exceptions/BaseException.py +21 -0
- edsl/exceptions/__init__.py +52 -46
- edsl/exceptions/agents.py +33 -15
- edsl/exceptions/cache.py +5 -0
- edsl/exceptions/coop.py +8 -0
- edsl/exceptions/general.py +34 -0
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +15 -0
- edsl/exceptions/language_models.py +46 -1
- edsl/exceptions/questions.py +80 -5
- edsl/exceptions/results.py +16 -5
- edsl/exceptions/scenarios.py +29 -0
- edsl/exceptions/surveys.py +13 -10
- edsl/inference_services/AnthropicService.py +106 -0
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -0
- edsl/inference_services/AzureAI.py +215 -0
- edsl/inference_services/DeepInfraService.py +18 -0
- edsl/inference_services/GoogleService.py +143 -0
- edsl/inference_services/GroqService.py +20 -0
- edsl/inference_services/InferenceServiceABC.py +80 -0
- edsl/inference_services/InferenceServicesCollection.py +138 -0
- edsl/inference_services/MistralAIService.py +120 -0
- edsl/inference_services/OllamaService.py +18 -0
- edsl/inference_services/OpenAIService.py +236 -0
- edsl/inference_services/PerplexityService.py +160 -0
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -0
- edsl/inference_services/TogetherAIService.py +172 -0
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -0
- edsl/inference_services/rate_limits_cache.py +25 -0
- edsl/inference_services/registry.py +41 -0
- edsl/inference_services/write_available.py +10 -0
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +21 -20
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +684 -204
- edsl/jobs/JobsChecks.py +172 -0
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -0
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -0
- edsl/jobs/buckets/ModelBuckets.py +65 -0
- edsl/jobs/buckets/TokenBucket.py +283 -0
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +392 -0
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -0
- edsl/jobs/interviews/InterviewStatistic.py +63 -0
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -0
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -0
- edsl/jobs/interviews/InterviewStatusLog.py +92 -0
- edsl/jobs/interviews/ReportErrors.py +66 -0
- edsl/jobs/interviews/interview_status_enum.py +9 -0
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -110
- edsl/jobs/runners/JobsRunnerStatus.py +298 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -0
- edsl/jobs/tasks/TaskCreators.py +64 -0
- edsl/jobs/tasks/TaskHistory.py +470 -0
- edsl/jobs/tasks/TaskStatusLog.py +23 -0
- edsl/jobs/tasks/task_status_enum.py +161 -0
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -0
- edsl/jobs/tokens/TokenUsage.py +34 -0
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +507 -386
- edsl/language_models/ModelList.py +164 -0
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -0
- edsl/language_models/__init__.py +1 -8
- edsl/language_models/fake_openai_call.py +15 -0
- edsl/language_models/fake_openai_service.py +61 -0
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +109 -41
- edsl/language_models/utilities.py +65 -0
- edsl/notebooks/Notebook.py +263 -0
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -0
- edsl/prompts/Prompt.py +222 -93
- edsl/prompts/__init__.py +1 -1
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -0
- edsl/questions/QuestionBasePromptsMixin.py +221 -0
- edsl/questions/QuestionBudget.py +164 -67
- edsl/questions/QuestionCheckBox.py +281 -62
- edsl/questions/QuestionDict.py +343 -0
- edsl/questions/QuestionExtract.py +136 -50
- edsl/questions/QuestionFreeText.py +79 -55
- edsl/questions/QuestionFunctional.py +138 -41
- edsl/questions/QuestionList.py +184 -57
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +293 -69
- edsl/questions/QuestionNumerical.py +109 -56
- edsl/questions/QuestionRank.py +244 -49
- edsl/questions/Quick.py +41 -0
- edsl/questions/SimpleAskMixin.py +74 -0
- edsl/questions/__init__.py +9 -6
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +153 -38
- edsl/questions/compose_questions.py +13 -7
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -0
- edsl/questions/derived/QuestionLikertFive.py +28 -26
- edsl/questions/derived/QuestionLinearScale.py +41 -28
- edsl/questions/derived/QuestionTopK.py +34 -26
- edsl/questions/derived/QuestionYesNo.py +40 -27
- edsl/questions/descriptors.py +228 -74
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -0
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
- edsl/questions/prompt_templates/question_extract.jinja +11 -0
- edsl/questions/prompt_templates/question_free_text.jinja +3 -0
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
- edsl/questions/prompt_templates/question_list.jinja +17 -0
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
- edsl/questions/prompt_templates/question_numerical.jinja +37 -0
- edsl/questions/question_base_gen_mixin.py +168 -0
- edsl/questions/question_registry.py +130 -46
- edsl/questions/register_questions_meta.py +71 -0
- edsl/questions/response_validator_abc.py +188 -0
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +5 -2
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +7 -0
- edsl/questions/templates/budget/question_presentation.jinja +7 -0
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
- edsl/questions/templates/dict/__init__.py +0 -0
- edsl/questions/templates/dict/answering_instructions.jinja +21 -0
- edsl/questions/templates/dict/question_presentation.jinja +1 -0
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +7 -0
- edsl/questions/templates/extract/question_presentation.jinja +1 -0
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +1 -0
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
- edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +4 -0
- edsl/questions/templates/list/question_presentation.jinja +5 -0
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +7 -0
- edsl/questions/templates/numerical/question_presentation.jinja +7 -0
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +11 -0
- edsl/questions/templates/rank/question_presentation.jinja +15 -0
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
- edsl/questions/templates/top_k/question_presentation.jinja +22 -0
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
- edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
- edsl/results/CSSParameterizer.py +108 -0
- edsl/results/Dataset.py +550 -19
- edsl/results/DatasetExportMixin.py +594 -0
- edsl/results/DatasetTree.py +295 -0
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +477 -173
- edsl/results/Results.py +987 -269
- edsl/results/ResultsExportMixin.py +28 -125
- edsl/results/ResultsGGMixin.py +83 -15
- edsl/results/TableDisplay.py +125 -0
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +1 -1
- edsl/results/file_exports.py +252 -0
- edsl/results/results_fetch_mixin.py +33 -0
- edsl/results/results_selector.py +145 -0
- edsl/results/results_tools_mixin.py +98 -0
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +78 -0
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -0
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +543 -0
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +431 -62
- edsl/scenarios/ScenarioHtmlMixin.py +65 -0
- edsl/scenarios/ScenarioList.py +1415 -45
- edsl/scenarios/ScenarioListExportMixin.py +45 -0
- edsl/scenarios/ScenarioListPdfMixin.py +239 -0
- edsl/scenarios/__init__.py +2 -0
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +49 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/scenario_join.py +131 -0
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -0
- edsl/study/ObjectEntry.py +173 -0
- edsl/study/ProofOfWork.py +113 -0
- edsl/study/SnapShot.py +80 -0
- edsl/study/Study.py +521 -0
- edsl/study/__init__.py +4 -0
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +92 -11
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +9 -4
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +156 -35
- edsl/surveys/Rule.py +221 -74
- edsl/surveys/RuleCollection.py +241 -61
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1079 -339
- edsl/surveys/SurveyCSS.py +273 -0
- edsl/surveys/SurveyExportMixin.py +235 -40
- edsl/surveys/SurveyFlowVisualization.py +181 -0
- edsl/surveys/SurveyQualtricsImport.py +284 -0
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +4 -2
- edsl/surveys/base.py +19 -3
- edsl/surveys/descriptors.py +17 -6
- edsl/surveys/instructions/ChangeInstruction.py +48 -0
- edsl/surveys/instructions/Instruction.py +56 -0
- edsl/surveys/instructions/InstructionCollection.py +82 -0
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +24 -0
- edsl/templates/error_reporting/exceptions_by_model.html +35 -0
- edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
- edsl/templates/error_reporting/exceptions_by_type.html +17 -0
- edsl/templates/error_reporting/interview_details.html +116 -0
- edsl/templates/error_reporting/interviews.html +19 -0
- edsl/templates/error_reporting/overview.html +5 -0
- edsl/templates/error_reporting/performance_plot.html +2 -0
- edsl/templates/error_reporting/report.css +74 -0
- edsl/templates/error_reporting/report.html +118 -0
- edsl/templates/error_reporting/report.js +25 -0
- edsl/tools/__init__.py +1 -0
- edsl/tools/clusters.py +192 -0
- edsl/tools/embeddings.py +27 -0
- edsl/tools/embeddings_plotting.py +118 -0
- edsl/tools/plotting.py +112 -0
- edsl/tools/summarize.py +18 -0
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +5 -0
- edsl/utilities/__init__.py +21 -20
- edsl/utilities/ast_utilities.py +3 -0
- edsl/utilities/data/Registry.py +2 -0
- edsl/utilities/decorators.py +41 -0
- edsl/utilities/gcp_bucket/__init__.py +0 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
- edsl/utilities/interface.py +310 -60
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -0
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -0
- edsl/utilities/restricted_python.py +70 -0
- edsl/utilities/utilities.py +203 -13
- edsl-0.1.40.dist-info/METADATA +111 -0
- edsl-0.1.40.dist-info/RECORD +362 -0
- {edsl-0.1.14.dist-info → edsl-0.1.40.dist-info}/WHEEL +1 -1
- edsl/agents/AgentListExportMixin.py +0 -24
- edsl/coop/old.py +0 -31
- edsl/data/Database.py +0 -141
- edsl/data/crud.py +0 -121
- edsl/jobs/Interview.py +0 -417
- edsl/jobs/JobsRunner.py +0 -63
- edsl/jobs/JobsRunnerStatusMixin.py +0 -115
- edsl/jobs/base.py +0 -47
- edsl/jobs/buckets.py +0 -166
- edsl/jobs/runners/JobsRunnerDryRun.py +0 -19
- edsl/jobs/runners/JobsRunnerStreaming.py +0 -54
- edsl/jobs/task_management.py +0 -218
- edsl/jobs/token_tracking.py +0 -78
- edsl/language_models/DeepInfra.py +0 -69
- edsl/language_models/OpenAI.py +0 -98
- edsl/language_models/model_interfaces/GeminiPro.py +0 -66
- edsl/language_models/model_interfaces/LanguageModelOpenAIFour.py +0 -8
- edsl/language_models/model_interfaces/LanguageModelOpenAIThreeFiveTurbo.py +0 -8
- edsl/language_models/model_interfaces/LlamaTwo13B.py +0 -21
- edsl/language_models/model_interfaces/LlamaTwo70B.py +0 -21
- edsl/language_models/model_interfaces/Mixtral8x7B.py +0 -24
- edsl/language_models/registry.py +0 -81
- edsl/language_models/schemas.py +0 -15
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/prompts/QuestionInstructionsBase.py +0 -6
- edsl/prompts/library/agent_instructions.py +0 -29
- edsl/prompts/library/agent_persona.py +0 -17
- edsl/prompts/library/question_budget.py +0 -26
- edsl/prompts/library/question_checkbox.py +0 -32
- edsl/prompts/library/question_extract.py +0 -19
- edsl/prompts/library/question_freetext.py +0 -14
- edsl/prompts/library/question_linear_scale.py +0 -20
- edsl/prompts/library/question_list.py +0 -22
- edsl/prompts/library/question_multiple_choice.py +0 -44
- edsl/prompts/library/question_numerical.py +0 -31
- edsl/prompts/library/question_rank.py +0 -21
- edsl/prompts/prompt_config.py +0 -33
- edsl/prompts/registry.py +0 -185
- edsl/questions/Question.py +0 -240
- edsl/report/InputOutputDataTypes.py +0 -134
- edsl/report/RegressionMixin.py +0 -28
- edsl/report/ReportOutputs.py +0 -1228
- edsl/report/ResultsFetchMixin.py +0 -106
- edsl/report/ResultsOutputMixin.py +0 -14
- edsl/report/demo.ipynb +0 -645
- edsl/results/ResultsDBMixin.py +0 -184
- edsl/surveys/SurveyFlowVisualizationMixin.py +0 -92
- edsl/trackers/Tracker.py +0 -91
- edsl/trackers/TrackerAPI.py +0 -196
- edsl/trackers/TrackerTasks.py +0 -70
- edsl/utilities/pastebin.py +0 -141
- edsl-0.1.14.dist-info/METADATA +0 -69
- edsl-0.1.14.dist-info/RECORD +0 -141
- /edsl/{language_models/model_interfaces → inference_services}/__init__.py +0 -0
- /edsl/{report/__init__.py → jobs/runners/JobsRunnerStatusData.py} +0 -0
- /edsl/{trackers/__init__.py → language_models/ServiceDataSources.py} +0 -0
- {edsl-0.1.14.dist-info → edsl-0.1.40.dist-info}/LICENSE +0 -0
@@ -0,0 +1,257 @@
|
|
1
|
+
from abc import ABC, abstractmethod
|
2
|
+
import asyncio
|
3
|
+
from typing import Coroutine, Dict, Any, Optional, TYPE_CHECKING
|
4
|
+
|
5
|
+
from edsl.utilities.decorators import jupyter_nb_handler
|
6
|
+
from edsl.data_transfer_models import AgentResponseDict
|
7
|
+
|
8
|
+
if TYPE_CHECKING:
|
9
|
+
from edsl.prompts.Prompt import Prompt
|
10
|
+
from edsl.data.Cache import Cache
|
11
|
+
from edsl.questions.QuestionBase import QuestionBase
|
12
|
+
from edsl.scenarios.Scenario import Scenario
|
13
|
+
from edsl.surveys.MemoryPlan import MemoryPlan
|
14
|
+
from edsl.language_models.LanguageModel import LanguageModel
|
15
|
+
from edsl.surveys.Survey import Survey
|
16
|
+
from edsl.agents.Agent import Agent
|
17
|
+
from edsl.language_models.key_management.KeyLookup import KeyLookup
|
18
|
+
|
19
|
+
from edsl.data_transfer_models import EDSLResultObjectInput
|
20
|
+
from edsl.agents.PromptConstructor import PromptConstructor
|
21
|
+
from edsl.agents.prompt_helpers import PromptPlan
|
22
|
+
|
23
|
+
|
24
|
+
class InvigilatorBase(ABC):
    """An invigilator (someone who administers an exam) is a class that is responsible for administering a question to an agent.

    >>> InvigilatorBase.example().answer_question()
    {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}

    >>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
    'Failed to get response'

    This returns an empty prompt because there is no memory the agent needs to have at q0.
    """

    def __init__(
        self,
        agent: "Agent",
        question: "QuestionBase",
        scenario: "Scenario",
        model: "LanguageModel",
        memory_plan: "MemoryPlan",
        current_answers: dict,
        survey: Optional["Survey"],
        cache: Optional["Cache"] = None,
        iteration: Optional[int] = 1,
        additional_prompt_data: Optional[dict] = None,
        raise_validation_errors: Optional[bool] = True,
        prompt_plan: Optional["PromptPlan"] = None,
        key_lookup: Optional["KeyLookup"] = None,
    ):
        """Initialize a new Invigilator.

        :param agent: the agent the question is administered to
        :param question: the question being asked
        :param scenario: the scenario the question is asked within
        :param model: the language model that produces the answer
        :param memory_plan: which prior questions/answers to surface in prompts
        :param current_answers: answers collected so far (None becomes {})
        :param survey: the survey the question belongs to
        :param cache: optional response cache
        :param iteration: iteration index of this run
        :param additional_prompt_data: extra data merged into the prompts
        :param raise_validation_errors: whether validation failures raise
        :param prompt_plan: prompt layout; defaults to a fresh PromptPlan()
        :param key_lookup: optional API-key lookup passed to the model layer
        """
        self.agent = agent
        self.question = question
        self.scenario = scenario
        self.model = model
        self.memory_plan = memory_plan
        # Guard against None so downstream code can always treat this as a dict.
        self.current_answers = current_answers or {}
        self.iteration = iteration
        self.additional_prompt_data = additional_prompt_data
        self.cache = cache
        self.survey = survey
        self.raise_validation_errors = raise_validation_errors
        self.key_lookup = key_lookup

        if prompt_plan is None:
            self.prompt_plan = PromptPlan()
        else:
            self.prompt_plan = prompt_plan

        # placeholder to store the raw model response
        self.raw_model_response = None

    @property
    def prompt_constructor(self) -> PromptConstructor:
        """Return a PromptConstructor bound to this invigilator."""
        return PromptConstructor(self, prompt_plan=self.prompt_plan)

    def to_dict(self, include_cache=False) -> Dict[str, Any]:
        """Serialize the invigilator to a plain dictionary.

        Attributes exposing a ``to_dict`` method are serialized recursively;
        primitives pass through unchanged; anything else falls back to ``str()``.

        :param include_cache: also serialize the ``cache`` attribute when True.
        """
        attributes = [
            "agent",
            "question",
            "scenario",
            "model",
            "memory_plan",
            "current_answers",
            "iteration",
            "additional_prompt_data",
            "survey",
        ]
        if include_cache:
            attributes.append("cache")

        def serialize_attribute(attr):
            # One attribute -> JSON-friendly value (or None).
            value = getattr(self, attr)
            if value is None:
                return None
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, (int, float, str, bool, dict, list)):
                return value
            return str(value)

        return {attr: serialize_attribute(attr) for attr in attributes}

    @classmethod
    def from_dict(cls, data) -> "InvigilatorBase":
        """Reconstruct an invigilator from the output of :meth:`to_dict`."""
        from edsl.agents.Agent import Agent
        from edsl.questions import QuestionBase
        from edsl.scenarios.Scenario import Scenario
        from edsl.surveys.MemoryPlan import MemoryPlan
        from edsl.language_models.LanguageModel import LanguageModel
        from edsl.surveys.Survey import Survey
        from edsl.data.Cache import Cache

        attributes_to_classes = {
            "agent": Agent,
            "question": QuestionBase,
            "scenario": Scenario,
            "model": LanguageModel,
            "memory_plan": MemoryPlan,
            "survey": Survey,
            "cache": Cache,
        }
        d = {}
        for attr, cls_ in attributes_to_classes.items():
            # Only deserialize attributes that are present and non-None.
            # (Removed an unreachable `if attr not in data` branch that was
            # nested inside this membership check.)
            if attr in data and data[attr] is not None:
                d[attr] = cls_.from_dict(data[attr])

        d["current_answers"] = data["current_answers"]
        d["iteration"] = data["iteration"]
        d["additional_prompt_data"] = data["additional_prompt_data"]

        # BUG FIX: the constructed instance was previously assigned to a local
        # and never returned, so from_dict always yielded None.
        return cls(**d)

    def __repr__(self) -> str:
        """Return a string representation of the Invigilator.

        >>> InvigilatorBase.example().__repr__()
        'InvigilatorExample(...)'

        """
        # Typo fixes vs. the original output: "scneario=" -> "scenario=",
        # and "iteration{...}" was missing its "=".
        return (
            f"{self.__class__.__name__}(agent={repr(self.agent)}, "
            f"question={repr(self.question)}, scenario={repr(self.scenario)}, "
            f"model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, "
            f"current_answers={repr(self.current_answers)}, "
            f"iteration={repr(self.iteration)}, "
            f"additional_prompt_data={repr(self.additional_prompt_data)}, "
            f"cache={repr(self.cache)})"
        )

    def get_failed_task_result(self, failure_reason: str) -> EDSLResultObjectInput:
        """Return an AgentResponseDict used in case the question-asking fails.

        Possible reasons include:
        - Legitimately skipped because of skip logic
        - Failed to get response from the model

        """
        data = {
            "answer": None,
            "generated_tokens": None,
            "comment": failure_reason,
            "question_name": self.question.question_name,
            "prompts": self.get_prompts(),
            "cached_response": None,
            "raw_model_response": None,
            "cache_used": None,
            "cache_key": None,
        }
        return EDSLResultObjectInput(**data)

    def get_prompts(self) -> Dict[str, "Prompt"]:
        """Return the prompts used; base class returns "NA" placeholders."""
        from edsl.prompts.Prompt import Prompt

        return {
            "user_prompt": Prompt("NA"),
            "system_prompt": Prompt("NA"),
        }

    @abstractmethod
    async def async_answer_question(self):
        """Answer a question (async); must be implemented by subclasses."""
        pass

    @jupyter_nb_handler
    def answer_question(self) -> Coroutine:
        """Return a coroutine that gets the answer to the question."""

        async def main():
            """Return the answer to the question."""
            results = await asyncio.gather(self.async_answer_question())
            return results[0]  # Since there's only one task, return its result

        return main()

    @classmethod
    def example(
        cls, throw_an_exception=False, question=None, scenario=None, survey=None
    ) -> "InvigilatorBase":
        """Return an example invigilator.

        >>> InvigilatorBase.example()
        InvigilatorExample(...)

        >>> InvigilatorBase.example().answer_question()
        {'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}

        >>> InvigilatorBase.example(throw_an_exception=True).answer_question()
        Traceback (most recent call last):
        ...
        Exception: This is a test error
        """
        from edsl.agents.Agent import Agent
        from edsl.scenarios.Scenario import Scenario
        from edsl.surveys.MemoryPlan import MemoryPlan
        from edsl.language_models.model import Model
        from edsl.surveys.Survey import Survey

        # A test model that always answers with a canned response.
        model = Model("test", canned_response="SPAM!")

        if throw_an_exception:
            model.throw_exception = True
        agent = Agent.example()

        if not survey:
            survey = Survey.example()

        # Ensure a caller-supplied question is actually part of the survey.
        if question not in survey.questions and question is not None:
            survey.add_question(question)

        question = question or survey.questions[0]
        scenario = scenario or Scenario.example()
        memory_plan = MemoryPlan(survey=survey)
        current_answers = None

        class InvigilatorExample(cls):
            """An example invigilator."""

            async def async_answer_question(self):
                """Answer a question."""
                return await self.model.async_execute_model_call(
                    user_prompt="Hello", system_prompt="Hi"
                )

        return InvigilatorExample(
            agent=agent,
            question=question,
            scenario=scenario,
            survey=survey,
            model=model,
            memory_plan=memory_plan,
            current_answers=current_answers,
        )
|
252
|
+
|
253
|
+
|
254
|
+
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    # ELLIPSIS lets examples elide variable output with "...".
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
@@ -0,0 +1,272 @@
|
|
1
|
+
from __future__ import annotations
|
2
|
+
from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING
|
3
|
+
from functools import cached_property
|
4
|
+
|
5
|
+
from edsl.prompts.Prompt import Prompt
|
6
|
+
|
7
|
+
from dataclasses import dataclass
|
8
|
+
|
9
|
+
from .prompt_helpers import PromptPlan
|
10
|
+
from .QuestionTemplateReplacementsBuilder import (
|
11
|
+
QuestionTemplateReplacementsBuilder,
|
12
|
+
)
|
13
|
+
from .question_option_processor import QuestionOptionProcessor
|
14
|
+
|
15
|
+
if TYPE_CHECKING:
|
16
|
+
from edsl.agents.InvigilatorBase import InvigilatorBase
|
17
|
+
from edsl.questions.QuestionBase import QuestionBase
|
18
|
+
from edsl.agents.Agent import Agent
|
19
|
+
from edsl.surveys.Survey import Survey
|
20
|
+
from edsl.language_models.LanguageModel import LanguageModel
|
21
|
+
from edsl.surveys.MemoryPlan import MemoryPlan
|
22
|
+
from edsl.questions.QuestionBase import QuestionBase
|
23
|
+
from edsl.scenarios.Scenario import Scenario
|
24
|
+
|
25
|
+
|
26
|
+
class BasePlaceholder:
    """Base class for placeholder values when a question is not yet answered."""

    def __init__(self, placeholder_type: str = "answer"):
        # Sentinel values shown wherever a real answer would normally appear.
        self.value = "N/A"
        self.comment = "Will be populated by prior answer"
        self._type = placeholder_type

    def __getitem__(self, index):
        # Indexing always yields an empty string, so template expressions that
        # subscript a prior answer do not fail before the answer exists.
        return ""

    def __str__(self):
        return f"<<{type(self).__name__}:{self._type}>>"

    def __repr__(self):
        return self.__str__()


class PlaceholderAnswer(BasePlaceholder):
    """Placeholder for a prior answer that has not been produced yet."""

    def __init__(self):
        super().__init__("answer")


class PlaceholderComment(BasePlaceholder):
    """Placeholder for a prior answer's comment."""

    def __init__(self):
        super().__init__("comment")


class PlaceholderGeneratedTokens(BasePlaceholder):
    """Placeholder for a prior answer's generated tokens."""

    def __init__(self):
        super().__init__("generated_tokens")
|
57
|
+
|
58
|
+
|
59
|
+
class PromptConstructor:
    """
    This class constructs the prompts for the language model.

    The pieces of a prompt are:
    - The agent instructions - "You are answering questions as if you were a human. Do not break character."
    - The persona prompt - "You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}"
    - The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
    - The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
    """

    def __init__(
        self, invigilator: "InvigilatorBase", prompt_plan: Optional["PromptPlan"] = None
    ):
        # The invigilator bundles the state for one interview step; every
        # piece this class needs is read off it below.
        self.invigilator = invigilator
        self.prompt_plan = prompt_plan or PromptPlan()

        # Convenience aliases so the rest of the class can reference the
        # collaborating objects directly.
        self.agent = invigilator.agent
        self.question = invigilator.question
        self.scenario = invigilator.scenario
        self.survey = invigilator.survey
        self.model = invigilator.model
        self.current_answers = invigilator.current_answers
        self.memory_plan = invigilator.memory_plan

    def get_question_options(self, question_data):
        """Get the question options.

        Delegates option resolution to QuestionOptionProcessor, passing this
        constructor so the processor can access the scenario and prior answers.
        """
        return QuestionOptionProcessor(self).get_question_options(question_data)

    @cached_property
    def agent_instructions_prompt(self) -> Prompt:
        """Return the agent-instruction portion of the system prompt.

        An "empty" agent (equal to a default-constructed Agent) contributes
        nothing, so an empty Prompt is returned in that case.

        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
        >>> i = InvigilatorBase.example()
        >>> i.prompt_constructor.agent_instructions_prompt
        Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
        """
        from edsl.agents.Agent import Agent

        if self.agent == Agent():  # if agent is empty, then return an empty prompt
            return Prompt(text="")

        return Prompt(text=self.agent.instruction)

    @cached_property
    def agent_persona_prompt(self) -> Prompt:
        """Return the persona (traits) portion of the system prompt.

        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
        >>> i = InvigilatorBase.example()
        >>> i.prompt_constructor.agent_persona_prompt
        Prompt(text=\"""Your traits: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
        """
        from edsl.agents.Agent import Agent

        if self.agent == Agent():  # if agent is empty, then return an empty prompt
            return Prompt(text="")

        return self.agent.prompt()

    def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
        """This is a dictionary of prior answers, if they exist.

        Maps question name -> question object, with answer/comment/
        generated_tokens attributes set from current answers (or
        placeholders for questions not yet answered).
        """
        return self._add_answers(
            self.survey.question_names_to_questions(), self.current_answers
        )

    # NOTE(review): "quetion" is a typo in this method name; kept as-is for
    # backward compatibility with existing callers and the doctests below.
    @staticmethod
    def _extract_quetion_and_entry_type(key_entry) -> tuple[str, str]:
        """
        Extracts the question name and type for the current answer dictionary key entry.

        >>> PromptConstructor._extract_quetion_and_entry_type("q0")
        ('q0', 'answer')
        >>> PromptConstructor._extract_quetion_and_entry_type("q0_comment")
        ('q0', 'comment')
        >>> PromptConstructor._extract_quetion_and_entry_type("q0_alternate_generated_tokens")
        ('q0_alternate', 'generated_tokens')
        >>> PromptConstructor._extract_quetion_and_entry_type("q0_alt_comment")
        ('q0_alt', 'comment')
        """
        # Split once from the right: the suffix (if any) encodes the entry type.
        split_list = key_entry.rsplit("_", maxsplit=1)
        if len(split_list) == 1:
            question_name = split_list[0]
            entry_type = "answer"
        else:
            if split_list[1] == "comment":
                question_name = split_list[0]
                entry_type = "comment"
            elif split_list[1] == "tokens":  # it's actually 'generated_tokens'
                question_name = key_entry.replace("_generated_tokens", "")
                entry_type = "generated_tokens"
            else:
                # Unrecognized suffix: the underscore is part of the question
                # name itself, so the whole key is the question name.
                question_name = key_entry
                entry_type = "answer"
        return question_name, entry_type

    @staticmethod
    def _augmented_answers_dict(current_answers: dict) -> dict:
        """Group the flat current-answers dict by question name.

        >>> PromptConstructor._augmented_answers_dict({"q0": "LOVE IT!", "q0_comment": "I love school!"})
        {'q0': {'answer': 'LOVE IT!', 'comment': 'I love school!'}}
        """
        from collections import defaultdict

        d = defaultdict(dict)
        for key, value in current_answers.items():
            question_name, entry_type = (
                PromptConstructor._extract_quetion_and_entry_type(key)
            )
            d[question_name][entry_type] = value
        return dict(d)

    @staticmethod
    def _add_answers(
        answer_dict: dict, current_answers: dict
    ) -> dict[str, "QuestionBase"]:
        """Attach answer/comment/generated_tokens attributes to each question.

        Questions without a current answer get placeholder objects so that
        template rendering never hits a missing attribute.

        >>> from edsl import QuestionFreeText
        >>> d = {"q0": QuestionFreeText(question_text="Do you like school?", question_name = "q0")}
        >>> current_answers = {"q0": "LOVE IT!"}
        >>> PromptConstructor._add_answers(d, current_answers)['q0'].answer
        'LOVE IT!'
        """
        augmented_answers = PromptConstructor._augmented_answers_dict(current_answers)

        for question in answer_dict:
            if question in augmented_answers:
                # Copy each available entry (answer/comment/generated_tokens)
                # onto the question object.
                for entry_type, value in augmented_answers[question].items():
                    setattr(answer_dict[question], entry_type, value)
            else:
                answer_dict[question].answer = PlaceholderAnswer()
                answer_dict[question].comment = PlaceholderComment()
                answer_dict[question].generated_tokens = PlaceholderGeneratedTokens()
        return answer_dict

    @cached_property
    def question_file_keys(self) -> list:
        """Extracts the file keys from the question text.
        It checks if the variables in the question text are in the scenario file keys.
        """
        return QuestionTemplateReplacementsBuilder(self).question_file_keys()

    @cached_property
    def question_instructions_prompt(self) -> Prompt:
        """Cached, fully rendered question-instruction prompt.

        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
        >>> i = InvigilatorBase.example()
        >>> i.prompt_constructor.question_instructions_prompt
        Prompt(text=\"""...
        ...
        """
        return self.build_question_instructions_prompt()

    def build_question_instructions_prompt(self) -> Prompt:
        """Builds the question instructions prompt."""
        # Imported locally to avoid a circular import with the builder module.
        from edsl.agents.QuestionInstructionPromptBuilder import (
            QuestionInstructionPromptBuilder,
        )

        return QuestionInstructionPromptBuilder(self).build()

    @cached_property
    def prior_question_memory_prompt(self) -> Prompt:
        """Memory of prior Q&A to prepend, empty when there is no memory plan."""
        memory_prompt = Prompt(text="")
        if self.memory_plan is not None:
            # Render the memory fragment against the scenario merged with
            # prior answers, so template variables resolve.
            memory_prompt += self.create_memory_prompt(
                self.question.question_name
            ).render(self.scenario | self.prior_answers_dict())
        return memory_prompt

    def create_memory_prompt(self, question_name: str) -> Prompt:
        """Create a memory for the agent.

        The returns a memory prompt for the agent.

        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
        >>> i = InvigilatorBase.example()
        >>> i.current_answers = {"q0": "Prior answer"}
        >>> i.memory_plan.add_single_memory("q1", "q0")
        >>> p = i.prompt_constructor.create_memory_prompt("q1")
        >>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
        'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
        """
        return self.memory_plan.get_memory_prompt_fragment(
            question_name, self.current_answers
        )

    def get_prompts(self) -> Dict[str, Prompt]:
        """Get both prompts for the LLM call.

        >>> from edsl import QuestionFreeText
        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
        >>> q = QuestionFreeText(question_text="How are you today?", question_name="q_new")
        >>> i = InvigilatorBase.example(question = q)
        >>> i.get_prompts()
        {'user_prompt': ..., 'system_prompt': ...}
        """
        # NOTE(review): question_instructions_prompt is already a Prompt;
        # re-wrapping it in Prompt(...) presumably copies/normalizes it —
        # confirm Prompt's constructor accepts a Prompt instance.
        prompts = self.prompt_plan.get_prompts(
            agent_instructions=self.agent_instructions_prompt,
            agent_persona=self.agent_persona_prompt,
            question_instructions=Prompt(self.question_instructions_prompt),
            prior_question_memory=self.prior_question_memory_prompt,
        )
        # Attach any scenario files referenced by the question text so the
        # model call can include them.
        if self.question_file_keys:
            files_list = []
            for key in self.question_file_keys:
                files_list.append(self.scenario[key])
            prompts["files_list"] = files_list
        return prompts
|
267
|
+
|
268
|
+
|
269
|
+
if __name__ == "__main__":
    # Run the module's embedded doctests; ELLIPSIS lets examples elide
    # long output with "...".
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
@@ -0,0 +1,128 @@
|
|
1
|
+
from typing import Dict, List, Set
|
2
|
+
from warnings import warn
|
3
|
+
from edsl.prompts.Prompt import Prompt
|
4
|
+
|
5
|
+
from edsl.agents.QuestionTemplateReplacementsBuilder import (
|
6
|
+
QuestionTemplateReplacementsBuilder as QTRB,
|
7
|
+
)
|
8
|
+
|
9
|
+
|
10
|
+
class QuestionInstructionPromptBuilder:
    """Constructs and renders the instruction prompt for a single question."""

    def __init__(self, prompt_constructor: "PromptConstructor"):
        self.prompt_constructor = prompt_constructor

        # Collaborators used repeatedly during the build, unpacked once.
        self.model = prompt_constructor.model
        self.survey = prompt_constructor.survey
        self.question = prompt_constructor.question

    def build(self) -> Prompt:
        """Assemble the complete question-instructions prompt.

        Pipeline: base prompt -> option enrichment -> template rendering ->
        leftover-variable validation -> survey-instruction preamble.

        Returns:
            Prompt: The fully rendered question instructions
        """
        staged = self._create_base_prompt()
        staged = self._enrich_with_question_options(staged)
        rendered = self._render_prompt(staged)
        self._validate_template_variables(rendered)

        return self._append_survey_instructions(rendered)

    def _create_base_prompt(self) -> Dict:
        """Build the initial prompt state from the question.

        Returns:
            Dict: {"prompt": unrendered Prompt, "data": copy of question data}
        """
        instructions = self.question.get_instructions(model=self.model.model)
        return {
            "prompt": Prompt(instructions),
            # Copied so option enrichment never mutates the question itself.
            "data": self.question.data.copy(),
        }

    def _enrich_with_question_options(self, prompt_data: Dict) -> Dict:
        """Resolve the question options in the prompt data, when present.

        Args:
            prompt_data: Dictionary containing prompt and question data

        Returns:
            Dict: The same dictionary, with options resolved
        """
        if "question_options" not in prompt_data["data"]:
            return prompt_data

        # Local import mirrors the lazy-loading style used elsewhere.
        from edsl.agents.question_option_processor import QuestionOptionProcessor

        processor = QuestionOptionProcessor(self.prompt_constructor)
        resolved = processor.get_question_options(question_data=prompt_data["data"])

        prompt_data["data"]["question_options"] = resolved
        return prompt_data

    def _render_prompt(self, prompt_data: Dict) -> Prompt:
        """Render the prompt template against the replacement dictionary.

        Args:
            prompt_data: Dictionary containing prompt and question data

        Returns:
            Prompt: Rendered instructions
        """
        builder = QTRB(self.prompt_constructor)
        replacements = builder.build_replacement_dict(prompt_data["data"])
        return prompt_data["prompt"].render(replacements)

    def _validate_template_variables(self, rendered_prompt: Prompt) -> None:
        """Check the rendered prompt for template variables left unresolved.

        Args:
            rendered_prompt: The rendered prompt to validate

        Warns:
            If any template variables remain undefined
        """
        undefined_vars = rendered_prompt.undefined_template_variables({})

        # Surface any leftovers that collide with survey question names.
        self._check_question_names_in_undefined_vars(undefined_vars)

        if undefined_vars:
            warn(f"Question instructions still has variables: {undefined_vars}.")

    def _check_question_names_in_undefined_vars(self, undefined_vars: Set[str]) -> None:
        """Report undefined template variables that match survey question names.

        Args:
            undefined_vars: Set of undefined template variables
        """
        # Preserve survey order when reporting matches.
        matches = [q for q in self.survey.question_names if q in undefined_vars]
        for question_name in matches:
            print(
                f"Question name found in undefined_template_variables: {question_name}"
            )

    def _append_survey_instructions(self, rendered_prompt: Prompt) -> Prompt:
        """Prepend survey-level instructions relevant to this question, if any.

        Args:
            rendered_prompt: The rendered prompt to append instructions to

        Returns:
            Prompt: Final prompt with survey instructions
        """
        relevant = self.survey._relevant_instructions(self.question.question_name)

        if not relevant:
            return rendered_prompt

        preamble = Prompt(text="")
        for instruction in relevant:
            preamble += instruction.text

        return preamble + rendered_prompt
|