edsl 0.1.46__py3-none-any.whl → 0.1.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/__init__.py +44 -39
- edsl/__version__.py +1 -1
- edsl/agents/__init__.py +4 -2
- edsl/agents/{Agent.py → agent.py} +442 -152
- edsl/agents/{AgentList.py → agent_list.py} +220 -162
- edsl/agents/descriptors.py +46 -7
- edsl/{exceptions/agents.py → agents/exceptions.py} +3 -12
- edsl/base/__init__.py +75 -0
- edsl/base/base_class.py +1303 -0
- edsl/base/data_transfer_models.py +114 -0
- edsl/base/enums.py +215 -0
- edsl/base.py +8 -0
- edsl/buckets/__init__.py +25 -0
- edsl/buckets/bucket_collection.py +324 -0
- edsl/buckets/model_buckets.py +206 -0
- edsl/buckets/token_bucket.py +502 -0
- edsl/{jobs/buckets/TokenBucketAPI.py → buckets/token_bucket_api.py} +1 -1
- edsl/buckets/token_bucket_client.py +509 -0
- edsl/caching/__init__.py +20 -0
- edsl/caching/cache.py +814 -0
- edsl/caching/cache_entry.py +427 -0
- edsl/{data/CacheHandler.py → caching/cache_handler.py} +14 -15
- edsl/caching/exceptions.py +24 -0
- edsl/caching/orm.py +30 -0
- edsl/{data/RemoteCacheSync.py → caching/remote_cache_sync.py} +3 -3
- edsl/caching/sql_dict.py +441 -0
- edsl/config/__init__.py +8 -0
- edsl/config/config_class.py +177 -0
- edsl/config.py +4 -176
- edsl/conversation/Conversation.py +7 -7
- edsl/conversation/car_buying.py +4 -4
- edsl/conversation/chips.py +6 -6
- edsl/coop/__init__.py +25 -2
- edsl/coop/coop.py +430 -113
- edsl/coop/{ExpectedParrotKeyHandler.py → ep_key_handling.py} +86 -10
- edsl/coop/exceptions.py +62 -0
- edsl/coop/price_fetcher.py +126 -0
- edsl/coop/utils.py +89 -24
- edsl/data_transfer_models.py +5 -72
- edsl/dataset/__init__.py +10 -0
- edsl/{results/Dataset.py → dataset/dataset.py} +116 -36
- edsl/dataset/dataset_operations_mixin.py +1492 -0
- edsl/{results/DatasetTree.py → dataset/dataset_tree.py} +156 -75
- edsl/{results/TableDisplay.py → dataset/display/table_display.py} +18 -7
- edsl/{results → dataset/display}/table_renderers.py +58 -2
- edsl/{results → dataset}/file_exports.py +4 -5
- edsl/{results → dataset}/smart_objects.py +2 -2
- edsl/enums.py +5 -205
- edsl/inference_services/__init__.py +5 -0
- edsl/inference_services/{AvailableModelCacheHandler.py → available_model_cache_handler.py} +2 -3
- edsl/inference_services/{AvailableModelFetcher.py → available_model_fetcher.py} +8 -14
- edsl/inference_services/data_structures.py +3 -2
- edsl/{exceptions/inference_services.py → inference_services/exceptions.py} +1 -1
- edsl/inference_services/{InferenceServiceABC.py → inference_service_abc.py} +1 -1
- edsl/inference_services/{InferenceServicesCollection.py → inference_services_collection.py} +8 -7
- edsl/inference_services/registry.py +4 -41
- edsl/inference_services/{ServiceAvailability.py → service_availability.py} +5 -25
- edsl/inference_services/services/__init__.py +31 -0
- edsl/inference_services/{AnthropicService.py → services/anthropic_service.py} +3 -3
- edsl/inference_services/{AwsBedrock.py → services/aws_bedrock.py} +2 -2
- edsl/inference_services/{AzureAI.py → services/azure_ai.py} +2 -2
- edsl/inference_services/{DeepInfraService.py → services/deep_infra_service.py} +1 -3
- edsl/inference_services/{DeepSeekService.py → services/deep_seek_service.py} +2 -4
- edsl/inference_services/{GoogleService.py → services/google_service.py} +5 -4
- edsl/inference_services/{GroqService.py → services/groq_service.py} +1 -1
- edsl/inference_services/{MistralAIService.py → services/mistral_ai_service.py} +3 -3
- edsl/inference_services/{OllamaService.py → services/ollama_service.py} +1 -7
- edsl/inference_services/{OpenAIService.py → services/open_ai_service.py} +5 -6
- edsl/inference_services/{PerplexityService.py → services/perplexity_service.py} +12 -12
- edsl/inference_services/{TestService.py → services/test_service.py} +7 -6
- edsl/inference_services/{TogetherAIService.py → services/together_ai_service.py} +2 -6
- edsl/inference_services/{XAIService.py → services/xai_service.py} +1 -1
- edsl/inference_services/write_available.py +1 -2
- edsl/instructions/__init__.py +6 -0
- edsl/{surveys/instructions/Instruction.py → instructions/instruction.py} +11 -6
- edsl/{surveys/instructions/InstructionCollection.py → instructions/instruction_collection.py} +10 -5
- edsl/{surveys/InstructionHandler.py → instructions/instruction_handler.py} +3 -3
- edsl/{jobs/interviews → interviews}/ReportErrors.py +2 -2
- edsl/interviews/__init__.py +4 -0
- edsl/{jobs/AnswerQuestionFunctionConstructor.py → interviews/answering_function.py} +45 -18
- edsl/{jobs/interviews/InterviewExceptionEntry.py → interviews/exception_tracking.py} +107 -22
- edsl/interviews/interview.py +638 -0
- edsl/{jobs/interviews/InterviewStatusDictionary.py → interviews/interview_status_dictionary.py} +21 -12
- edsl/{jobs/interviews/InterviewStatusLog.py → interviews/interview_status_log.py} +16 -7
- edsl/{jobs/InterviewTaskManager.py → interviews/interview_task_manager.py} +12 -7
- edsl/{jobs/RequestTokenEstimator.py → interviews/request_token_estimator.py} +8 -3
- edsl/{jobs/interviews/InterviewStatistic.py → interviews/statistics.py} +36 -10
- edsl/invigilators/__init__.py +38 -0
- edsl/invigilators/invigilator_base.py +477 -0
- edsl/{agents/Invigilator.py → invigilators/invigilators.py} +263 -10
- edsl/invigilators/prompt_constructor.py +476 -0
- edsl/{agents → invigilators}/prompt_helpers.py +2 -1
- edsl/{agents/QuestionInstructionPromptBuilder.py → invigilators/question_instructions_prompt_builder.py} +18 -13
- edsl/{agents → invigilators}/question_option_processor.py +96 -21
- edsl/{agents/QuestionTemplateReplacementsBuilder.py → invigilators/question_template_replacements_builder.py} +64 -12
- edsl/jobs/__init__.py +7 -1
- edsl/jobs/async_interview_runner.py +99 -35
- edsl/jobs/check_survey_scenario_compatibility.py +7 -5
- edsl/jobs/data_structures.py +153 -22
- edsl/{exceptions/jobs.py → jobs/exceptions.py} +2 -1
- edsl/jobs/{FetchInvigilator.py → fetch_invigilator.py} +4 -4
- edsl/jobs/{loggers/HTMLTableJobLogger.py → html_table_job_logger.py} +6 -2
- edsl/jobs/{Jobs.py → jobs.py} +321 -155
- edsl/jobs/{JobsChecks.py → jobs_checks.py} +15 -7
- edsl/jobs/{JobsComponentConstructor.py → jobs_component_constructor.py} +20 -17
- edsl/jobs/{InterviewsConstructor.py → jobs_interview_constructor.py} +10 -5
- edsl/jobs/jobs_pricing_estimation.py +347 -0
- edsl/jobs/{JobsRemoteInferenceLogger.py → jobs_remote_inference_logger.py} +4 -3
- edsl/jobs/jobs_runner_asyncio.py +282 -0
- edsl/jobs/{JobsRemoteInferenceHandler.py → remote_inference.py} +19 -22
- edsl/jobs/results_exceptions_handler.py +2 -2
- edsl/key_management/__init__.py +28 -0
- edsl/key_management/key_lookup.py +161 -0
- edsl/{language_models/key_management/KeyLookupBuilder.py → key_management/key_lookup_builder.py} +118 -47
- edsl/key_management/key_lookup_collection.py +82 -0
- edsl/key_management/models.py +218 -0
- edsl/language_models/__init__.py +7 -2
- edsl/language_models/{ComputeCost.py → compute_cost.py} +18 -3
- edsl/{exceptions/language_models.py → language_models/exceptions.py} +2 -1
- edsl/language_models/language_model.py +1080 -0
- edsl/language_models/model.py +10 -25
- edsl/language_models/{ModelList.py → model_list.py} +9 -14
- edsl/language_models/{RawResponseHandler.py → raw_response_handler.py} +1 -1
- edsl/language_models/{RegisterLanguageModelsMeta.py → registry.py} +1 -1
- edsl/language_models/repair.py +4 -4
- edsl/language_models/utilities.py +4 -4
- edsl/notebooks/__init__.py +3 -1
- edsl/notebooks/{Notebook.py → notebook.py} +7 -8
- edsl/prompts/__init__.py +1 -1
- edsl/{exceptions/prompts.py → prompts/exceptions.py} +3 -1
- edsl/prompts/{Prompt.py → prompt.py} +101 -95
- edsl/questions/HTMLQuestion.py +1 -1
- edsl/questions/__init__.py +154 -25
- edsl/questions/answer_validator_mixin.py +1 -1
- edsl/questions/compose_questions.py +4 -3
- edsl/questions/derived/question_likert_five.py +166 -0
- edsl/questions/derived/{QuestionLinearScale.py → question_linear_scale.py} +4 -4
- edsl/questions/derived/{QuestionTopK.py → question_top_k.py} +4 -4
- edsl/questions/derived/{QuestionYesNo.py → question_yes_no.py} +4 -5
- edsl/questions/descriptors.py +24 -30
- edsl/questions/loop_processor.py +65 -19
- edsl/questions/question_base.py +881 -0
- edsl/questions/question_base_gen_mixin.py +15 -16
- edsl/questions/{QuestionBasePromptsMixin.py → question_base_prompts_mixin.py} +2 -2
- edsl/questions/{QuestionBudget.py → question_budget.py} +3 -4
- edsl/questions/{QuestionCheckBox.py → question_check_box.py} +16 -16
- edsl/questions/{QuestionDict.py → question_dict.py} +39 -5
- edsl/questions/{QuestionExtract.py → question_extract.py} +9 -9
- edsl/questions/question_free_text.py +282 -0
- edsl/questions/{QuestionFunctional.py → question_functional.py} +6 -5
- edsl/questions/{QuestionList.py → question_list.py} +6 -7
- edsl/questions/{QuestionMatrix.py → question_matrix.py} +6 -5
- edsl/questions/{QuestionMultipleChoice.py → question_multiple_choice.py} +126 -21
- edsl/questions/{QuestionNumerical.py → question_numerical.py} +5 -5
- edsl/questions/{QuestionRank.py → question_rank.py} +6 -6
- edsl/questions/question_registry.py +10 -16
- edsl/questions/register_questions_meta.py +8 -4
- edsl/questions/response_validator_abc.py +17 -16
- edsl/results/__init__.py +4 -1
- edsl/{exceptions/results.py → results/exceptions.py} +1 -1
- edsl/results/report.py +197 -0
- edsl/results/{Result.py → result.py} +131 -45
- edsl/results/{Results.py → results.py} +420 -216
- edsl/results/results_selector.py +344 -25
- edsl/scenarios/__init__.py +30 -3
- edsl/scenarios/{ConstructDownloadLink.py → construct_download_link.py} +7 -0
- edsl/scenarios/directory_scanner.py +156 -13
- edsl/scenarios/document_chunker.py +186 -0
- edsl/scenarios/exceptions.py +101 -0
- edsl/scenarios/file_methods.py +2 -3
- edsl/scenarios/file_store.py +755 -0
- edsl/scenarios/handlers/__init__.py +14 -14
- edsl/scenarios/handlers/{csv.py → csv_file_store.py} +1 -2
- edsl/scenarios/handlers/{docx.py → docx_file_store.py} +8 -7
- edsl/scenarios/handlers/{html.py → html_file_store.py} +1 -2
- edsl/scenarios/handlers/{jpeg.py → jpeg_file_store.py} +1 -1
- edsl/scenarios/handlers/{json.py → json_file_store.py} +1 -1
- edsl/scenarios/handlers/latex_file_store.py +5 -0
- edsl/scenarios/handlers/{md.py → md_file_store.py} +1 -1
- edsl/scenarios/handlers/{pdf.py → pdf_file_store.py} +2 -2
- edsl/scenarios/handlers/{png.py → png_file_store.py} +1 -1
- edsl/scenarios/handlers/{pptx.py → pptx_file_store.py} +8 -7
- edsl/scenarios/handlers/{py.py → py_file_store.py} +1 -3
- edsl/scenarios/handlers/{sql.py → sql_file_store.py} +2 -1
- edsl/scenarios/handlers/{sqlite.py → sqlite_file_store.py} +2 -3
- edsl/scenarios/handlers/{txt.py → txt_file_store.py} +1 -1
- edsl/scenarios/scenario.py +928 -0
- edsl/scenarios/scenario_join.py +18 -5
- edsl/scenarios/{ScenarioList.py → scenario_list.py} +424 -106
- edsl/scenarios/{ScenarioListPdfMixin.py → scenario_list_pdf_tools.py} +16 -15
- edsl/scenarios/scenario_selector.py +5 -1
- edsl/study/ObjectEntry.py +2 -2
- edsl/study/SnapShot.py +5 -5
- edsl/study/Study.py +20 -21
- edsl/study/__init__.py +6 -4
- edsl/surveys/__init__.py +7 -4
- edsl/surveys/dag/__init__.py +2 -0
- edsl/surveys/{ConstructDAG.py → dag/construct_dag.py} +3 -3
- edsl/surveys/{DAG.py → dag/dag.py} +13 -10
- edsl/surveys/descriptors.py +1 -1
- edsl/surveys/{EditSurvey.py → edit_survey.py} +9 -9
- edsl/{exceptions/surveys.py → surveys/exceptions.py} +1 -2
- edsl/surveys/memory/__init__.py +3 -0
- edsl/surveys/{MemoryPlan.py → memory/memory_plan.py} +10 -9
- edsl/surveys/rules/__init__.py +3 -0
- edsl/surveys/{Rule.py → rules/rule.py} +103 -43
- edsl/surveys/{RuleCollection.py → rules/rule_collection.py} +21 -30
- edsl/surveys/{RuleManager.py → rules/rule_manager.py} +19 -13
- edsl/surveys/survey.py +1743 -0
- edsl/surveys/{SurveyExportMixin.py → survey_export.py} +22 -27
- edsl/surveys/{SurveyFlowVisualization.py → survey_flow_visualization.py} +11 -2
- edsl/surveys/{Simulator.py → survey_simulator.py} +10 -3
- edsl/tasks/__init__.py +32 -0
- edsl/{jobs/tasks/QuestionTaskCreator.py → tasks/question_task_creator.py} +115 -57
- edsl/tasks/task_creators.py +135 -0
- edsl/{jobs/tasks/TaskHistory.py → tasks/task_history.py} +86 -47
- edsl/{jobs/tasks → tasks}/task_status_enum.py +91 -7
- edsl/tasks/task_status_log.py +85 -0
- edsl/tokens/__init__.py +2 -0
- edsl/tokens/interview_token_usage.py +53 -0
- edsl/utilities/PrettyList.py +1 -1
- edsl/utilities/SystemInfo.py +25 -22
- edsl/utilities/__init__.py +29 -21
- edsl/utilities/gcp_bucket/__init__.py +2 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +99 -96
- edsl/utilities/interface.py +44 -536
- edsl/{results/MarkdownToPDF.py → utilities/markdown_to_pdf.py} +13 -5
- edsl/utilities/repair_functions.py +1 -1
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/METADATA +3 -2
- edsl-0.1.48.dist-info/RECORD +347 -0
- edsl/Base.py +0 -426
- edsl/BaseDiff.py +0 -260
- edsl/agents/InvigilatorBase.py +0 -260
- edsl/agents/PromptConstructor.py +0 -318
- edsl/auto/AutoStudy.py +0 -130
- edsl/auto/StageBase.py +0 -243
- edsl/auto/StageGenerateSurvey.py +0 -178
- edsl/auto/StageLabelQuestions.py +0 -125
- edsl/auto/StagePersona.py +0 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
- edsl/auto/StagePersonaDimensionValues.py +0 -74
- edsl/auto/StagePersonaDimensions.py +0 -69
- edsl/auto/StageQuestions.py +0 -74
- edsl/auto/SurveyCreatorPipeline.py +0 -21
- edsl/auto/utilities.py +0 -218
- edsl/base/Base.py +0 -279
- edsl/coop/PriceFetcher.py +0 -54
- edsl/data/Cache.py +0 -580
- edsl/data/CacheEntry.py +0 -230
- edsl/data/SQLiteDict.py +0 -292
- edsl/data/__init__.py +0 -5
- edsl/data/orm.py +0 -10
- edsl/exceptions/cache.py +0 -5
- edsl/exceptions/coop.py +0 -14
- edsl/exceptions/data.py +0 -14
- edsl/exceptions/scenarios.py +0 -29
- edsl/jobs/Answers.py +0 -43
- edsl/jobs/JobsPrompts.py +0 -354
- edsl/jobs/buckets/BucketCollection.py +0 -134
- edsl/jobs/buckets/ModelBuckets.py +0 -65
- edsl/jobs/buckets/TokenBucket.py +0 -283
- edsl/jobs/buckets/TokenBucketClient.py +0 -191
- edsl/jobs/interviews/Interview.py +0 -395
- edsl/jobs/interviews/InterviewExceptionCollection.py +0 -99
- edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -25
- edsl/jobs/runners/JobsRunnerAsyncio.py +0 -163
- edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- edsl/jobs/tasks/TaskCreators.py +0 -64
- edsl/jobs/tasks/TaskStatusLog.py +0 -23
- edsl/jobs/tokens/InterviewTokenUsage.py +0 -27
- edsl/language_models/LanguageModel.py +0 -635
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/key_management/KeyLookup.py +0 -63
- edsl/language_models/key_management/KeyLookupCollection.py +0 -38
- edsl/language_models/key_management/models.py +0 -137
- edsl/questions/QuestionBase.py +0 -539
- edsl/questions/QuestionFreeText.py +0 -130
- edsl/questions/derived/QuestionLikertFive.py +0 -76
- edsl/results/DatasetExportMixin.py +0 -911
- edsl/results/ResultsExportMixin.py +0 -45
- edsl/results/TextEditor.py +0 -50
- edsl/results/results_fetch_mixin.py +0 -33
- edsl/results/results_tools_mixin.py +0 -98
- edsl/scenarios/DocumentChunker.py +0 -104
- edsl/scenarios/FileStore.py +0 -564
- edsl/scenarios/Scenario.py +0 -548
- edsl/scenarios/ScenarioHtmlMixin.py +0 -65
- edsl/scenarios/ScenarioListExportMixin.py +0 -45
- edsl/scenarios/handlers/latex.py +0 -5
- edsl/shared.py +0 -1
- edsl/surveys/Survey.py +0 -1306
- edsl/surveys/SurveyQualtricsImport.py +0 -284
- edsl/surveys/SurveyToApp.py +0 -141
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/tools/__init__.py +0 -1
- edsl/tools/clusters.py +0 -192
- edsl/tools/embeddings.py +0 -27
- edsl/tools/embeddings_plotting.py +0 -118
- edsl/tools/plotting.py +0 -112
- edsl/tools/summarize.py +0 -18
- edsl/utilities/data/Registry.py +0 -6
- edsl/utilities/data/__init__.py +0 -1
- edsl/utilities/data/scooter_results.json +0 -1
- edsl-0.1.46.dist-info/RECORD +0 -366
- /edsl/coop/{CoopFunctionsMixin.py → coop_functions.py} +0 -0
- /edsl/{results → dataset/display}/CSSParameterizer.py +0 -0
- /edsl/{language_models/key_management → dataset/display}/__init__.py +0 -0
- /edsl/{results → dataset/display}/table_data_class.py +0 -0
- /edsl/{results → dataset/display}/table_display.css +0 -0
- /edsl/{results/ResultsGGMixin.py → dataset/r/ggplot.py} +0 -0
- /edsl/{results → dataset}/tree_explore.py +0 -0
- /edsl/{surveys/instructions/ChangeInstruction.py → instructions/change_instruction.py} +0 -0
- /edsl/{jobs/interviews → interviews}/interview_status_enum.py +0 -0
- /edsl/jobs/{runners/JobsRunnerStatus.py → jobs_runner_status.py} +0 -0
- /edsl/language_models/{PriceManager.py → price_manager.py} +0 -0
- /edsl/language_models/{fake_openai_call.py → unused/fake_openai_call.py} +0 -0
- /edsl/language_models/{fake_openai_service.py → unused/fake_openai_service.py} +0 -0
- /edsl/notebooks/{NotebookToLaTeX.py → notebook_to_latex.py} +0 -0
- /edsl/{exceptions/questions.py → questions/exceptions.py} +0 -0
- /edsl/questions/{SimpleAskMixin.py → simple_ask_mixin.py} +0 -0
- /edsl/surveys/{Memory.py → memory/memory.py} +0 -0
- /edsl/surveys/{MemoryManagement.py → memory/memory_management.py} +0 -0
- /edsl/surveys/{SurveyCSS.py → survey_css.py} +0 -0
- /edsl/{jobs/tokens/TokenUsage.py → tokens/token_usage.py} +0 -0
- /edsl/{results/MarkdownToDocx.py → utilities/markdown_to_docx.py} +0 -0
- /edsl/{TemplateLoader.py → utilities/template_loader.py} +0 -0
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/LICENSE +0 -0
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/WHEEL +0 -0
@@ -0,0 +1,476 @@
|
|
1
|
+
from __future__ import annotations
|
2
|
+
from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING, Literal
|
3
|
+
from functools import cached_property
|
4
|
+
import time
|
5
|
+
import logging
|
6
|
+
|
7
|
+
from ..prompts import Prompt
|
8
|
+
from ..scenarios import Scenario
|
9
|
+
from ..surveys import Survey
|
10
|
+
|
11
|
+
from .prompt_helpers import PromptPlan
|
12
|
+
from .question_template_replacements_builder import (
|
13
|
+
QuestionTemplateReplacementsBuilder,
|
14
|
+
)
|
15
|
+
from .question_option_processor import QuestionOptionProcessor
|
16
|
+
|
17
|
+
if TYPE_CHECKING:
|
18
|
+
from .invigilators import InvigilatorBase
|
19
|
+
from ..questions import QuestionBase
|
20
|
+
from ..agents import Agent
|
21
|
+
from ..language_models import LanguageModel
|
22
|
+
from ..surveys.memory import MemoryPlan
|
23
|
+
from ..questions import QuestionBase
|
24
|
+
from ..scenarios import Scenario
|
25
|
+
|
26
|
+
logger = logging.getLogger(__name__)
|
27
|
+
|
28
|
+
class BasePlaceholder:
    """Marker object standing in for a prior answer that does not exist yet.

    Prompt templates may reference answers to questions that have not been
    asked or answered at render time. Instances of this class (and its
    subclasses) are substituted for those missing values so rendering can
    proceed gracefully instead of failing.

    Attributes:
        value: Default value seen when the placeholder is read directly.
        comment: Human-readable note about why the value is a placeholder.
        _type: Kind of placeholder ("answer", "comment", "generated_tokens").
    """

    def __init__(self, placeholder_type: str = "answer"):
        """Create a placeholder of the given kind.

        Args:
            placeholder_type: Kind of placeholder (e.g. "answer", "comment").
        """
        self.value = "N/A"
        self.comment = "Will be populated by prior answer"
        self._type = placeholder_type

    def __getitem__(self, index: Any) -> str:
        """Act like an empty collection: indexing always yields ''.

        This keeps templates that index into a prior answer from raising
        when that answer is not available yet.

        Args:
            index: Ignored.

        Returns:
            The empty string.
        """
        return ""

    def __str__(self) -> str:
        """Render as '<<ClassName:type>>' for display and logging."""
        return f"<<{type(self).__name__}:{self._type}>>"

    def __repr__(self) -> str:
        """Mirror __str__ for debugging output."""
        return str(self)
|
98
|
+
|
99
|
+
|
100
|
+
class PlaceholderAnswer(BasePlaceholder):
    """Placeholder for a prior question's answer that is not yet available."""

    def __init__(self):
        super().__init__(placeholder_type="answer")
|
103
|
+
|
104
|
+
|
105
|
+
class PlaceholderComment(BasePlaceholder):
    """Placeholder for a prior question's comment that is not yet available."""

    def __init__(self):
        super().__init__(placeholder_type="comment")
|
108
|
+
|
109
|
+
|
110
|
+
class PlaceholderGeneratedTokens(BasePlaceholder):
    """Placeholder for a prior question's generated tokens, not yet available."""

    def __init__(self):
        super().__init__(placeholder_type="generated_tokens")
|
113
|
+
|
114
|
+
|
115
|
+
class PromptConstructor:
|
116
|
+
"""
|
117
|
+
Constructs structured prompts for language models based on questions, agents, and context.
|
118
|
+
|
119
|
+
The PromptConstructor is a critical component in the invigilator architecture that
|
120
|
+
assembles the various elements needed to form effective prompts for language models.
|
121
|
+
It handles the complex task of combining question content, agent characteristics,
|
122
|
+
response requirements, and contextual information into coherent prompts that elicit
|
123
|
+
well-structured responses.
|
124
|
+
|
125
|
+
Prompt Architecture:
|
126
|
+
The constructor builds prompts with several distinct components:
|
127
|
+
|
128
|
+
1. Agent Instructions:
|
129
|
+
- Core instructions about the agent's role and behavior
|
130
|
+
- Example: "You are answering questions as if you were a human. Do not break character."
|
131
|
+
|
132
|
+
2. Persona Prompt:
|
133
|
+
- Details about the agent's characteristics and traits
|
134
|
+
- Example: "You are an agent with the following persona: {'age': 22, 'hair': 'brown'}"
|
135
|
+
|
136
|
+
3. Question Instructions:
|
137
|
+
- The question itself with instructions on how to answer
|
138
|
+
- Example: "You are being asked: Do you like school? The options are 0: yes 1: no
|
139
|
+
Return a valid JSON with your answer code and explanation."
|
140
|
+
|
141
|
+
4. Memory Prompt:
|
142
|
+
- Information about previous questions and answers in the sequence
|
143
|
+
- Example: "Before this question, you answered: Question: Do you like school? Answer: Yes"
|
144
|
+
|
145
|
+
Technical Design:
|
146
|
+
- Uses a template-based approach for flexibility and consistency
|
147
|
+
- Processes question options to present them clearly to the model
|
148
|
+
- Handles template variable replacements for scenarios and previous answers
|
149
|
+
- Supports both system and user prompts with appropriate content separation
|
150
|
+
- Caches computed properties for efficiency
|
151
|
+
|
152
|
+
Implementation Notes:
|
153
|
+
- The class performs no direct I/O or model calls
|
154
|
+
- It focuses solely on prompt construction, adhering to single responsibility principle
|
155
|
+
- Various helper classes handle specialized aspects of prompt construction
|
156
|
+
- Extensive use of cached_property for computational efficiency with complex prompts
|
157
|
+
"""
|
158
|
+
@classmethod
def from_invigilator(
    cls,
    invigilator: "InvigilatorBase",
    prompt_plan: Optional["PromptPlan"] = None,
) -> "PromptConstructor":
    """Factory: build a PromptConstructor from an invigilator's components.

    Pulls the agent, question, scenario, survey, model, current answers,
    and memory plan off the invigilator so callers don't have to wire
    them up individually. This is the primary way to create a
    PromptConstructor when administering questions.

    Args:
        invigilator: Source of all prompt-construction components.
        prompt_plan: Optional override; falls back to the invigilator's
            own prompt plan when not supplied (or falsy).

    Returns:
        A PromptConstructor configured from the invigilator's components.
    """
    chosen_plan = prompt_plan or invigilator.prompt_plan
    return cls(
        agent=invigilator.agent,
        question=invigilator.question,
        scenario=invigilator.scenario,
        survey=invigilator.survey,
        model=invigilator.model,
        current_answers=invigilator.current_answers,
        memory_plan=invigilator.memory_plan,
        prompt_plan=chosen_plan,
    )
|
194
|
+
|
195
|
+
def __init__(
    self,
    agent: "Agent",
    question: "QuestionBase",
    scenario: "Scenario",
    survey: "Survey",
    model: "LanguageModel",
    current_answers: dict,
    memory_plan: "MemoryPlan",
    prompt_plan: Optional["PromptPlan"] = None,
):
    """Bind together everything needed to build prompts for one question.

    Args:
        agent: Agent whose instructions and persona feed the prompts.
        question: Question currently being asked.
        scenario: Scenario supplying template context for the question.
        survey: Survey the question belongs to.
        model: Language model that will receive the prompts.
        current_answers: Answers gathered so far, keyed by question name.
        memory_plan: Strategy for carrying prior Q&A into this prompt.
        prompt_plan: Layout of prompt components; a default PromptPlan is
            created when none (or a falsy value) is supplied.

    Note:
        Construction only stores references; the expensive prompt assembly
        happens lazily in @cached_property accessors.
    """
    self.agent = agent
    self.question = question
    self.scenario = scenario
    self.survey = survey
    self.model = model
    self.current_answers = current_answers
    self.memory_plan = memory_plan
    self.prompt_plan = PromptPlan() if not prompt_plan else prompt_plan

    # Variables captured while rendering templates are stashed here.
    self.captured_variables = {}
|
242
|
+
|
243
|
+
def get_question_options(self, question_data: dict) -> list[str]:
    """Return the prompt-ready option strings for a question.

    Option formatting differs by question type; that specialized logic
    lives in QuestionOptionProcessor, to which this method delegates.
    Keeping it separate keeps this class focused on overall prompt
    assembly rather than option-formatting details.

    Args:
        question_data: Raw question dictionary, including its option data.

    Returns:
        A list of option strings formatted for inclusion in a prompt.
    """
    processor = QuestionOptionProcessor.from_prompt_constructor(self)
    return processor.get_question_options(question_data)
|
268
|
+
|
269
|
+
@cached_property
def agent_instructions_prompt(self) -> "Prompt":
    """Prompt carrying the agent's core behavioral instruction.

    A default (blank) agent contributes no instruction text, so an empty
    prompt is returned in that case.

    >>> from .invigilators import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.agent_instructions_prompt
    Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
    """
    from ..agents import Agent

    blank_agent = Agent()
    text = "" if self.agent == blank_agent else self.agent.instruction
    return Prompt(text=text)
|
283
|
+
|
284
|
+
@cached_property
def agent_persona_prompt(self) -> "Prompt":
    """Prompt describing the agent's persona (its traits).

    A default (blank) agent yields an empty prompt; otherwise the agent
    renders its own persona via its prompt() method.

    >>> from edsl.invigilators.invigilators import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.agent_persona_prompt
    Prompt(text=\"""Your traits: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
    """
    from ..agents import Agent

    if self.agent == Agent():  # blank agent -> no persona text
        return Prompt(text="")
    return self.agent.prompt()
|
298
|
+
|
299
|
+
def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
    """Map question names to question objects annotated with prior answers.

    Questions already answered carry those answers as attributes; the
    rest receive placeholder objects (see _add_answers).

    >>> from edsl.invigilators.invigilators import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.prior_answers_dict()
    {'q0': ..., 'q1': ...}
    """
    questions_by_name = self.survey.question_names_to_questions()
    return self._add_answers(questions_by_name, self.current_answers)
|
310
|
+
|
311
|
+
@staticmethod
|
312
|
+
def _extract_question_and_entry_type(key_entry) -> tuple[str, str]:
|
313
|
+
"""
|
314
|
+
Extracts the question name and type for the current answer dictionary key entry.
|
315
|
+
|
316
|
+
>>> PromptConstructor._extract_question_and_entry_type("q0")
|
317
|
+
('q0', 'answer')
|
318
|
+
>>> PromptConstructor._extract_question_and_entry_type("q0_comment")
|
319
|
+
('q0', 'comment')
|
320
|
+
>>> PromptConstructor._extract_question_and_entry_type("q0_alternate_generated_tokens")
|
321
|
+
('q0_alternate', 'generated_tokens')
|
322
|
+
>>> PromptConstructor._extract_question_and_entry_type("q0_alt_comment")
|
323
|
+
('q0_alt', 'comment')
|
324
|
+
"""
|
325
|
+
split_list = key_entry.rsplit("_", maxsplit=1)
|
326
|
+
if len(split_list) == 1:
|
327
|
+
question_name = split_list[0]
|
328
|
+
entry_type = "answer"
|
329
|
+
else:
|
330
|
+
if split_list[1] == "comment":
|
331
|
+
question_name = split_list[0]
|
332
|
+
entry_type = "comment"
|
333
|
+
elif split_list[1] == "tokens": # it's actually 'generated_tokens'
|
334
|
+
question_name = key_entry.replace("_generated_tokens", "")
|
335
|
+
entry_type = "generated_tokens"
|
336
|
+
else:
|
337
|
+
question_name = key_entry
|
338
|
+
entry_type = "answer"
|
339
|
+
return question_name, entry_type
|
340
|
+
|
341
|
+
@staticmethod
|
342
|
+
def _augmented_answers_dict(current_answers: dict) -> dict:
|
343
|
+
"""
|
344
|
+
Creates a nested dictionary of the current answers to question dictionaries; those question dictionaries have the answer, comment, and generated_tokens as keys.
|
345
|
+
|
346
|
+
>>> PromptConstructor._augmented_answers_dict({"q0": "LOVE IT!", "q0_comment": "I love school!"})
|
347
|
+
{'q0': {'answer': 'LOVE IT!', 'comment': 'I love school!'}}
|
348
|
+
"""
|
349
|
+
from collections import defaultdict
|
350
|
+
|
351
|
+
d = defaultdict(dict)
|
352
|
+
for key, value in current_answers.items():
|
353
|
+
question_name, entry_type = (
|
354
|
+
PromptConstructor._extract_question_and_entry_type(key)
|
355
|
+
)
|
356
|
+
d[question_name][entry_type] = value
|
357
|
+
return dict(d)
|
358
|
+
|
359
|
+
@staticmethod
|
360
|
+
def _add_answers(
|
361
|
+
answer_dict: dict, current_answers: dict
|
362
|
+
) -> dict[str, "QuestionBase"]:
|
363
|
+
"""
|
364
|
+
Adds the current answers to the answer dictionary.
|
365
|
+
|
366
|
+
>>> from edsl import QuestionFreeText
|
367
|
+
>>> d = {"q0": QuestionFreeText(question_text="Do you like school?", question_name = "q0")}
|
368
|
+
>>> current_answers = {"q0": "LOVE IT!"}
|
369
|
+
>>> PromptConstructor._add_answers(d, current_answers)['q0'].answer
|
370
|
+
'LOVE IT!'
|
371
|
+
"""
|
372
|
+
augmented_answers = PromptConstructor._augmented_answers_dict(current_answers)
|
373
|
+
|
374
|
+
for question in answer_dict:
|
375
|
+
if question in augmented_answers:
|
376
|
+
for entry_type, value in augmented_answers[question].items():
|
377
|
+
setattr(answer_dict[question], entry_type, value)
|
378
|
+
else:
|
379
|
+
answer_dict[question].answer = PlaceholderAnswer()
|
380
|
+
answer_dict[question].comment = PlaceholderComment()
|
381
|
+
answer_dict[question].generated_tokens = PlaceholderGeneratedTokens()
|
382
|
+
return answer_dict
|
383
|
+
|
384
|
+
@cached_property
|
385
|
+
def file_keys_from_question(self) -> list:
|
386
|
+
"""Extracts the file keys from the question text.
|
387
|
+
|
388
|
+
It checks if the variables in the question text are in the scenario file keys.
|
389
|
+
"""
|
390
|
+
return QuestionTemplateReplacementsBuilder.from_prompt_constructor(self).question_file_keys()
|
391
|
+
|
392
|
+
@cached_property
|
393
|
+
def question_instructions_prompt(self) -> Prompt:
|
394
|
+
"""
|
395
|
+
>>> from edsl.invigilators.invigilators import InvigilatorBase
|
396
|
+
>>> i = InvigilatorBase.example()
|
397
|
+
>>> i.prompt_constructor.question_instructions_prompt
|
398
|
+
Prompt(text=\"""...
|
399
|
+
...
|
400
|
+
"""
|
401
|
+
return self.build_question_instructions_prompt()
|
402
|
+
|
403
|
+
def build_question_instructions_prompt(self) -> Prompt:
|
404
|
+
"""Buils the question instructions prompt."""
|
405
|
+
from .question_instructions_prompt_builder import QuestionInstructionPromptBuilder
|
406
|
+
qipb = QuestionInstructionPromptBuilder.from_prompt_constructor(self)
|
407
|
+
prompt = qipb.build()
|
408
|
+
if prompt.captured_variables:
|
409
|
+
self.captured_variables.update(prompt.captured_variables)
|
410
|
+
|
411
|
+
return prompt
|
412
|
+
|
413
|
+
@cached_property
|
414
|
+
def prior_question_memory_prompt(self) -> Prompt:
|
415
|
+
memory_prompt = Prompt(text="")
|
416
|
+
if self.memory_plan is not None:
|
417
|
+
memory_prompt += self.create_memory_prompt(
|
418
|
+
self.question.question_name
|
419
|
+
).render(self.scenario | self.prior_answers_dict())
|
420
|
+
return memory_prompt
|
421
|
+
|
422
|
+
def create_memory_prompt(self, question_name: str) -> Prompt:
|
423
|
+
"""Create a memory for the agent.
|
424
|
+
|
425
|
+
The returns a memory prompt for the agent.
|
426
|
+
|
427
|
+
>>> from edsl.invigilators.invigilators import InvigilatorBase
|
428
|
+
>>> i = InvigilatorBase.example()
|
429
|
+
>>> i.current_answers = {"q0": "Prior answer"}
|
430
|
+
>>> i.memory_plan.add_single_memory("q1", "q0")
|
431
|
+
>>> p = i.prompt_constructor.create_memory_prompt("q1")
|
432
|
+
>>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
|
433
|
+
'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
|
434
|
+
"""
|
435
|
+
return self.memory_plan.get_memory_prompt_fragment(
|
436
|
+
question_name, self.current_answers
|
437
|
+
)
|
438
|
+
|
439
|
+
def get_prompts(self) -> Dict[str, Any]:
|
440
|
+
"""Get the prompts for the question."""
|
441
|
+
start = time.time()
|
442
|
+
|
443
|
+
# Build all the components
|
444
|
+
agent_instructions = self.agent_instructions_prompt
|
445
|
+
agent_persona = self.agent_persona_prompt
|
446
|
+
question_instructions = self.question_instructions_prompt
|
447
|
+
prior_question_memory = self.prior_question_memory_prompt
|
448
|
+
|
449
|
+
# Get components dict
|
450
|
+
components = {
|
451
|
+
"agent_instructions": agent_instructions.text,
|
452
|
+
"agent_persona": agent_persona.text,
|
453
|
+
"question_instructions": question_instructions.text,
|
454
|
+
"prior_question_memory": prior_question_memory.text,
|
455
|
+
}
|
456
|
+
|
457
|
+
prompts = self.prompt_plan.get_prompts(**components)
|
458
|
+
|
459
|
+
# Handle file keys if present
|
460
|
+
file_keys = self.file_keys_from_question
|
461
|
+
if file_keys:
|
462
|
+
files_list = []
|
463
|
+
for key in file_keys:
|
464
|
+
files_list.append(self.scenario[key])
|
465
|
+
prompts["files_list"] = files_list
|
466
|
+
|
467
|
+
return prompts
|
468
|
+
|
469
|
+
def get_captured_variables(self) -> dict:
|
470
|
+
"""Get the captured variables."""
|
471
|
+
return self.captured_variables
|
472
|
+
|
473
|
+
|
474
|
+
if __name__ == '__main__':
|
475
|
+
import doctest
|
476
|
+
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
@@ -1,17 +1,17 @@
|
|
1
1
|
from typing import Dict, List, Set, Any, Union, TYPE_CHECKING
|
2
2
|
from warnings import warn
|
3
3
|
import logging
|
4
|
-
from
|
4
|
+
from ..prompts import Prompt
|
5
5
|
|
6
6
|
if TYPE_CHECKING:
|
7
|
-
from
|
8
|
-
from
|
9
|
-
from
|
10
|
-
from
|
11
|
-
from
|
12
|
-
from
|
13
|
-
|
14
|
-
from
|
7
|
+
from .prompt_constructor import PromptConstructor
|
8
|
+
from ..language_models import Model
|
9
|
+
from ..surveys import Survey
|
10
|
+
from ..questions import QuestionBase
|
11
|
+
from ..scenarios import Scenario
|
12
|
+
from ..agents import Agent
|
13
|
+
|
14
|
+
from .question_template_replacements_builder import (
|
15
15
|
QuestionTemplateReplacementsBuilder as QTRB,
|
16
16
|
)
|
17
17
|
|
@@ -58,6 +58,8 @@ class QuestionInstructionPromptBuilder:
|
|
58
58
|
self.scenario = scenario
|
59
59
|
self.prior_answers_dict = prior_answers_dict
|
60
60
|
|
61
|
+
self.captured_variables = {}
|
62
|
+
|
61
63
|
def build(self) -> Prompt:
|
62
64
|
"""Builds the complete question instructions prompt with all necessary components.
|
63
65
|
|
@@ -149,7 +151,7 @@ class QuestionInstructionPromptBuilder:
|
|
149
151
|
|
150
152
|
The question_options could be intended to be replaced with data from a scenario or prior answers.
|
151
153
|
|
152
|
-
>>> question_data = {'question_name': 'q0', 'question_text': 'Do you like school?', 'question_options': '{{ options }}'}
|
154
|
+
>>> question_data = {'question_name': 'q0', 'question_text': 'Do you like school?', 'question_options': '{{ scenario.options }}'}
|
153
155
|
>>> scenario = {"options": ["yes", "no"]}
|
154
156
|
>>> prior_answers_dict = {}
|
155
157
|
>>> QuestionInstructionPromptBuilder._process_question_options(question_data, scenario, prior_answers_dict)
|
@@ -164,8 +166,7 @@ class QuestionInstructionPromptBuilder:
|
|
164
166
|
Dict: Question data with processed question options
|
165
167
|
"""
|
166
168
|
if "question_options" in question_data:
|
167
|
-
from
|
168
|
-
|
169
|
+
from .question_option_processor import QuestionOptionProcessor
|
169
170
|
question_options = QuestionOptionProcessor(
|
170
171
|
scenario, prior_answers_dict
|
171
172
|
).get_question_options(question_data=question_data)
|
@@ -207,7 +208,11 @@ class QuestionInstructionPromptBuilder:
|
|
207
208
|
replacement_dict = self.qtrb.build_replacement_dict(prompt_data["data"])
|
208
209
|
|
209
210
|
# Render with dict
|
210
|
-
|
211
|
+
rendered_prompt =prompt_data["prompt"].render(replacement_dict)
|
212
|
+
if rendered_prompt.captured_variables:
|
213
|
+
self.captured_variables.update(rendered_prompt.captured_variables)
|
214
|
+
#print(f"Captured variables in QIPB: {self.captured_variables}")
|
215
|
+
return rendered_prompt
|
211
216
|
|
212
217
|
def _validate_template_variables(self, rendered_prompt: Prompt) -> None:
|
213
218
|
"""Validates that all template variables have been properly replaced.
|