edsl 0.1.46__py3-none-any.whl → 0.1.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/__init__.py +44 -39
- edsl/__version__.py +1 -1
- edsl/agents/__init__.py +4 -2
- edsl/agents/{Agent.py → agent.py} +442 -152
- edsl/agents/{AgentList.py → agent_list.py} +220 -162
- edsl/agents/descriptors.py +46 -7
- edsl/{exceptions/agents.py → agents/exceptions.py} +3 -12
- edsl/base/__init__.py +75 -0
- edsl/base/base_class.py +1303 -0
- edsl/base/data_transfer_models.py +114 -0
- edsl/base/enums.py +215 -0
- edsl/base.py +8 -0
- edsl/buckets/__init__.py +25 -0
- edsl/buckets/bucket_collection.py +324 -0
- edsl/buckets/model_buckets.py +206 -0
- edsl/buckets/token_bucket.py +502 -0
- edsl/{jobs/buckets/TokenBucketAPI.py → buckets/token_bucket_api.py} +1 -1
- edsl/buckets/token_bucket_client.py +509 -0
- edsl/caching/__init__.py +20 -0
- edsl/caching/cache.py +814 -0
- edsl/caching/cache_entry.py +427 -0
- edsl/{data/CacheHandler.py → caching/cache_handler.py} +14 -15
- edsl/caching/exceptions.py +24 -0
- edsl/caching/orm.py +30 -0
- edsl/{data/RemoteCacheSync.py → caching/remote_cache_sync.py} +3 -3
- edsl/caching/sql_dict.py +441 -0
- edsl/config/__init__.py +8 -0
- edsl/config/config_class.py +177 -0
- edsl/config.py +4 -176
- edsl/conversation/Conversation.py +7 -7
- edsl/conversation/car_buying.py +4 -4
- edsl/conversation/chips.py +6 -6
- edsl/coop/__init__.py +25 -2
- edsl/coop/coop.py +430 -113
- edsl/coop/{ExpectedParrotKeyHandler.py → ep_key_handling.py} +86 -10
- edsl/coop/exceptions.py +62 -0
- edsl/coop/price_fetcher.py +126 -0
- edsl/coop/utils.py +89 -24
- edsl/data_transfer_models.py +5 -72
- edsl/dataset/__init__.py +10 -0
- edsl/{results/Dataset.py → dataset/dataset.py} +116 -36
- edsl/dataset/dataset_operations_mixin.py +1492 -0
- edsl/{results/DatasetTree.py → dataset/dataset_tree.py} +156 -75
- edsl/{results/TableDisplay.py → dataset/display/table_display.py} +18 -7
- edsl/{results → dataset/display}/table_renderers.py +58 -2
- edsl/{results → dataset}/file_exports.py +4 -5
- edsl/{results → dataset}/smart_objects.py +2 -2
- edsl/enums.py +5 -205
- edsl/inference_services/__init__.py +5 -0
- edsl/inference_services/{AvailableModelCacheHandler.py → available_model_cache_handler.py} +2 -3
- edsl/inference_services/{AvailableModelFetcher.py → available_model_fetcher.py} +8 -14
- edsl/inference_services/data_structures.py +3 -2
- edsl/{exceptions/inference_services.py → inference_services/exceptions.py} +1 -1
- edsl/inference_services/{InferenceServiceABC.py → inference_service_abc.py} +1 -1
- edsl/inference_services/{InferenceServicesCollection.py → inference_services_collection.py} +8 -7
- edsl/inference_services/registry.py +4 -41
- edsl/inference_services/{ServiceAvailability.py → service_availability.py} +5 -25
- edsl/inference_services/services/__init__.py +31 -0
- edsl/inference_services/{AnthropicService.py → services/anthropic_service.py} +3 -3
- edsl/inference_services/{AwsBedrock.py → services/aws_bedrock.py} +2 -2
- edsl/inference_services/{AzureAI.py → services/azure_ai.py} +2 -2
- edsl/inference_services/{DeepInfraService.py → services/deep_infra_service.py} +1 -3
- edsl/inference_services/{DeepSeekService.py → services/deep_seek_service.py} +2 -4
- edsl/inference_services/{GoogleService.py → services/google_service.py} +5 -4
- edsl/inference_services/{GroqService.py → services/groq_service.py} +1 -1
- edsl/inference_services/{MistralAIService.py → services/mistral_ai_service.py} +3 -3
- edsl/inference_services/{OllamaService.py → services/ollama_service.py} +1 -7
- edsl/inference_services/{OpenAIService.py → services/open_ai_service.py} +5 -6
- edsl/inference_services/{PerplexityService.py → services/perplexity_service.py} +12 -12
- edsl/inference_services/{TestService.py → services/test_service.py} +7 -6
- edsl/inference_services/{TogetherAIService.py → services/together_ai_service.py} +2 -6
- edsl/inference_services/{XAIService.py → services/xai_service.py} +1 -1
- edsl/inference_services/write_available.py +1 -2
- edsl/instructions/__init__.py +6 -0
- edsl/{surveys/instructions/Instruction.py → instructions/instruction.py} +11 -6
- edsl/{surveys/instructions/InstructionCollection.py → instructions/instruction_collection.py} +10 -5
- edsl/{surveys/InstructionHandler.py → instructions/instruction_handler.py} +3 -3
- edsl/{jobs/interviews → interviews}/ReportErrors.py +2 -2
- edsl/interviews/__init__.py +4 -0
- edsl/{jobs/AnswerQuestionFunctionConstructor.py → interviews/answering_function.py} +45 -18
- edsl/{jobs/interviews/InterviewExceptionEntry.py → interviews/exception_tracking.py} +107 -22
- edsl/interviews/interview.py +638 -0
- edsl/{jobs/interviews/InterviewStatusDictionary.py → interviews/interview_status_dictionary.py} +21 -12
- edsl/{jobs/interviews/InterviewStatusLog.py → interviews/interview_status_log.py} +16 -7
- edsl/{jobs/InterviewTaskManager.py → interviews/interview_task_manager.py} +12 -7
- edsl/{jobs/RequestTokenEstimator.py → interviews/request_token_estimator.py} +8 -3
- edsl/{jobs/interviews/InterviewStatistic.py → interviews/statistics.py} +36 -10
- edsl/invigilators/__init__.py +38 -0
- edsl/invigilators/invigilator_base.py +477 -0
- edsl/{agents/Invigilator.py → invigilators/invigilators.py} +263 -10
- edsl/invigilators/prompt_constructor.py +476 -0
- edsl/{agents → invigilators}/prompt_helpers.py +2 -1
- edsl/{agents/QuestionInstructionPromptBuilder.py → invigilators/question_instructions_prompt_builder.py} +18 -13
- edsl/{agents → invigilators}/question_option_processor.py +96 -21
- edsl/{agents/QuestionTemplateReplacementsBuilder.py → invigilators/question_template_replacements_builder.py} +64 -12
- edsl/jobs/__init__.py +7 -1
- edsl/jobs/async_interview_runner.py +99 -35
- edsl/jobs/check_survey_scenario_compatibility.py +7 -5
- edsl/jobs/data_structures.py +153 -22
- edsl/{exceptions/jobs.py → jobs/exceptions.py} +2 -1
- edsl/jobs/{FetchInvigilator.py → fetch_invigilator.py} +4 -4
- edsl/jobs/{loggers/HTMLTableJobLogger.py → html_table_job_logger.py} +6 -2
- edsl/jobs/{Jobs.py → jobs.py} +321 -155
- edsl/jobs/{JobsChecks.py → jobs_checks.py} +15 -7
- edsl/jobs/{JobsComponentConstructor.py → jobs_component_constructor.py} +20 -17
- edsl/jobs/{InterviewsConstructor.py → jobs_interview_constructor.py} +10 -5
- edsl/jobs/jobs_pricing_estimation.py +347 -0
- edsl/jobs/{JobsRemoteInferenceLogger.py → jobs_remote_inference_logger.py} +4 -3
- edsl/jobs/jobs_runner_asyncio.py +282 -0
- edsl/jobs/{JobsRemoteInferenceHandler.py → remote_inference.py} +19 -22
- edsl/jobs/results_exceptions_handler.py +2 -2
- edsl/key_management/__init__.py +28 -0
- edsl/key_management/key_lookup.py +161 -0
- edsl/{language_models/key_management/KeyLookupBuilder.py → key_management/key_lookup_builder.py} +118 -47
- edsl/key_management/key_lookup_collection.py +82 -0
- edsl/key_management/models.py +218 -0
- edsl/language_models/__init__.py +7 -2
- edsl/language_models/{ComputeCost.py → compute_cost.py} +18 -3
- edsl/{exceptions/language_models.py → language_models/exceptions.py} +2 -1
- edsl/language_models/language_model.py +1080 -0
- edsl/language_models/model.py +10 -25
- edsl/language_models/{ModelList.py → model_list.py} +9 -14
- edsl/language_models/{RawResponseHandler.py → raw_response_handler.py} +1 -1
- edsl/language_models/{RegisterLanguageModelsMeta.py → registry.py} +1 -1
- edsl/language_models/repair.py +4 -4
- edsl/language_models/utilities.py +4 -4
- edsl/notebooks/__init__.py +3 -1
- edsl/notebooks/{Notebook.py → notebook.py} +7 -8
- edsl/prompts/__init__.py +1 -1
- edsl/{exceptions/prompts.py → prompts/exceptions.py} +3 -1
- edsl/prompts/{Prompt.py → prompt.py} +101 -95
- edsl/questions/HTMLQuestion.py +1 -1
- edsl/questions/__init__.py +154 -25
- edsl/questions/answer_validator_mixin.py +1 -1
- edsl/questions/compose_questions.py +4 -3
- edsl/questions/derived/question_likert_five.py +166 -0
- edsl/questions/derived/{QuestionLinearScale.py → question_linear_scale.py} +4 -4
- edsl/questions/derived/{QuestionTopK.py → question_top_k.py} +4 -4
- edsl/questions/derived/{QuestionYesNo.py → question_yes_no.py} +4 -5
- edsl/questions/descriptors.py +24 -30
- edsl/questions/loop_processor.py +65 -19
- edsl/questions/question_base.py +881 -0
- edsl/questions/question_base_gen_mixin.py +15 -16
- edsl/questions/{QuestionBasePromptsMixin.py → question_base_prompts_mixin.py} +2 -2
- edsl/questions/{QuestionBudget.py → question_budget.py} +3 -4
- edsl/questions/{QuestionCheckBox.py → question_check_box.py} +16 -16
- edsl/questions/{QuestionDict.py → question_dict.py} +39 -5
- edsl/questions/{QuestionExtract.py → question_extract.py} +9 -9
- edsl/questions/question_free_text.py +282 -0
- edsl/questions/{QuestionFunctional.py → question_functional.py} +6 -5
- edsl/questions/{QuestionList.py → question_list.py} +6 -7
- edsl/questions/{QuestionMatrix.py → question_matrix.py} +6 -5
- edsl/questions/{QuestionMultipleChoice.py → question_multiple_choice.py} +126 -21
- edsl/questions/{QuestionNumerical.py → question_numerical.py} +5 -5
- edsl/questions/{QuestionRank.py → question_rank.py} +6 -6
- edsl/questions/question_registry.py +10 -16
- edsl/questions/register_questions_meta.py +8 -4
- edsl/questions/response_validator_abc.py +17 -16
- edsl/results/__init__.py +4 -1
- edsl/{exceptions/results.py → results/exceptions.py} +1 -1
- edsl/results/report.py +197 -0
- edsl/results/{Result.py → result.py} +131 -45
- edsl/results/{Results.py → results.py} +420 -216
- edsl/results/results_selector.py +344 -25
- edsl/scenarios/__init__.py +30 -3
- edsl/scenarios/{ConstructDownloadLink.py → construct_download_link.py} +7 -0
- edsl/scenarios/directory_scanner.py +156 -13
- edsl/scenarios/document_chunker.py +186 -0
- edsl/scenarios/exceptions.py +101 -0
- edsl/scenarios/file_methods.py +2 -3
- edsl/scenarios/file_store.py +755 -0
- edsl/scenarios/handlers/__init__.py +14 -14
- edsl/scenarios/handlers/{csv.py → csv_file_store.py} +1 -2
- edsl/scenarios/handlers/{docx.py → docx_file_store.py} +8 -7
- edsl/scenarios/handlers/{html.py → html_file_store.py} +1 -2
- edsl/scenarios/handlers/{jpeg.py → jpeg_file_store.py} +1 -1
- edsl/scenarios/handlers/{json.py → json_file_store.py} +1 -1
- edsl/scenarios/handlers/latex_file_store.py +5 -0
- edsl/scenarios/handlers/{md.py → md_file_store.py} +1 -1
- edsl/scenarios/handlers/{pdf.py → pdf_file_store.py} +2 -2
- edsl/scenarios/handlers/{png.py → png_file_store.py} +1 -1
- edsl/scenarios/handlers/{pptx.py → pptx_file_store.py} +8 -7
- edsl/scenarios/handlers/{py.py → py_file_store.py} +1 -3
- edsl/scenarios/handlers/{sql.py → sql_file_store.py} +2 -1
- edsl/scenarios/handlers/{sqlite.py → sqlite_file_store.py} +2 -3
- edsl/scenarios/handlers/{txt.py → txt_file_store.py} +1 -1
- edsl/scenarios/scenario.py +928 -0
- edsl/scenarios/scenario_join.py +18 -5
- edsl/scenarios/{ScenarioList.py → scenario_list.py} +424 -106
- edsl/scenarios/{ScenarioListPdfMixin.py → scenario_list_pdf_tools.py} +16 -15
- edsl/scenarios/scenario_selector.py +5 -1
- edsl/study/ObjectEntry.py +2 -2
- edsl/study/SnapShot.py +5 -5
- edsl/study/Study.py +20 -21
- edsl/study/__init__.py +6 -4
- edsl/surveys/__init__.py +7 -4
- edsl/surveys/dag/__init__.py +2 -0
- edsl/surveys/{ConstructDAG.py → dag/construct_dag.py} +3 -3
- edsl/surveys/{DAG.py → dag/dag.py} +13 -10
- edsl/surveys/descriptors.py +1 -1
- edsl/surveys/{EditSurvey.py → edit_survey.py} +9 -9
- edsl/{exceptions/surveys.py → surveys/exceptions.py} +1 -2
- edsl/surveys/memory/__init__.py +3 -0
- edsl/surveys/{MemoryPlan.py → memory/memory_plan.py} +10 -9
- edsl/surveys/rules/__init__.py +3 -0
- edsl/surveys/{Rule.py → rules/rule.py} +103 -43
- edsl/surveys/{RuleCollection.py → rules/rule_collection.py} +21 -30
- edsl/surveys/{RuleManager.py → rules/rule_manager.py} +19 -13
- edsl/surveys/survey.py +1743 -0
- edsl/surveys/{SurveyExportMixin.py → survey_export.py} +22 -27
- edsl/surveys/{SurveyFlowVisualization.py → survey_flow_visualization.py} +11 -2
- edsl/surveys/{Simulator.py → survey_simulator.py} +10 -3
- edsl/tasks/__init__.py +32 -0
- edsl/{jobs/tasks/QuestionTaskCreator.py → tasks/question_task_creator.py} +115 -57
- edsl/tasks/task_creators.py +135 -0
- edsl/{jobs/tasks/TaskHistory.py → tasks/task_history.py} +86 -47
- edsl/{jobs/tasks → tasks}/task_status_enum.py +91 -7
- edsl/tasks/task_status_log.py +85 -0
- edsl/tokens/__init__.py +2 -0
- edsl/tokens/interview_token_usage.py +53 -0
- edsl/utilities/PrettyList.py +1 -1
- edsl/utilities/SystemInfo.py +25 -22
- edsl/utilities/__init__.py +29 -21
- edsl/utilities/gcp_bucket/__init__.py +2 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +99 -96
- edsl/utilities/interface.py +44 -536
- edsl/{results/MarkdownToPDF.py → utilities/markdown_to_pdf.py} +13 -5
- edsl/utilities/repair_functions.py +1 -1
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/METADATA +3 -2
- edsl-0.1.48.dist-info/RECORD +347 -0
- edsl/Base.py +0 -426
- edsl/BaseDiff.py +0 -260
- edsl/agents/InvigilatorBase.py +0 -260
- edsl/agents/PromptConstructor.py +0 -318
- edsl/auto/AutoStudy.py +0 -130
- edsl/auto/StageBase.py +0 -243
- edsl/auto/StageGenerateSurvey.py +0 -178
- edsl/auto/StageLabelQuestions.py +0 -125
- edsl/auto/StagePersona.py +0 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
- edsl/auto/StagePersonaDimensionValues.py +0 -74
- edsl/auto/StagePersonaDimensions.py +0 -69
- edsl/auto/StageQuestions.py +0 -74
- edsl/auto/SurveyCreatorPipeline.py +0 -21
- edsl/auto/utilities.py +0 -218
- edsl/base/Base.py +0 -279
- edsl/coop/PriceFetcher.py +0 -54
- edsl/data/Cache.py +0 -580
- edsl/data/CacheEntry.py +0 -230
- edsl/data/SQLiteDict.py +0 -292
- edsl/data/__init__.py +0 -5
- edsl/data/orm.py +0 -10
- edsl/exceptions/cache.py +0 -5
- edsl/exceptions/coop.py +0 -14
- edsl/exceptions/data.py +0 -14
- edsl/exceptions/scenarios.py +0 -29
- edsl/jobs/Answers.py +0 -43
- edsl/jobs/JobsPrompts.py +0 -354
- edsl/jobs/buckets/BucketCollection.py +0 -134
- edsl/jobs/buckets/ModelBuckets.py +0 -65
- edsl/jobs/buckets/TokenBucket.py +0 -283
- edsl/jobs/buckets/TokenBucketClient.py +0 -191
- edsl/jobs/interviews/Interview.py +0 -395
- edsl/jobs/interviews/InterviewExceptionCollection.py +0 -99
- edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -25
- edsl/jobs/runners/JobsRunnerAsyncio.py +0 -163
- edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- edsl/jobs/tasks/TaskCreators.py +0 -64
- edsl/jobs/tasks/TaskStatusLog.py +0 -23
- edsl/jobs/tokens/InterviewTokenUsage.py +0 -27
- edsl/language_models/LanguageModel.py +0 -635
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/key_management/KeyLookup.py +0 -63
- edsl/language_models/key_management/KeyLookupCollection.py +0 -38
- edsl/language_models/key_management/models.py +0 -137
- edsl/questions/QuestionBase.py +0 -539
- edsl/questions/QuestionFreeText.py +0 -130
- edsl/questions/derived/QuestionLikertFive.py +0 -76
- edsl/results/DatasetExportMixin.py +0 -911
- edsl/results/ResultsExportMixin.py +0 -45
- edsl/results/TextEditor.py +0 -50
- edsl/results/results_fetch_mixin.py +0 -33
- edsl/results/results_tools_mixin.py +0 -98
- edsl/scenarios/DocumentChunker.py +0 -104
- edsl/scenarios/FileStore.py +0 -564
- edsl/scenarios/Scenario.py +0 -548
- edsl/scenarios/ScenarioHtmlMixin.py +0 -65
- edsl/scenarios/ScenarioListExportMixin.py +0 -45
- edsl/scenarios/handlers/latex.py +0 -5
- edsl/shared.py +0 -1
- edsl/surveys/Survey.py +0 -1306
- edsl/surveys/SurveyQualtricsImport.py +0 -284
- edsl/surveys/SurveyToApp.py +0 -141
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/tools/__init__.py +0 -1
- edsl/tools/clusters.py +0 -192
- edsl/tools/embeddings.py +0 -27
- edsl/tools/embeddings_plotting.py +0 -118
- edsl/tools/plotting.py +0 -112
- edsl/tools/summarize.py +0 -18
- edsl/utilities/data/Registry.py +0 -6
- edsl/utilities/data/__init__.py +0 -1
- edsl/utilities/data/scooter_results.json +0 -1
- edsl-0.1.46.dist-info/RECORD +0 -366
- /edsl/coop/{CoopFunctionsMixin.py → coop_functions.py} +0 -0
- /edsl/{results → dataset/display}/CSSParameterizer.py +0 -0
- /edsl/{language_models/key_management → dataset/display}/__init__.py +0 -0
- /edsl/{results → dataset/display}/table_data_class.py +0 -0
- /edsl/{results → dataset/display}/table_display.css +0 -0
- /edsl/{results/ResultsGGMixin.py → dataset/r/ggplot.py} +0 -0
- /edsl/{results → dataset}/tree_explore.py +0 -0
- /edsl/{surveys/instructions/ChangeInstruction.py → instructions/change_instruction.py} +0 -0
- /edsl/{jobs/interviews → interviews}/interview_status_enum.py +0 -0
- /edsl/jobs/{runners/JobsRunnerStatus.py → jobs_runner_status.py} +0 -0
- /edsl/language_models/{PriceManager.py → price_manager.py} +0 -0
- /edsl/language_models/{fake_openai_call.py → unused/fake_openai_call.py} +0 -0
- /edsl/language_models/{fake_openai_service.py → unused/fake_openai_service.py} +0 -0
- /edsl/notebooks/{NotebookToLaTeX.py → notebook_to_latex.py} +0 -0
- /edsl/{exceptions/questions.py → questions/exceptions.py} +0 -0
- /edsl/questions/{SimpleAskMixin.py → simple_ask_mixin.py} +0 -0
- /edsl/surveys/{Memory.py → memory/memory.py} +0 -0
- /edsl/surveys/{MemoryManagement.py → memory/memory_management.py} +0 -0
- /edsl/surveys/{SurveyCSS.py → survey_css.py} +0 -0
- /edsl/{jobs/tokens/TokenUsage.py → tokens/token_usage.py} +0 -0
- /edsl/{results/MarkdownToDocx.py → utilities/markdown_to_docx.py} +0 -0
- /edsl/{TemplateLoader.py → utilities/template_loader.py} +0 -0
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/LICENSE +0 -0
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/WHEEL +0 -0
edsl/agents/InvigilatorBase.py
DELETED
@@ -1,260 +0,0 @@
|
|
1
|
-
from abc import ABC, abstractmethod
|
2
|
-
import asyncio
|
3
|
-
from typing import Coroutine, Dict, Any, Optional, TYPE_CHECKING
|
4
|
-
|
5
|
-
from edsl.utilities.decorators import jupyter_nb_handler
|
6
|
-
from edsl.data_transfer_models import AgentResponseDict
|
7
|
-
|
8
|
-
if TYPE_CHECKING:
|
9
|
-
from edsl.prompts.Prompt import Prompt
|
10
|
-
from edsl.data.Cache import Cache
|
11
|
-
from edsl.questions.QuestionBase import QuestionBase
|
12
|
-
from edsl.scenarios.Scenario import Scenario
|
13
|
-
from edsl.surveys.MemoryPlan import MemoryPlan
|
14
|
-
from edsl.language_models.LanguageModel import LanguageModel
|
15
|
-
from edsl.surveys.Survey import Survey
|
16
|
-
from edsl.agents.Agent import Agent
|
17
|
-
from edsl.language_models.key_management.KeyLookup import KeyLookup
|
18
|
-
|
19
|
-
from edsl.data_transfer_models import EDSLResultObjectInput
|
20
|
-
from edsl.agents.PromptConstructor import PromptConstructor
|
21
|
-
from edsl.agents.prompt_helpers import PromptPlan
|
22
|
-
|
23
|
-
|
24
|
-
class InvigilatorBase(ABC):
|
25
|
-
"""An invigiator (someone who administers an exam) is a class that is responsible for administering a question to an agent.
|
26
|
-
|
27
|
-
>>> InvigilatorBase.example().answer_question()
|
28
|
-
{'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}
|
29
|
-
|
30
|
-
>>> InvigilatorBase.example().get_failed_task_result(failure_reason="Failed to get response").comment
|
31
|
-
'Failed to get response'
|
32
|
-
|
33
|
-
This returns an empty prompt because there is no memory the agent needs to have at q0.
|
34
|
-
"""
|
35
|
-
|
36
|
-
def __init__(
|
37
|
-
self,
|
38
|
-
agent: "Agent",
|
39
|
-
question: "QuestionBase",
|
40
|
-
scenario: "Scenario",
|
41
|
-
model: "LanguageModel",
|
42
|
-
memory_plan: "MemoryPlan",
|
43
|
-
current_answers: dict,
|
44
|
-
survey: Optional["Survey"],
|
45
|
-
cache: Optional["Cache"] = None,
|
46
|
-
iteration: Optional[int] = 1,
|
47
|
-
additional_prompt_data: Optional[dict] = None,
|
48
|
-
raise_validation_errors: Optional[bool] = True,
|
49
|
-
prompt_plan: Optional["PromptPlan"] = None,
|
50
|
-
key_lookup: Optional["KeyLookup"] = None,
|
51
|
-
):
|
52
|
-
"""Initialize a new Invigilator."""
|
53
|
-
self.agent = agent
|
54
|
-
self.question = question
|
55
|
-
self.scenario = scenario
|
56
|
-
self.model = model
|
57
|
-
self.memory_plan = memory_plan
|
58
|
-
self.current_answers = current_answers or {}
|
59
|
-
self.iteration = iteration
|
60
|
-
self.additional_prompt_data = additional_prompt_data
|
61
|
-
self.cache = cache
|
62
|
-
self.survey = survey
|
63
|
-
self.raise_validation_errors = raise_validation_errors
|
64
|
-
self.key_lookup = key_lookup
|
65
|
-
|
66
|
-
if prompt_plan is None:
|
67
|
-
self.prompt_plan = PromptPlan()
|
68
|
-
else:
|
69
|
-
self.prompt_plan = prompt_plan
|
70
|
-
|
71
|
-
# placeholder to store the raw model response
|
72
|
-
self.raw_model_response = None
|
73
|
-
|
74
|
-
@property
|
75
|
-
def prompt_constructor(self) -> PromptConstructor:
|
76
|
-
"""Return the prompt constructor."""
|
77
|
-
return PromptConstructor.from_invigilator(self, prompt_plan=self.prompt_plan)
|
78
|
-
|
79
|
-
def to_dict(self, include_cache=False) -> Dict[str, Any]:
|
80
|
-
attributes = [
|
81
|
-
"agent",
|
82
|
-
"question",
|
83
|
-
"scenario",
|
84
|
-
"model",
|
85
|
-
"memory_plan",
|
86
|
-
"current_answers",
|
87
|
-
"iteration",
|
88
|
-
"additional_prompt_data",
|
89
|
-
"survey",
|
90
|
-
"raw_model_response",
|
91
|
-
]
|
92
|
-
if include_cache:
|
93
|
-
attributes.append("cache")
|
94
|
-
|
95
|
-
def serialize_attribute(attr):
|
96
|
-
value = getattr(self, attr)
|
97
|
-
if value is None:
|
98
|
-
return None
|
99
|
-
if hasattr(value, "to_dict"):
|
100
|
-
return value.to_dict()
|
101
|
-
if isinstance(value, (int, float, str, bool, dict, list)):
|
102
|
-
return value
|
103
|
-
return str(value)
|
104
|
-
|
105
|
-
return {attr: serialize_attribute(attr) for attr in attributes}
|
106
|
-
|
107
|
-
@classmethod
|
108
|
-
def from_dict(cls, data) -> "InvigilatorBase":
|
109
|
-
from edsl.agents.Agent import Agent
|
110
|
-
from edsl.questions import QuestionBase
|
111
|
-
from edsl.scenarios.Scenario import Scenario
|
112
|
-
from edsl.surveys.MemoryPlan import MemoryPlan
|
113
|
-
from edsl.language_models.LanguageModel import LanguageModel
|
114
|
-
from edsl.surveys.Survey import Survey
|
115
|
-
from edsl.data.Cache import Cache
|
116
|
-
|
117
|
-
attributes_to_classes = {
|
118
|
-
"agent": Agent,
|
119
|
-
"question": QuestionBase,
|
120
|
-
"scenario": Scenario,
|
121
|
-
"model": LanguageModel,
|
122
|
-
"memory_plan": MemoryPlan,
|
123
|
-
"survey": Survey,
|
124
|
-
"cache": Cache,
|
125
|
-
}
|
126
|
-
d = {}
|
127
|
-
for attr, cls_ in attributes_to_classes.items():
|
128
|
-
if attr in data and data[attr] is not None:
|
129
|
-
if attr not in data:
|
130
|
-
d[attr] = {}
|
131
|
-
else:
|
132
|
-
d[attr] = cls_.from_dict(data[attr])
|
133
|
-
|
134
|
-
d["current_answers"] = data["current_answers"]
|
135
|
-
d["iteration"] = data["iteration"]
|
136
|
-
d["additional_prompt_data"] = data["additional_prompt_data"]
|
137
|
-
|
138
|
-
d = cls(**d)
|
139
|
-
d.raw_model_response = data.get("raw_model_response")
|
140
|
-
return d
|
141
|
-
|
142
|
-
def __repr__(self) -> str:
|
143
|
-
"""Return a string representation of the Invigilator.
|
144
|
-
|
145
|
-
>>> InvigilatorBase.example().__repr__()
|
146
|
-
'InvigilatorExample(...)'
|
147
|
-
|
148
|
-
"""
|
149
|
-
return f"{self.__class__.__name__}(agent={repr(self.agent)}, question={repr(self.question)}, scenario={repr(self.scenario)}, model={repr(self.model)}, memory_plan={repr(self.memory_plan)}, current_answers={repr(self.current_answers)}, iteration={repr(self.iteration)}, additional_prompt_data={repr(self.additional_prompt_data)}, cache={repr(self.cache)})"
|
150
|
-
|
151
|
-
def get_failed_task_result(self, failure_reason: str) -> EDSLResultObjectInput:
|
152
|
-
"""Return an AgentResponseDict used in case the question-asking fails.
|
153
|
-
|
154
|
-
Possible reasons include:
|
155
|
-
- Legimately skipped because of skip logic
|
156
|
-
- Failed to get response from the model
|
157
|
-
|
158
|
-
"""
|
159
|
-
data = {
|
160
|
-
"answer": None,
|
161
|
-
"generated_tokens": None,
|
162
|
-
"comment": failure_reason,
|
163
|
-
"question_name": self.question.question_name,
|
164
|
-
"prompts": self.get_prompts(),
|
165
|
-
"cached_response": None,
|
166
|
-
"raw_model_response": None,
|
167
|
-
"cache_used": None,
|
168
|
-
"cache_key": None,
|
169
|
-
}
|
170
|
-
return EDSLResultObjectInput(**data)
|
171
|
-
|
172
|
-
def get_prompts(self) -> Dict[str, "Prompt"]:
|
173
|
-
"""Return the prompt used."""
|
174
|
-
from edsl.prompts.Prompt import Prompt
|
175
|
-
|
176
|
-
return {
|
177
|
-
"user_prompt": Prompt("NA"),
|
178
|
-
"system_prompt": Prompt("NA"),
|
179
|
-
}
|
180
|
-
|
181
|
-
@abstractmethod
|
182
|
-
async def async_answer_question(self):
|
183
|
-
"""Asnwer a question."""
|
184
|
-
pass
|
185
|
-
|
186
|
-
@jupyter_nb_handler
|
187
|
-
def answer_question(self) -> Coroutine:
|
188
|
-
"""Return a function that gets the answers to the question."""
|
189
|
-
|
190
|
-
async def main():
|
191
|
-
"""Return the answer to the question."""
|
192
|
-
results = await asyncio.gather(self.async_answer_question())
|
193
|
-
return results[0] # Since there's only one task, return its result
|
194
|
-
|
195
|
-
return main()
|
196
|
-
|
197
|
-
@classmethod
|
198
|
-
def example(
|
199
|
-
cls, throw_an_exception=False, question=None, scenario=None, survey=None
|
200
|
-
) -> "InvigilatorBase":
|
201
|
-
"""Return an example invigilator.
|
202
|
-
|
203
|
-
>>> InvigilatorBase.example()
|
204
|
-
InvigilatorExample(...)
|
205
|
-
|
206
|
-
>>> InvigilatorBase.example().answer_question()
|
207
|
-
{'message': [{'text': 'SPAM!'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 1}}
|
208
|
-
|
209
|
-
>>> InvigilatorBase.example(throw_an_exception=True).answer_question()
|
210
|
-
Traceback (most recent call last):
|
211
|
-
...
|
212
|
-
Exception: This is a test error
|
213
|
-
"""
|
214
|
-
from edsl.agents.Agent import Agent
|
215
|
-
from edsl.scenarios.Scenario import Scenario
|
216
|
-
from edsl.surveys.MemoryPlan import MemoryPlan
|
217
|
-
from edsl.language_models.model import Model
|
218
|
-
from edsl.surveys.Survey import Survey
|
219
|
-
|
220
|
-
model = Model("test", canned_response="SPAM!")
|
221
|
-
|
222
|
-
if throw_an_exception:
|
223
|
-
model.throw_exception = True
|
224
|
-
agent = Agent.example()
|
225
|
-
|
226
|
-
if not survey:
|
227
|
-
survey = Survey.example()
|
228
|
-
|
229
|
-
if question not in survey.questions and question is not None:
|
230
|
-
survey.add_question(question)
|
231
|
-
|
232
|
-
question = question or survey.questions[0]
|
233
|
-
scenario = scenario or Scenario.example()
|
234
|
-
memory_plan = MemoryPlan(survey=survey)
|
235
|
-
current_answers = None
|
236
|
-
|
237
|
-
class InvigilatorExample(cls):
|
238
|
-
"""An example invigilator."""
|
239
|
-
|
240
|
-
async def async_answer_question(self):
|
241
|
-
"""Answer a question."""
|
242
|
-
return await self.model.async_execute_model_call(
|
243
|
-
user_prompt="Hello", system_prompt="Hi"
|
244
|
-
)
|
245
|
-
|
246
|
-
return InvigilatorExample(
|
247
|
-
agent=agent,
|
248
|
-
question=question,
|
249
|
-
scenario=scenario,
|
250
|
-
survey=survey,
|
251
|
-
model=model,
|
252
|
-
memory_plan=memory_plan,
|
253
|
-
current_answers=current_answers,
|
254
|
-
)
|
255
|
-
|
256
|
-
|
257
|
-
if __name__ == "__main__":
|
258
|
-
import doctest
|
259
|
-
|
260
|
-
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
edsl/agents/PromptConstructor.py
DELETED
@@ -1,318 +0,0 @@
|
|
1
|
-
from __future__ import annotations
|
2
|
-
from typing import Dict, Any, Optional, Set, Union, TYPE_CHECKING, Literal
|
3
|
-
from functools import cached_property
|
4
|
-
import time
|
5
|
-
import logging
|
6
|
-
|
7
|
-
from edsl.prompts.Prompt import Prompt
|
8
|
-
|
9
|
-
from edsl.agents.prompt_helpers import PromptPlan
|
10
|
-
from edsl.agents.QuestionTemplateReplacementsBuilder import (
|
11
|
-
QuestionTemplateReplacementsBuilder,
|
12
|
-
)
|
13
|
-
from edsl.agents.question_option_processor import QuestionOptionProcessor
|
14
|
-
|
15
|
-
if TYPE_CHECKING:
|
16
|
-
from edsl.agents.InvigilatorBase import InvigilatorBase
|
17
|
-
from edsl.questions.QuestionBase import QuestionBase
|
18
|
-
from edsl.agents.Agent import Agent
|
19
|
-
from edsl.surveys.Survey import Survey
|
20
|
-
from edsl.language_models.LanguageModel import LanguageModel
|
21
|
-
from edsl.surveys.MemoryPlan import MemoryPlan
|
22
|
-
from edsl.questions.QuestionBase import QuestionBase
|
23
|
-
from edsl.scenarios.Scenario import Scenario
|
24
|
-
|
25
|
-
logger = logging.getLogger(__name__)
|
26
|
-
|
27
|
-
class BasePlaceholder:
|
28
|
-
"""Base class for placeholder values when a question is not yet answered."""
|
29
|
-
|
30
|
-
def __init__(self, placeholder_type: str = "answer"):
|
31
|
-
self.value = "N/A"
|
32
|
-
self.comment = "Will be populated by prior answer"
|
33
|
-
self._type = placeholder_type
|
34
|
-
|
35
|
-
def __getitem__(self, index):
|
36
|
-
return ""
|
37
|
-
|
38
|
-
def __str__(self):
|
39
|
-
return f"<<{self.__class__.__name__}:{self._type}>>"
|
40
|
-
|
41
|
-
def __repr__(self):
|
42
|
-
return self.__str__()
|
43
|
-
|
44
|
-
|
45
|
-
class PlaceholderAnswer(BasePlaceholder):
|
46
|
-
def __init__(self):
|
47
|
-
super().__init__("answer")
|
48
|
-
|
49
|
-
|
50
|
-
class PlaceholderComment(BasePlaceholder):
|
51
|
-
def __init__(self):
|
52
|
-
super().__init__("comment")
|
53
|
-
|
54
|
-
|
55
|
-
class PlaceholderGeneratedTokens(BasePlaceholder):
|
56
|
-
def __init__(self):
|
57
|
-
super().__init__("generated_tokens")
|
58
|
-
|
59
|
-
|
60
|
-
class PromptConstructor:
|
61
|
-
"""
|
62
|
-
This class constructs the prompts for the language model.
|
63
|
-
|
64
|
-
The pieces of a prompt are:
|
65
|
-
- The agent instructions - "You are answering questions as if you were a human. Do not break character."
|
66
|
-
- The persona prompt - "You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}"
|
67
|
-
- The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
|
68
|
-
- The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
|
69
|
-
"""
|
70
|
-
@classmethod
|
71
|
-
def from_invigilator(
|
72
|
-
cls,
|
73
|
-
invigilator: "InvigilatorBase",
|
74
|
-
prompt_plan: Optional["PromptPlan"] = None
|
75
|
-
) -> "PromptConstructor":
|
76
|
-
return cls(
|
77
|
-
agent=invigilator.agent,
|
78
|
-
question=invigilator.question,
|
79
|
-
scenario=invigilator.scenario,
|
80
|
-
survey=invigilator.survey,
|
81
|
-
model=invigilator.model,
|
82
|
-
current_answers=invigilator.current_answers,
|
83
|
-
memory_plan=invigilator.memory_plan,
|
84
|
-
prompt_plan=prompt_plan
|
85
|
-
)
|
86
|
-
|
87
|
-
def __init__(
|
88
|
-
self,
|
89
|
-
agent: "Agent",
|
90
|
-
question: "QuestionBase",
|
91
|
-
scenario: "Scenario",
|
92
|
-
survey: "Survey",
|
93
|
-
model: "LanguageModel",
|
94
|
-
current_answers: dict,
|
95
|
-
memory_plan: "MemoryPlan",
|
96
|
-
prompt_plan: Optional["PromptPlan"] = None
|
97
|
-
):
|
98
|
-
self.agent = agent
|
99
|
-
self.question = question
|
100
|
-
self.scenario = scenario
|
101
|
-
self.survey = survey
|
102
|
-
self.model = model
|
103
|
-
self.current_answers = current_answers
|
104
|
-
self.memory_plan = memory_plan
|
105
|
-
self.prompt_plan = prompt_plan or PromptPlan()
|
106
|
-
|
107
|
-
def get_question_options(self, question_data: dict) -> list[str]:
    """Resolve the option list for a question via QuestionOptionProcessor."""
    processor = QuestionOptionProcessor.from_prompt_constructor(self)
    return processor.get_question_options(question_data)
@cached_property
def agent_instructions_prompt(self) -> Prompt:
    """
    >>> from edsl.agents.InvigilatorBase import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.agent_instructions_prompt
    Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
    """
    from edsl.agents.Agent import Agent

    # A default (empty) agent contributes no instructions to the prompt.
    if self.agent != Agent():
        return Prompt(text=self.agent.instruction)
    return Prompt(text="")
@cached_property
def agent_persona_prompt(self) -> Prompt:
    """
    >>> from edsl.agents.InvigilatorBase import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.agent_persona_prompt
    Prompt(text=\"""Your traits: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
    """
    from edsl.agents.Agent import Agent

    # A default (empty) agent has no persona to describe.
    if self.agent != Agent():
        return self.agent.prompt()
    return Prompt(text="")
def prior_answers_dict(self) -> dict[str, "QuestionBase"]:
    """This is a dictionary of prior answers, if they exist.

    >>> from edsl.agents.InvigilatorBase import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.prior_answers_dict()
    {'q0': ..., 'q1': ...}
    """
    questions_by_name = self.survey.question_names_to_questions()
    return self._add_answers(questions_by_name, self.current_answers)
@staticmethod
|
157
|
-
def _extract_question_and_entry_type(key_entry) -> tuple[str, str]:
|
158
|
-
"""
|
159
|
-
Extracts the question name and type for the current answer dictionary key entry.
|
160
|
-
|
161
|
-
>>> PromptConstructor._extract_question_and_entry_type("q0")
|
162
|
-
('q0', 'answer')
|
163
|
-
>>> PromptConstructor._extract_question_and_entry_type("q0_comment")
|
164
|
-
('q0', 'comment')
|
165
|
-
>>> PromptConstructor._extract_question_and_entry_type("q0_alternate_generated_tokens")
|
166
|
-
('q0_alternate', 'generated_tokens')
|
167
|
-
>>> PromptConstructor._extract_question_and_entry_type("q0_alt_comment")
|
168
|
-
('q0_alt', 'comment')
|
169
|
-
"""
|
170
|
-
split_list = key_entry.rsplit("_", maxsplit=1)
|
171
|
-
if len(split_list) == 1:
|
172
|
-
question_name = split_list[0]
|
173
|
-
entry_type = "answer"
|
174
|
-
else:
|
175
|
-
if split_list[1] == "comment":
|
176
|
-
question_name = split_list[0]
|
177
|
-
entry_type = "comment"
|
178
|
-
elif split_list[1] == "tokens": # it's actually 'generated_tokens'
|
179
|
-
question_name = key_entry.replace("_generated_tokens", "")
|
180
|
-
entry_type = "generated_tokens"
|
181
|
-
else:
|
182
|
-
question_name = key_entry
|
183
|
-
entry_type = "answer"
|
184
|
-
return question_name, entry_type
|
185
|
-
|
186
|
-
@staticmethod
def _augmented_answers_dict(current_answers: dict) -> dict:
    """
    >>> PromptConstructor._augmented_answers_dict({"q0": "LOVE IT!", "q0_comment": "I love school!"})
    {'q0': {'answer': 'LOVE IT!', 'comment': 'I love school!'}}
    """
    grouped: dict = {}
    for raw_key, value in current_answers.items():
        name, entry_type = PromptConstructor._extract_question_and_entry_type(
            raw_key
        )
        grouped.setdefault(name, {})[entry_type] = value
    return grouped
@staticmethod
def _add_answers(
    answer_dict: dict, current_answers: dict
) -> dict[str, "QuestionBase"]:
    """
    >>> from edsl import QuestionFreeText
    >>> d = {"q0": QuestionFreeText(question_text="Do you like school?", question_name = "q0")}
    >>> current_answers = {"q0": "LOVE IT!"}
    >>> PromptConstructor._add_answers(d, current_answers)['q0'].answer
    'LOVE IT!'
    """
    augmented = PromptConstructor._augmented_answers_dict(current_answers)

    for name, question_obj in answer_dict.items():
        entries = augmented.get(name)
        if entries is None:
            # Not answered yet: fill every slot with a placeholder sentinel.
            question_obj.answer = PlaceholderAnswer()
            question_obj.comment = PlaceholderComment()
            question_obj.generated_tokens = PlaceholderGeneratedTokens()
        else:
            for entry_type, value in entries.items():
                setattr(question_obj, entry_type, value)
    return answer_dict
@cached_property
def question_file_keys(self) -> list:
    """Extracts the file keys from the question text.

    It checks if the variables in the question text are in the scenario file keys.
    """
    builder = QuestionTemplateReplacementsBuilder.from_prompt_constructor(self)
    return builder.question_file_keys()
@cached_property
def question_instructions_prompt(self) -> Prompt:
    """
    >>> from edsl.agents.InvigilatorBase import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.prompt_constructor.question_instructions_prompt
    Prompt(text=\"""...
    ...
    """
    # Cached wrapper; the uncached builder can also be called directly.
    return self.build_question_instructions_prompt()
def build_question_instructions_prompt(self) -> Prompt:
    """Builds the question instructions prompt."""
    from edsl.agents.QuestionInstructionPromptBuilder import (
        QuestionInstructionPromptBuilder,
    )

    builder = QuestionInstructionPromptBuilder.from_prompt_constructor(self)
    return builder.build()
@cached_property
def prior_question_memory_prompt(self) -> Prompt:
    """Prompt fragment recalling previously answered questions.

    Empty when there is no memory plan.
    """
    empty = Prompt(text="")
    if self.memory_plan is None:
        return empty
    fragment = self.create_memory_prompt(self.question.question_name)
    # Render against the scenario plus prior answers so template
    # variables in the memory text resolve.
    return empty + fragment.render(self.scenario | self.prior_answers_dict())
def create_memory_prompt(self, question_name: str) -> Prompt:
    """Return the memory prompt fragment for *question_name*.

    The fragment recalls the prior question(s) and answer(s) designated
    by the memory plan.

    >>> from edsl.agents.InvigilatorBase import InvigilatorBase
    >>> i = InvigilatorBase.example()
    >>> i.current_answers = {"q0": "Prior answer"}
    >>> i.memory_plan.add_single_memory("q1", "q0")
    >>> p = i.prompt_constructor.create_memory_prompt("q1")
    >>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
    'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
    """
    return self.memory_plan.get_memory_prompt_fragment(
        question_name, self.current_answers
    )
def get_prompts(self) -> Dict[str, Any]:
    """Assemble and return the full prompt dictionary for this question.

    Builds the four prompt components (agent instructions, persona,
    question instructions, prior-question memory), hands them to the
    prompt plan, and attaches a ``files_list`` entry when the question
    text references scenario file keys.
    """
    # Each component is a cached property, so repeated calls are cheap.
    components = {
        "agent_instructions": self.agent_instructions_prompt.text,
        "agent_persona": self.agent_persona_prompt.text,
        "question_instructions": self.question_instructions_prompt.text,
        "prior_question_memory": self.prior_question_memory_prompt.text,
    }

    # NOTE(review): the result of arrange_components was never used; the
    # call is kept in case it mutates the plan — confirm and drop if pure.
    self.prompt_plan.arrange_components(**components)

    prompts = self.prompt_plan.get_prompts(**components)

    # Attach any scenario files referenced by the question text.
    # hasattr is kept deliberately: it also guards against an
    # AttributeError raised while evaluating the cached property.
    if hasattr(self, "question_file_keys") and self.question_file_keys:
        prompts["files_list"] = [
            self.scenario[key] for key in self.question_file_keys
        ]

    return prompts
# def _process_prompt(args):
|
311
|
-
# """Helper function to process a single prompt list with its replacements."""
|
312
|
-
# prompt_list, replacements = args
|
313
|
-
# return prompt_list.reduce()
|
314
|
-
|
315
|
-
|
316
|
-
if __name__ == '__main__':
    # Run this module's doctests; ELLIPSIS lets examples elide long output.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)