edsl 0.1.47__py3-none-any.whl → 0.1.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/__init__.py +44 -39
- edsl/__version__.py +1 -1
- edsl/agents/__init__.py +4 -2
- edsl/agents/{Agent.py → agent.py} +442 -152
- edsl/agents/{AgentList.py → agent_list.py} +220 -162
- edsl/agents/descriptors.py +46 -7
- edsl/{exceptions/agents.py → agents/exceptions.py} +3 -12
- edsl/base/__init__.py +75 -0
- edsl/base/base_class.py +1303 -0
- edsl/base/data_transfer_models.py +114 -0
- edsl/base/enums.py +215 -0
- edsl/base.py +8 -0
- edsl/buckets/__init__.py +25 -0
- edsl/buckets/bucket_collection.py +324 -0
- edsl/buckets/model_buckets.py +206 -0
- edsl/buckets/token_bucket.py +502 -0
- edsl/{jobs/buckets/TokenBucketAPI.py → buckets/token_bucket_api.py} +1 -1
- edsl/buckets/token_bucket_client.py +509 -0
- edsl/caching/__init__.py +20 -0
- edsl/caching/cache.py +814 -0
- edsl/caching/cache_entry.py +427 -0
- edsl/{data/CacheHandler.py → caching/cache_handler.py} +14 -15
- edsl/caching/exceptions.py +24 -0
- edsl/caching/orm.py +30 -0
- edsl/{data/RemoteCacheSync.py → caching/remote_cache_sync.py} +3 -3
- edsl/caching/sql_dict.py +441 -0
- edsl/config/__init__.py +8 -0
- edsl/config/config_class.py +177 -0
- edsl/config.py +4 -176
- edsl/conversation/Conversation.py +7 -7
- edsl/conversation/car_buying.py +4 -4
- edsl/conversation/chips.py +6 -6
- edsl/coop/__init__.py +25 -2
- edsl/coop/coop.py +303 -67
- edsl/coop/{ExpectedParrotKeyHandler.py → ep_key_handling.py} +86 -10
- edsl/coop/exceptions.py +62 -0
- edsl/coop/price_fetcher.py +126 -0
- edsl/coop/utils.py +89 -24
- edsl/data_transfer_models.py +5 -72
- edsl/dataset/__init__.py +10 -0
- edsl/{results/Dataset.py → dataset/dataset.py} +116 -36
- edsl/{results/DatasetExportMixin.py → dataset/dataset_operations_mixin.py} +606 -122
- edsl/{results/DatasetTree.py → dataset/dataset_tree.py} +156 -75
- edsl/{results/TableDisplay.py → dataset/display/table_display.py} +18 -7
- edsl/{results → dataset/display}/table_renderers.py +58 -2
- edsl/{results → dataset}/file_exports.py +4 -5
- edsl/{results → dataset}/smart_objects.py +2 -2
- edsl/enums.py +5 -205
- edsl/inference_services/__init__.py +5 -0
- edsl/inference_services/{AvailableModelCacheHandler.py → available_model_cache_handler.py} +2 -3
- edsl/inference_services/{AvailableModelFetcher.py → available_model_fetcher.py} +8 -14
- edsl/inference_services/data_structures.py +3 -2
- edsl/{exceptions/inference_services.py → inference_services/exceptions.py} +1 -1
- edsl/inference_services/{InferenceServiceABC.py → inference_service_abc.py} +1 -1
- edsl/inference_services/{InferenceServicesCollection.py → inference_services_collection.py} +8 -7
- edsl/inference_services/registry.py +4 -41
- edsl/inference_services/{ServiceAvailability.py → service_availability.py} +5 -25
- edsl/inference_services/services/__init__.py +31 -0
- edsl/inference_services/{AnthropicService.py → services/anthropic_service.py} +3 -3
- edsl/inference_services/{AwsBedrock.py → services/aws_bedrock.py} +2 -2
- edsl/inference_services/{AzureAI.py → services/azure_ai.py} +2 -2
- edsl/inference_services/{DeepInfraService.py → services/deep_infra_service.py} +1 -3
- edsl/inference_services/{DeepSeekService.py → services/deep_seek_service.py} +2 -4
- edsl/inference_services/{GoogleService.py → services/google_service.py} +5 -4
- edsl/inference_services/{GroqService.py → services/groq_service.py} +1 -1
- edsl/inference_services/{MistralAIService.py → services/mistral_ai_service.py} +3 -3
- edsl/inference_services/{OllamaService.py → services/ollama_service.py} +1 -7
- edsl/inference_services/{OpenAIService.py → services/open_ai_service.py} +5 -6
- edsl/inference_services/{PerplexityService.py → services/perplexity_service.py} +3 -7
- edsl/inference_services/{TestService.py → services/test_service.py} +7 -6
- edsl/inference_services/{TogetherAIService.py → services/together_ai_service.py} +2 -6
- edsl/inference_services/{XAIService.py → services/xai_service.py} +1 -1
- edsl/inference_services/write_available.py +1 -2
- edsl/instructions/__init__.py +6 -0
- edsl/{surveys/instructions/Instruction.py → instructions/instruction.py} +11 -6
- edsl/{surveys/instructions/InstructionCollection.py → instructions/instruction_collection.py} +10 -5
- edsl/{surveys/InstructionHandler.py → instructions/instruction_handler.py} +3 -3
- edsl/{jobs/interviews → interviews}/ReportErrors.py +2 -2
- edsl/interviews/__init__.py +4 -0
- edsl/{jobs/AnswerQuestionFunctionConstructor.py → interviews/answering_function.py} +45 -18
- edsl/{jobs/interviews/InterviewExceptionEntry.py → interviews/exception_tracking.py} +107 -22
- edsl/interviews/interview.py +638 -0
- edsl/{jobs/interviews/InterviewStatusDictionary.py → interviews/interview_status_dictionary.py} +21 -12
- edsl/{jobs/interviews/InterviewStatusLog.py → interviews/interview_status_log.py} +16 -7
- edsl/{jobs/InterviewTaskManager.py → interviews/interview_task_manager.py} +12 -7
- edsl/{jobs/RequestTokenEstimator.py → interviews/request_token_estimator.py} +8 -3
- edsl/{jobs/interviews/InterviewStatistic.py → interviews/statistics.py} +36 -10
- edsl/invigilators/__init__.py +38 -0
- edsl/invigilators/invigilator_base.py +477 -0
- edsl/{agents/Invigilator.py → invigilators/invigilators.py} +263 -10
- edsl/invigilators/prompt_constructor.py +476 -0
- edsl/{agents → invigilators}/prompt_helpers.py +2 -1
- edsl/{agents/QuestionInstructionPromptBuilder.py → invigilators/question_instructions_prompt_builder.py} +18 -13
- edsl/{agents → invigilators}/question_option_processor.py +96 -21
- edsl/{agents/QuestionTemplateReplacementsBuilder.py → invigilators/question_template_replacements_builder.py} +64 -12
- edsl/jobs/__init__.py +7 -1
- edsl/jobs/async_interview_runner.py +99 -35
- edsl/jobs/check_survey_scenario_compatibility.py +7 -5
- edsl/jobs/data_structures.py +153 -22
- edsl/{exceptions/jobs.py → jobs/exceptions.py} +2 -1
- edsl/jobs/{FetchInvigilator.py → fetch_invigilator.py} +4 -4
- edsl/jobs/{loggers/HTMLTableJobLogger.py → html_table_job_logger.py} +6 -2
- edsl/jobs/{Jobs.py → jobs.py} +313 -167
- edsl/jobs/{JobsChecks.py → jobs_checks.py} +15 -7
- edsl/jobs/{JobsComponentConstructor.py → jobs_component_constructor.py} +19 -17
- edsl/jobs/{InterviewsConstructor.py → jobs_interview_constructor.py} +10 -5
- edsl/jobs/jobs_pricing_estimation.py +347 -0
- edsl/jobs/{JobsRemoteInferenceLogger.py → jobs_remote_inference_logger.py} +4 -3
- edsl/jobs/jobs_runner_asyncio.py +282 -0
- edsl/jobs/{JobsRemoteInferenceHandler.py → remote_inference.py} +19 -22
- edsl/jobs/results_exceptions_handler.py +2 -2
- edsl/key_management/__init__.py +28 -0
- edsl/key_management/key_lookup.py +161 -0
- edsl/{language_models/key_management/KeyLookupBuilder.py → key_management/key_lookup_builder.py} +118 -47
- edsl/key_management/key_lookup_collection.py +82 -0
- edsl/key_management/models.py +218 -0
- edsl/language_models/__init__.py +7 -2
- edsl/language_models/{ComputeCost.py → compute_cost.py} +18 -3
- edsl/{exceptions/language_models.py → language_models/exceptions.py} +2 -1
- edsl/language_models/language_model.py +1080 -0
- edsl/language_models/model.py +10 -25
- edsl/language_models/{ModelList.py → model_list.py} +9 -14
- edsl/language_models/{RawResponseHandler.py → raw_response_handler.py} +1 -1
- edsl/language_models/{RegisterLanguageModelsMeta.py → registry.py} +1 -1
- edsl/language_models/repair.py +4 -4
- edsl/language_models/utilities.py +4 -4
- edsl/notebooks/__init__.py +3 -1
- edsl/notebooks/{Notebook.py → notebook.py} +7 -8
- edsl/prompts/__init__.py +1 -1
- edsl/{exceptions/prompts.py → prompts/exceptions.py} +3 -1
- edsl/prompts/{Prompt.py → prompt.py} +101 -95
- edsl/questions/HTMLQuestion.py +1 -1
- edsl/questions/__init__.py +154 -25
- edsl/questions/answer_validator_mixin.py +1 -1
- edsl/questions/compose_questions.py +4 -3
- edsl/questions/derived/question_likert_five.py +166 -0
- edsl/questions/derived/{QuestionLinearScale.py → question_linear_scale.py} +4 -4
- edsl/questions/derived/{QuestionTopK.py → question_top_k.py} +4 -4
- edsl/questions/derived/{QuestionYesNo.py → question_yes_no.py} +4 -5
- edsl/questions/descriptors.py +24 -30
- edsl/questions/loop_processor.py +65 -19
- edsl/questions/question_base.py +881 -0
- edsl/questions/question_base_gen_mixin.py +15 -16
- edsl/questions/{QuestionBasePromptsMixin.py → question_base_prompts_mixin.py} +2 -2
- edsl/questions/{QuestionBudget.py → question_budget.py} +3 -4
- edsl/questions/{QuestionCheckBox.py → question_check_box.py} +16 -16
- edsl/questions/{QuestionDict.py → question_dict.py} +39 -5
- edsl/questions/{QuestionExtract.py → question_extract.py} +9 -9
- edsl/questions/question_free_text.py +282 -0
- edsl/questions/{QuestionFunctional.py → question_functional.py} +6 -5
- edsl/questions/{QuestionList.py → question_list.py} +6 -7
- edsl/questions/{QuestionMatrix.py → question_matrix.py} +6 -5
- edsl/questions/{QuestionMultipleChoice.py → question_multiple_choice.py} +126 -21
- edsl/questions/{QuestionNumerical.py → question_numerical.py} +5 -5
- edsl/questions/{QuestionRank.py → question_rank.py} +6 -6
- edsl/questions/question_registry.py +4 -9
- edsl/questions/register_questions_meta.py +8 -4
- edsl/questions/response_validator_abc.py +17 -16
- edsl/results/__init__.py +4 -1
- edsl/{exceptions/results.py → results/exceptions.py} +1 -1
- edsl/results/report.py +197 -0
- edsl/results/{Result.py → result.py} +131 -45
- edsl/results/{Results.py → results.py} +365 -220
- edsl/results/results_selector.py +344 -25
- edsl/scenarios/__init__.py +30 -3
- edsl/scenarios/{ConstructDownloadLink.py → construct_download_link.py} +7 -0
- edsl/scenarios/directory_scanner.py +156 -13
- edsl/scenarios/document_chunker.py +186 -0
- edsl/scenarios/exceptions.py +101 -0
- edsl/scenarios/file_methods.py +2 -3
- edsl/scenarios/{FileStore.py → file_store.py} +275 -189
- edsl/scenarios/handlers/__init__.py +14 -14
- edsl/scenarios/handlers/{csv.py → csv_file_store.py} +1 -2
- edsl/scenarios/handlers/{docx.py → docx_file_store.py} +8 -7
- edsl/scenarios/handlers/{html.py → html_file_store.py} +1 -2
- edsl/scenarios/handlers/{jpeg.py → jpeg_file_store.py} +1 -1
- edsl/scenarios/handlers/{json.py → json_file_store.py} +1 -1
- edsl/scenarios/handlers/latex_file_store.py +5 -0
- edsl/scenarios/handlers/{md.py → md_file_store.py} +1 -1
- edsl/scenarios/handlers/{pdf.py → pdf_file_store.py} +2 -2
- edsl/scenarios/handlers/{png.py → png_file_store.py} +1 -1
- edsl/scenarios/handlers/{pptx.py → pptx_file_store.py} +8 -7
- edsl/scenarios/handlers/{py.py → py_file_store.py} +1 -3
- edsl/scenarios/handlers/{sql.py → sql_file_store.py} +2 -1
- edsl/scenarios/handlers/{sqlite.py → sqlite_file_store.py} +2 -3
- edsl/scenarios/handlers/{txt.py → txt_file_store.py} +1 -1
- edsl/scenarios/scenario.py +928 -0
- edsl/scenarios/scenario_join.py +18 -5
- edsl/scenarios/{ScenarioList.py → scenario_list.py} +294 -106
- edsl/scenarios/{ScenarioListPdfMixin.py → scenario_list_pdf_tools.py} +16 -15
- edsl/scenarios/scenario_selector.py +5 -1
- edsl/study/ObjectEntry.py +2 -2
- edsl/study/SnapShot.py +5 -5
- edsl/study/Study.py +18 -19
- edsl/study/__init__.py +6 -4
- edsl/surveys/__init__.py +7 -4
- edsl/surveys/dag/__init__.py +2 -0
- edsl/surveys/{ConstructDAG.py → dag/construct_dag.py} +3 -3
- edsl/surveys/{DAG.py → dag/dag.py} +13 -10
- edsl/surveys/descriptors.py +1 -1
- edsl/surveys/{EditSurvey.py → edit_survey.py} +9 -9
- edsl/{exceptions/surveys.py → surveys/exceptions.py} +1 -2
- edsl/surveys/memory/__init__.py +3 -0
- edsl/surveys/{MemoryPlan.py → memory/memory_plan.py} +10 -9
- edsl/surveys/rules/__init__.py +3 -0
- edsl/surveys/{Rule.py → rules/rule.py} +103 -43
- edsl/surveys/{RuleCollection.py → rules/rule_collection.py} +21 -30
- edsl/surveys/{RuleManager.py → rules/rule_manager.py} +19 -13
- edsl/surveys/survey.py +1743 -0
- edsl/surveys/{SurveyExportMixin.py → survey_export.py} +22 -27
- edsl/surveys/{SurveyFlowVisualization.py → survey_flow_visualization.py} +11 -2
- edsl/surveys/{Simulator.py → survey_simulator.py} +10 -3
- edsl/tasks/__init__.py +32 -0
- edsl/{jobs/tasks/QuestionTaskCreator.py → tasks/question_task_creator.py} +115 -57
- edsl/tasks/task_creators.py +135 -0
- edsl/{jobs/tasks/TaskHistory.py → tasks/task_history.py} +86 -47
- edsl/{jobs/tasks → tasks}/task_status_enum.py +91 -7
- edsl/tasks/task_status_log.py +85 -0
- edsl/tokens/__init__.py +2 -0
- edsl/tokens/interview_token_usage.py +53 -0
- edsl/utilities/PrettyList.py +1 -1
- edsl/utilities/SystemInfo.py +25 -22
- edsl/utilities/__init__.py +29 -21
- edsl/utilities/gcp_bucket/__init__.py +2 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +99 -96
- edsl/utilities/interface.py +44 -536
- edsl/{results/MarkdownToPDF.py → utilities/markdown_to_pdf.py} +13 -5
- edsl/utilities/repair_functions.py +1 -1
- {edsl-0.1.47.dist-info → edsl-0.1.48.dist-info}/METADATA +1 -1
- edsl-0.1.48.dist-info/RECORD +347 -0
- edsl/Base.py +0 -493
- edsl/BaseDiff.py +0 -260
- edsl/agents/InvigilatorBase.py +0 -260
- edsl/agents/PromptConstructor.py +0 -318
- edsl/coop/PriceFetcher.py +0 -54
- edsl/data/Cache.py +0 -582
- edsl/data/CacheEntry.py +0 -238
- edsl/data/SQLiteDict.py +0 -292
- edsl/data/__init__.py +0 -5
- edsl/data/orm.py +0 -10
- edsl/exceptions/cache.py +0 -5
- edsl/exceptions/coop.py +0 -14
- edsl/exceptions/data.py +0 -14
- edsl/exceptions/scenarios.py +0 -29
- edsl/jobs/Answers.py +0 -43
- edsl/jobs/JobsPrompts.py +0 -354
- edsl/jobs/buckets/BucketCollection.py +0 -134
- edsl/jobs/buckets/ModelBuckets.py +0 -65
- edsl/jobs/buckets/TokenBucket.py +0 -283
- edsl/jobs/buckets/TokenBucketClient.py +0 -191
- edsl/jobs/interviews/Interview.py +0 -395
- edsl/jobs/interviews/InterviewExceptionCollection.py +0 -99
- edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -25
- edsl/jobs/runners/JobsRunnerAsyncio.py +0 -163
- edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- edsl/jobs/tasks/TaskCreators.py +0 -64
- edsl/jobs/tasks/TaskStatusLog.py +0 -23
- edsl/jobs/tokens/InterviewTokenUsage.py +0 -27
- edsl/language_models/LanguageModel.py +0 -635
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/key_management/KeyLookup.py +0 -63
- edsl/language_models/key_management/KeyLookupCollection.py +0 -38
- edsl/language_models/key_management/models.py +0 -137
- edsl/questions/QuestionBase.py +0 -544
- edsl/questions/QuestionFreeText.py +0 -130
- edsl/questions/derived/QuestionLikertFive.py +0 -76
- edsl/results/ResultsExportMixin.py +0 -45
- edsl/results/TextEditor.py +0 -50
- edsl/results/results_fetch_mixin.py +0 -33
- edsl/results/results_tools_mixin.py +0 -98
- edsl/scenarios/DocumentChunker.py +0 -104
- edsl/scenarios/Scenario.py +0 -548
- edsl/scenarios/ScenarioHtmlMixin.py +0 -65
- edsl/scenarios/ScenarioListExportMixin.py +0 -45
- edsl/scenarios/handlers/latex.py +0 -5
- edsl/shared.py +0 -1
- edsl/surveys/Survey.py +0 -1301
- edsl/surveys/SurveyQualtricsImport.py +0 -284
- edsl/surveys/SurveyToApp.py +0 -141
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/tools/__init__.py +0 -1
- edsl/tools/clusters.py +0 -192
- edsl/tools/embeddings.py +0 -27
- edsl/tools/embeddings_plotting.py +0 -118
- edsl/tools/plotting.py +0 -112
- edsl/tools/summarize.py +0 -18
- edsl/utilities/data/Registry.py +0 -6
- edsl/utilities/data/__init__.py +0 -1
- edsl/utilities/data/scooter_results.json +0 -1
- edsl-0.1.47.dist-info/RECORD +0 -354
- /edsl/coop/{CoopFunctionsMixin.py → coop_functions.py} +0 -0
- /edsl/{results → dataset/display}/CSSParameterizer.py +0 -0
- /edsl/{language_models/key_management → dataset/display}/__init__.py +0 -0
- /edsl/{results → dataset/display}/table_data_class.py +0 -0
- /edsl/{results → dataset/display}/table_display.css +0 -0
- /edsl/{results/ResultsGGMixin.py → dataset/r/ggplot.py} +0 -0
- /edsl/{results → dataset}/tree_explore.py +0 -0
- /edsl/{surveys/instructions/ChangeInstruction.py → instructions/change_instruction.py} +0 -0
- /edsl/{jobs/interviews → interviews}/interview_status_enum.py +0 -0
- /edsl/jobs/{runners/JobsRunnerStatus.py → jobs_runner_status.py} +0 -0
- /edsl/language_models/{PriceManager.py → price_manager.py} +0 -0
- /edsl/language_models/{fake_openai_call.py → unused/fake_openai_call.py} +0 -0
- /edsl/language_models/{fake_openai_service.py → unused/fake_openai_service.py} +0 -0
- /edsl/notebooks/{NotebookToLaTeX.py → notebook_to_latex.py} +0 -0
- /edsl/{exceptions/questions.py → questions/exceptions.py} +0 -0
- /edsl/questions/{SimpleAskMixin.py → simple_ask_mixin.py} +0 -0
- /edsl/surveys/{Memory.py → memory/memory.py} +0 -0
- /edsl/surveys/{MemoryManagement.py → memory/memory_management.py} +0 -0
- /edsl/surveys/{SurveyCSS.py → survey_css.py} +0 -0
- /edsl/{jobs/tokens/TokenUsage.py → tokens/token_usage.py} +0 -0
- /edsl/{results/MarkdownToDocx.py → utilities/markdown_to_docx.py} +0 -0
- /edsl/{TemplateLoader.py → utilities/template_loader.py} +0 -0
- {edsl-0.1.47.dist-info → edsl-0.1.48.dist-info}/LICENSE +0 -0
- {edsl-0.1.47.dist-info → edsl-0.1.48.dist-info}/WHEEL +0 -0
@@ -0,0 +1,638 @@
|
|
1
|
+
"""Interview implementation for asynchronously running agents through surveys.
|
2
|
+
|
3
|
+
This module provides the Interview class, which manages the process of an agent answering
|
4
|
+
a survey with a specific language model and scenario. It handles the complete workflow including:
|
5
|
+
|
6
|
+
1. Determining which questions to ask based on survey skip logic
|
7
|
+
2. Managing memory to control what previous answers are available for each question
|
8
|
+
3. Tracking token usage and ensuring rate limits are respected
|
9
|
+
4. Handling exceptions and retry logic
|
10
|
+
5. Managing the asynchronous execution of question answering tasks
|
11
|
+
|
12
|
+
The Interview class serves as the execution layer between high-level Jobs objects and
|
13
|
+
the individual API calls to language models, with support for caching and distributed execution.
|
14
|
+
"""
|
15
|
+
|
16
|
+
from __future__ import annotations
|
17
|
+
import asyncio
|
18
|
+
import copy
|
19
|
+
from dataclasses import dataclass
|
20
|
+
|
21
|
+
from typing import Any, Type, List, Generator, Optional, TYPE_CHECKING
|
22
|
+
|
23
|
+
# from jobs module
|
24
|
+
from ..jobs.data_structures import Answers
|
25
|
+
from ..buckets import ModelBuckets
|
26
|
+
from ..jobs.fetch_invigilator import FetchInvigilator
|
27
|
+
from ..utilities.utilities import dict_hash
|
28
|
+
from ..surveys import Survey
|
29
|
+
|
30
|
+
# from interviews module
|
31
|
+
from .answering_function import AnswerQuestionFunctionConstructor
|
32
|
+
from .interview_task_manager import InterviewTaskManager
|
33
|
+
from .request_token_estimator import RequestTokenEstimator
|
34
|
+
from .interview_status_dictionary import InterviewStatusDictionary
|
35
|
+
from .exception_tracking import InterviewExceptionCollection, InterviewExceptionEntry
|
36
|
+
|
37
|
+
|
38
|
+
if TYPE_CHECKING:
|
39
|
+
from ..agents import Agent
|
40
|
+
from ..surveys import Survey
|
41
|
+
from ..scenarios import Scenario
|
42
|
+
from ..caching import Cache
|
43
|
+
from ..language_models import LanguageModel
|
44
|
+
from ..tokens import InterviewTokenUsage
|
45
|
+
from ..invigilators import InvigilatorBase
|
46
|
+
from ..key_management import KeyLookup
|
47
|
+
|
48
|
+
|
49
|
+
@dataclass
class InterviewRunningConfig:
    """Configuration parameters for interview execution.

    This dataclass contains settings that control how an interview is conducted,
    including error handling, caching behavior, and validation options.

    Attributes:
        cache: Optional cache for storing and retrieving model responses
        skip_retry: Whether to skip retrying failed questions (default: False)
        raise_validation_errors: Whether to raise exceptions for validation errors (default: True)
        stop_on_exception: Whether to stop the entire interview when an exception occurs (default: False)
    """

    # BUG FIX: the defaults were one-element tuples -- e.g. ``cache: Optional["Cache"] = (None,)``
    # -- because of stray trailing commas. That made every unset flag a *truthy* tuple, so
    # ``stop_on_exception`` (the only field Interview.__init__ does not override) defaulted
    # to ``(False,)`` which evaluates as True in boolean context. Defaults are now scalars.
    cache: Optional["Cache"] = None
    skip_retry: bool = False
    raise_validation_errors: bool = True
    stop_on_exception: bool = False
|
67
|
+
|
68
|
+
|
69
|
+
class Interview:
|
70
|
+
"""Manages the process of an agent answering a survey asynchronously.
|
71
|
+
|
72
|
+
An Interview represents a single execution unit - one agent answering one survey with one
|
73
|
+
language model and one scenario. It handles the complete workflow of navigating through
|
74
|
+
the survey based on skip logic, creating tasks for each question, tracking execution status,
|
75
|
+
and collecting results.
|
76
|
+
|
77
|
+
The core functionality is implemented in the `async_conduct_interview` method, which
|
78
|
+
orchestrates the asynchronous execution of all question-answering tasks while respecting
|
79
|
+
dependencies and rate limits. The class maintains detailed state about the interview progress,
|
80
|
+
including answers collected so far, task statuses, token usage, and any exceptions encountered.
|
81
|
+
|
82
|
+
Key components:
|
83
|
+
- Task management: Creating and scheduling tasks for each question
|
84
|
+
- Memory management: Controlling what previous answers are visible for each question
|
85
|
+
- Exception handling: Tracking and potentially retrying failed questions
|
86
|
+
- Status tracking: Monitoring the state of each task and the overall interview
|
87
|
+
- Token tracking: Measuring and limiting API token usage
|
88
|
+
|
89
|
+
This class serves as the execution layer that translates a high-level survey definition
|
90
|
+
into concrete API calls to language models, with support for caching and fault tolerance.
|
91
|
+
"""
|
92
|
+
|
93
|
+
def __init__(
    self,
    agent: Agent,
    survey: Survey,
    scenario: Scenario,
    model: Type["LanguageModel"],
    iteration: int = 0,
    indices: dict = None,
    cache: Optional["Cache"] = None,
    skip_retry: bool = False,
    raise_validation_errors: bool = True,
):
    """Set up interview state for one agent/survey/scenario/model combination.

    Args:
        agent: The agent that will answer the survey questions
        survey: The survey containing questions to be answered
        scenario: The scenario providing context for the questions
        model: The language model used to generate agent responses
        iteration: The iteration number of this interview (for batch processing)
        indices: Optional dictionary mapping question names to custom indices
        cache: Optional cache for storing and retrieving model responses
        skip_retry: Whether to skip retrying failed questions
        raise_validation_errors: Whether to raise exceptions for validation errors

    Initialization creates the task manager, empty answer/exception containers,
    the running configuration, and a name-to-position index for quick lookups.

    Examples:
        >>> i = Interview.example()
        >>> i.task_manager.task_creators
        {}

        >>> i.exceptions
        {}

        >>> _ = asyncio.run(i.async_conduct_interview())
        >>> i.task_status_logs['q0']
        [{'log_time': ..., 'value': <TaskStatus.NOT_STARTED: 1>}, {'log_time': ..., 'value': <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>}, {'log_time': ..., 'value': <TaskStatus.API_CALL_IN_PROGRESS: 7>}, {'log_time': ..., 'value': <TaskStatus.SUCCESS: 8>}]

        >>> i.to_index
        {'q0': 0, 'q1': 1, 'q2': 2}
    """
    self.agent = agent
    # NOTE(review): the survey is deep-copied, presumably so this interview can
    # mutate it without affecting the caller's instance -- the original author
    # also questioned whether this is necessary; confirm before removing.
    self.survey = copy.deepcopy(survey)
    self.scenario = scenario
    self.model = model
    self.iteration = iteration

    # Filled in incrementally as the interview progresses.
    self.answers = Answers()

    self.task_manager = InterviewTaskManager(
        survey=self.survey,
        iteration=iteration,
    )

    self.exceptions = InterviewExceptionCollection()

    self.running_config = InterviewRunningConfig(
        cache=cache,
        skip_retry=skip_retry,
        raise_validation_errors=raise_validation_errors,
    )

    # Name -> position lookup table for the survey's questions.
    self.to_index = dict(
        (question_name, position)
        for position, question_name in enumerate(self.survey.question_names)
    )

    self.failed_questions = []

    self.indices = indices
    self.initial_hash = hash(self)
|
170
|
+
|
171
|
+
@property
def cache(self) -> "Cache":
    """The response cache attached to this interview, if any.

    Returns:
        Cache: The cache object stored on the running configuration
    """
    config = self.running_config
    return config.cache


@cache.setter
def cache(self, value: "Cache") -> None:
    """Attach a cache for storing and retrieving model responses.

    Args:
        value: The cache object to use
    """
    self.running_config.cache = value
|
188
|
+
|
189
|
+
@property
def skip_retry(self) -> bool:
    """Whether failed questions should be left un-retried.

    Returns:
        bool: True if failed questions should not be retried
    """
    config = self.running_config
    return config.skip_retry
|
197
|
+
|
198
|
+
@property
def raise_validation_errors(self) -> bool:
    """Whether validation failures should surface as exceptions.

    Returns:
        bool: True if validation errors should raise exceptions
    """
    config = self.running_config
    return config.raise_validation_errors
|
206
|
+
|
207
|
+
@property
def has_exceptions(self) -> bool:
    """Whether any exceptions were recorded during this interview.

    Returns:
        bool: True if at least one exception has been recorded
    """
    recorded = len(self.exceptions)
    return recorded > 0
|
215
|
+
|
216
|
+
@property
def task_status_logs(self) -> 'InterviewStatusLog':
    """Complete status history for every task in the interview.

    Delegates to the task manager, which records how each question's task
    moved through its states during execution.

    Returns:
        InterviewStatusLog: Dictionary mapping question names to their status log histories
    """
    manager = self.task_manager
    return manager.task_status_logs
|
227
|
+
|
228
|
+
@property
def token_usage(self) -> "InterviewTokenUsage":
    """Token usage statistics for the whole interview.

    Aggregates prompt and completion token counts across every question,
    as tracked by the task manager.

    Returns:
        InterviewTokenUsage: Token usage statistics for the interview
    """
    manager = self.task_manager
    return manager.token_usage
|
239
|
+
|
240
|
+
@property
|
241
|
+
def interview_status(self) -> InterviewStatusDictionary:
|
242
|
+
"""Get the current status summary for all tasks in the interview.
|
243
|
+
|
244
|
+
This provides a count of tasks in each status category (not started,
|
245
|
+
in progress, completed, failed, etc.).
|
246
|
+
|
247
|
+
Returns:
|
248
|
+
InterviewStatusDictionary: Dictionary mapping status codes to counts
|
249
|
+
"""
|
250
|
+
return self.task_manager.interview_status
|
251
|
+
|
252
|
+
def to_dict(self, include_exceptions=True, add_edsl_version=True) -> dict[str, Any]:
    """Serialize the interview to a dictionary representation.

    The resulting dictionary contains the interview's essential components
    (agent, survey, scenario, model, iteration) and is used for hashing,
    serialization, and recreating equivalent interviews.

    Args:
        include_exceptions: Whether to include exception information (default: True)
        add_edsl_version: Whether to include EDSL version in component dicts (default: True)

    Returns:
        dict: Dictionary representation of the interview

    Examples:
        >>> i = Interview.example()
        >>> hash(i)
        1670837906923478736
    """
    # Core components; "exceptions" is always present, possibly empty.
    serialized = {
        "agent": self.agent.to_dict(add_edsl_version=add_edsl_version),
        "survey": self.survey.to_dict(add_edsl_version=add_edsl_version),
        "scenario": self.scenario.to_dict(add_edsl_version=add_edsl_version),
        "model": self.model.to_dict(add_edsl_version=add_edsl_version),
        "iteration": self.iteration,
        "exceptions": self.exceptions.to_dict() if include_exceptions else {},
    }

    # Custom question indices are carried along when present.
    if hasattr(self, "indices"):
        serialized["indices"] = self.indices

    return serialized
|
290
|
+
|
291
|
+
@classmethod
def from_dict(cls, d: dict[str, Any]) -> "Interview":
    """Create an Interview instance from a dictionary representation.

    Deserializes an interview previously produced by :meth:`to_dict`,
    rebuilding the agent, survey, scenario, model, and any recorded
    exceptions.

    Args:
        d: Dictionary representation of an interview

    Returns:
        Interview: A reconstructed Interview instance
    """
    # Imported locally to avoid circular imports at module load time.
    from ..agents import Agent
    from ..surveys import Survey
    from ..scenarios import Scenario
    from ..language_models import LanguageModel

    # Rebuild each serialized component and assemble constructor arguments.
    constructor_args = {
        "agent": Agent.from_dict(d["agent"]),
        "survey": Survey.from_dict(d["survey"]),
        "scenario": Scenario.from_dict(d["scenario"]),
        "model": LanguageModel.from_dict(d["model"]),
        "iteration": d["iteration"],
    }

    if "indices" in d:
        constructor_args["indices"] = d["indices"]

    interview = cls(**constructor_args)

    # Restore recorded exceptions, if any were serialized.
    if "exceptions" in d:
        interview.exceptions = InterviewExceptionCollection.from_dict(d["exceptions"])

    return interview
|
340
|
+
|
341
|
+
def __hash__(self) -> int:
    """Generate a hash value for the interview.

    Hashing is based only on the immutable essentials (agent, survey,
    scenario, model, iteration); mutable state such as exceptions is
    excluded so the hash stays stable over the interview's lifetime.

    Returns:
        int: A hash value that uniquely identifies this interview configuration
    """
    stable_state = self.to_dict(include_exceptions=False, add_edsl_version=False)
    return dict_hash(stable_state)
|
352
|
+
|
353
|
+
def __eq__(self, other: "Interview") -> bool:
|
354
|
+
"""Check if two interviews are equivalent.
|
355
|
+
|
356
|
+
Two interviews are considered equal if they have the same agent, survey,
|
357
|
+
scenario, model, and iteration number.
|
358
|
+
|
359
|
+
Args:
|
360
|
+
other: Another interview to compare with
|
361
|
+
|
362
|
+
Returns:
|
363
|
+
bool: True if the interviews are equivalent, False otherwise
|
364
|
+
|
365
|
+
Examples:
|
366
|
+
>>> from edsl.interviews import Interview
|
367
|
+
>>> i = Interview.example()
|
368
|
+
>>> d = i.to_dict()
|
369
|
+
>>> i2 = Interview.from_dict(d)
|
370
|
+
>>> i == i2
|
371
|
+
True
|
372
|
+
"""
|
373
|
+
return hash(self) == hash(other)
|
374
|
+
|
375
|
+
    async def async_conduct_interview(
        self,
        run_config: Optional["RunConfig"] = None,
    ) -> tuple["Answers", List[dict[str, Any]]]:
        """Execute the interview process asynchronously.

        This is the core method that conducts the entire interview, creating tasks
        for each question, managing dependencies between them, handling rate limits,
        and collecting results. It orchestrates the asynchronous execution of all
        question-answering tasks in the correct order based on survey rules.

        Args:
            run_config: Optional configuration for the interview execution,
                including parameters like stop_on_exception and environment
                settings like bucket_collection and key_lookup

        Returns:
            tuple: A tuple containing:
                - Answers: Dictionary of all question answers
                - List[dict]: List of valid results with detailed information

        Examples:
            Basic usage:

            >>> i = Interview.example()
            >>> result, _ = asyncio.run(i.async_conduct_interview())
            >>> result['q0']
            'yes'

            Handling exceptions:

            >>> i = Interview.example(throw_exception=True)
            >>> result, _ = asyncio.run(i.async_conduct_interview())
            >>> i.exceptions
            {'q0': ...

            Using custom configuration:

            >>> i = Interview.example()
            >>> from edsl.jobs import RunConfig, RunParameters, RunEnvironment
            >>> run_config = RunConfig(parameters=RunParameters(), environment=RunEnvironment())
            >>> run_config.parameters.stop_on_exception = True
            >>> result, _ = asyncio.run(i.async_conduct_interview(run_config))
        """
        from ..jobs import RunConfig, RunParameters, RunEnvironment

        # Default to an empty configuration when the caller supplies none.
        if run_config is None:
            run_config = RunConfig(
                parameters=RunParameters(),
                environment=RunEnvironment(),
            )
        self.stop_on_exception = run_config.parameters.stop_on_exception

        # if no model bucket is passed, create an 'infinity' bucket with no rate limits
        bucket_collection = run_config.environment.bucket_collection

        if bucket_collection:
            model_buckets = bucket_collection.get(self.model)
        else:
            model_buckets = None

        # Agents that answer directly never hit a model API, so rate limits
        # do not apply to them either.
        if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
            model_buckets = ModelBuckets.infinity_bucket()

        # Reset per-question skip tracking before building tasks.
        self.skip_flags = {q.question_name: False for q in self.survey.questions}

        self.tasks = self.task_manager.build_question_tasks(
            answer_func=AnswerQuestionFunctionConstructor(
                self, key_lookup=run_config.environment.key_lookup
            )(),
            token_estimator=RequestTokenEstimator(self),
            model_buckets=model_buckets,
        )

        ## This is the key part---it creates a task for each question,
        ## with dependencies on the questions that must be answered before this one can be answered.

        ## 'Invigilators' are used to administer the survey.
        fetcher = FetchInvigilator(
            interview=self,
            current_answers=self.answers,
            key_lookup=run_config.environment.key_lookup,
        )
        self.invigilators = [fetcher(question) for question in self.survey.questions]
        # When stop_on_exception is False, gather collects exceptions instead
        # of propagating them, so the remaining tasks still run.
        await asyncio.gather(
            *self.tasks, return_exceptions=not run_config.parameters.stop_on_exception
        )
        self.answers.replace_missing_answers_with_none(self.survey)
        valid_results = list(
            self._extract_valid_results(self.tasks, self.invigilators, self.exceptions)
        )
        return self.answers, valid_results
@staticmethod
|
469
|
+
def _extract_valid_results(
|
470
|
+
tasks: List["asyncio.Task"],
|
471
|
+
invigilators: List["InvigilatorBase"],
|
472
|
+
exceptions: InterviewExceptionCollection,
|
473
|
+
) -> Generator["Answers", None, None]:
|
474
|
+
"""Extract valid results from completed tasks and handle exceptions.
|
475
|
+
|
476
|
+
This method processes the completed asyncio tasks, extracting successful
|
477
|
+
results and handling any exceptions that occurred. It maintains the
|
478
|
+
relationship between tasks, invigilators, and the questions they represent.
|
479
|
+
|
480
|
+
Args:
|
481
|
+
tasks: List of asyncio tasks for each question
|
482
|
+
invigilators: List of invigilators corresponding to each task
|
483
|
+
exceptions: Collection for storing any exceptions that occurred
|
484
|
+
|
485
|
+
Yields:
|
486
|
+
Answers: Valid results from each successfully completed task
|
487
|
+
|
488
|
+
Notes:
|
489
|
+
- Tasks and invigilators must have the same length and be in the same order
|
490
|
+
- Cancelled tasks are expected and don't trigger exception recording
|
491
|
+
- Other exceptions are recorded in the exceptions collection
|
492
|
+
|
493
|
+
Examples:
|
494
|
+
>>> i = Interview.example()
|
495
|
+
>>> result, _ = asyncio.run(i.async_conduct_interview())
|
496
|
+
"""
|
497
|
+
assert len(tasks) == len(invigilators)
|
498
|
+
|
499
|
+
def handle_task(task, invigilator):
|
500
|
+
try:
|
501
|
+
result: Answers = task.result()
|
502
|
+
if result == "skipped":
|
503
|
+
result = invigilator.get_failed_task_result(
|
504
|
+
failure_reason="Task was skipped."
|
505
|
+
)
|
506
|
+
except asyncio.CancelledError as e: # task was cancelled
|
507
|
+
result = invigilator.get_failed_task_result(
|
508
|
+
failure_reason="Task was cancelled."
|
509
|
+
)
|
510
|
+
except Exception as e: # any other kind of exception in the task
|
511
|
+
result = invigilator.get_failed_task_result(
|
512
|
+
failure_reason=f"Task failed with exception: {str(e)}."
|
513
|
+
)
|
514
|
+
exception_entry = InterviewExceptionEntry(
|
515
|
+
exception=e,
|
516
|
+
invigilator=invigilator,
|
517
|
+
)
|
518
|
+
exceptions.add(task.get_name(), exception_entry)
|
519
|
+
return result
|
520
|
+
|
521
|
+
for task, invigilator in zip(tasks, invigilators):
|
522
|
+
if not task.done():
|
523
|
+
raise ValueError(f"Task {task.get_name()} is not done.")
|
524
|
+
|
525
|
+
yield handle_task(task, invigilator)
|
526
|
+
|
527
|
+
def __repr__(self) -> str:
|
528
|
+
"""Generate a string representation of the interview.
|
529
|
+
|
530
|
+
This representation includes the key components of the interview
|
531
|
+
(agent, survey, scenario, and model) for debugging and display purposes.
|
532
|
+
|
533
|
+
Returns:
|
534
|
+
str: A string representation of the interview instance
|
535
|
+
"""
|
536
|
+
return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"
|
537
|
+
|
538
|
+
def duplicate(
|
539
|
+
self, iteration: int, cache: "Cache", randomize_survey: Optional[bool] = True
|
540
|
+
) -> "Interview":
|
541
|
+
"""Create a duplicate of this interview with a new iteration number and cache.
|
542
|
+
|
543
|
+
This method creates a new Interview instance with the same components but
|
544
|
+
a different iteration number. It can optionally randomize the survey questions
|
545
|
+
(for surveys that support randomization) and use a different cache.
|
546
|
+
|
547
|
+
Args:
|
548
|
+
iteration: The new iteration number for the duplicated interview
|
549
|
+
cache: The cache to use for the new interview (can be None)
|
550
|
+
randomize_survey: Whether to randomize the survey questions (default: True)
|
551
|
+
|
552
|
+
Returns:
|
553
|
+
Interview: A new interview instance with updated iteration and cache
|
554
|
+
|
555
|
+
Examples:
|
556
|
+
>>> i = Interview.example()
|
557
|
+
>>> i2 = i.duplicate(1, None)
|
558
|
+
>>> i.iteration + 1 == i2.iteration
|
559
|
+
True
|
560
|
+
"""
|
561
|
+
# Get a randomized copy of the survey if requested
|
562
|
+
if randomize_survey:
|
563
|
+
new_survey = self.survey.draw()
|
564
|
+
else:
|
565
|
+
new_survey = self.survey
|
566
|
+
|
567
|
+
# Create a new interview with the same components but different iteration
|
568
|
+
return Interview(
|
569
|
+
agent=self.agent,
|
570
|
+
survey=new_survey,
|
571
|
+
scenario=self.scenario,
|
572
|
+
model=self.model,
|
573
|
+
iteration=iteration,
|
574
|
+
cache=self.running_config.cache,
|
575
|
+
skip_retry=self.running_config.skip_retry,
|
576
|
+
indices=self.indices,
|
577
|
+
)
|
578
|
+
|
579
|
+
@classmethod
|
580
|
+
def example(self, throw_exception: bool = False) -> "Interview":
|
581
|
+
"""Create an example Interview instance for testing and demonstrations.
|
582
|
+
|
583
|
+
This method provides a convenient way to create a fully configured
|
584
|
+
Interview instance with default components. It can be configured to
|
585
|
+
either work normally or deliberately throw exceptions for testing
|
586
|
+
error handling scenarios.
|
587
|
+
|
588
|
+
Args:
|
589
|
+
throw_exception: If True, creates an interview that will throw
|
590
|
+
exceptions when run (useful for testing error handling)
|
591
|
+
|
592
|
+
Returns:
|
593
|
+
Interview: A fully configured example interview instance
|
594
|
+
|
595
|
+
Examples:
|
596
|
+
Creating a normal interview:
|
597
|
+
|
598
|
+
>>> i = Interview.example()
|
599
|
+
>>> result, _ = asyncio.run(i.async_conduct_interview())
|
600
|
+
>>> result['q0']
|
601
|
+
'yes'
|
602
|
+
|
603
|
+
Creating an interview that will throw exceptions:
|
604
|
+
|
605
|
+
>>> i = Interview.example(throw_exception=True)
|
606
|
+
>>> result, _ = asyncio.run(i.async_conduct_interview())
|
607
|
+
>>> i.has_exceptions
|
608
|
+
True
|
609
|
+
"""
|
610
|
+
from ..agents import Agent
|
611
|
+
from ..surveys import Survey
|
612
|
+
from ..scenarios import Scenario
|
613
|
+
from ..language_models import LanguageModel
|
614
|
+
|
615
|
+
# Define a simple direct answering method that always returns "yes"
|
616
|
+
def f(self, question, scenario):
|
617
|
+
return "yes"
|
618
|
+
|
619
|
+
# Create standard components
|
620
|
+
agent = Agent.example()
|
621
|
+
agent.add_direct_question_answering_method(f)
|
622
|
+
survey = Survey.example()
|
623
|
+
scenario = Scenario.example()
|
624
|
+
model = LanguageModel.example()
|
625
|
+
|
626
|
+
# If we want an interview that throws exceptions, configure it accordingly
|
627
|
+
if throw_exception:
|
628
|
+
model = LanguageModel.example(test_model=True, throw_exception=True)
|
629
|
+
agent = Agent.example() # Without direct answering method
|
630
|
+
|
631
|
+
# Create and return the interview
|
632
|
+
return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
|
633
|
+
|
634
|
+
|
635
|
+
if __name__ == "__main__":
    # Run the module's doctests; ELLIPSIS lets examples elide long output with "...".
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
edsl/{jobs/interviews/InterviewStatusDictionary.py → interviews/interview_status_dictionary.py}
RENAMED
@@ -1,9 +1,8 @@
|
|
1
1
|
from __future__ import annotations
|
2
|
-
import json
|
3
2
|
from collections import UserDict
|
4
3
|
from typing import Union, Dict
|
5
4
|
|
6
|
-
from
|
5
|
+
from ..tasks.task_status_enum import TaskStatus, get_enum_from_string
|
7
6
|
|
8
7
|
|
9
8
|
class InterviewStatusDictionary(UserDict):
|
@@ -48,31 +47,41 @@ class InterviewStatusDictionary(UserDict):
|
|
48
47
|
    def __repr__(self):
        """Return an unambiguous representation showing the underlying status counts."""
        return f"InterviewStatusDictionary({self.data})"
51
|
-
def to_dict(self):
|
50
|
+
def to_dict(self) -> dict:
|
51
|
+
"""Return a dictionary representation of the InterviewStatusDictionary."""
|
52
52
|
new_data = {str(key): value for key, value in self.data.items()}
|
53
53
|
return new_data
|
54
54
|
|
55
|
-
def print(self):
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
55
|
+
# def print(self):
|
56
|
+
# d = {}
|
57
|
+
# for key, value in self.data.items():
|
58
|
+
# d[str(key)] = value
|
59
|
+
# from edsl.utilities.interface import print_dict_with_rich
|
60
60
|
|
61
|
-
|
61
|
+
# print_dict_with_rich(d)
|
62
62
|
|
63
63
|
@classmethod
|
64
|
-
def from_dict(cls, data):
|
64
|
+
def from_dict(cls, data: dict) -> "InterviewStatusDictionary":
|
65
|
+
"""Create an InterviewStatusDictionary from a dictionary."""
|
65
66
|
new_data = {get_enum_from_string(key): value for key, value in data.items()}
|
66
67
|
return cls(new_data)
|
67
68
|
|
68
|
-
def to_json(self):
|
69
|
+
def to_json(self) -> str:
|
70
|
+
"""Return a JSON representation of the InterviewStatusDictionary."""
|
69
71
|
import json
|
70
72
|
|
71
73
|
return json.dumps(self.to_dict())
|
72
74
|
|
73
75
|
@classmethod
|
74
|
-
def from_json(cls, data):
|
76
|
+
def from_json(cls, data: str) -> "InterviewStatusDictionary":
|
77
|
+
"""Create an InterviewStatusDictionary from a JSON string."""
|
75
78
|
import json
|
76
79
|
|
77
80
|
data = json.loads(data)
|
78
81
|
return cls.from_dict(data)
|
82
|
+
|
83
|
+
|
84
|
+
if __name__ == "__main__":
    # Run the module's doctests; ELLIPSIS lets examples elide output with "...".
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)