edsl 0.1.29.dev2__tar.gz → 0.1.29.dev5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/PKG-INFO +11 -10
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/README.md +10 -10
- edsl-0.1.29.dev5/edsl/__version__.py +1 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/Agent.py +12 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/AgentList.py +3 -4
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/InvigilatorBase.py +15 -10
- edsl-0.1.29.dev5/edsl/agents/PromptConstructionMixin.py +374 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/InputData.py +37 -8
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/coop/coop.py +68 -15
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/coop/utils.py +2 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/Jobs.py +22 -16
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/InterviewTaskBuildingMixin.py +1 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/notebooks/Notebook.py +30 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/Prompt.py +31 -19
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionBase.py +32 -11
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/settings.py +1 -1
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/Dataset.py +31 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/Results.py +6 -8
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/ResultsToolsMixin.py +2 -1
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/scenarios/ScenarioList.py +19 -3
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/Survey.py +37 -3
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/tools/plotting.py +4 -2
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/pyproject.toml +1 -4
- edsl-0.1.29.dev2/edsl/__version__.py +0 -1
- edsl-0.1.29.dev2/edsl/agents/PromptConstructionMixin.py +0 -134
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/LICENSE +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/Base.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/BaseDiff.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/Invigilator.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/descriptors.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/config.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/AgentConstructionMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/Conjure.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/InputDataCSV.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/InputDataMixinQuestionStats.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/InputDataPyRead.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/InputDataSPSS.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/InputDataStata.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/QuestionOptionMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/QuestionTypeMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/RawQuestion.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/SurveyResponses.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/examples/placeholder.txt +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/naming_utilities.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conjure/utilities.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conversation/Conversation.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conversation/car_buying.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conversation/mug_negotiation.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/conversation/next_speaker_utilities.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/coop/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data/Cache.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data/CacheEntry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data/CacheHandler.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data/SQLiteDict.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data/orm.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/data_transfer_models.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/enums.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/agents.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/configuration.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/coop.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/data.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/general.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/jobs.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/language_models.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/prompts.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/questions.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/results.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/exceptions/surveys.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/AnthropicService.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/DeepInfraService.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/GoogleService.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/InferenceServiceABC.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/InferenceServicesCollection.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/OpenAIService.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/models_available_cache.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/rate_limits_cache.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/registry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/inference_services/write_available.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/Answers.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/buckets/BucketCollection.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/buckets/ModelBuckets.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/buckets/TokenBucket.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/Interview.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/InterviewStatistic.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/InterviewStatusDictionary.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/InterviewStatusLog.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/InterviewStatusMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/ReportErrors.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/interview_exception_tracking.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/interview_status_enum.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/interviews/retry_management.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/runners/JobsRunnerAsyncio.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/runners/JobsRunnerStatusMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tasks/QuestionTaskCreator.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tasks/TaskCreators.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tasks/TaskHistory.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tasks/TaskStatusLog.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tasks/task_management.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tasks/task_status_enum.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tokens/InterviewTokenUsage.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/jobs/tokens/TokenUsage.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/LanguageModel.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/ModelList.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/RegisterLanguageModelsMeta.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/registry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/repair.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/language_models/unused/ReplicateBase.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/notebooks/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/QuestionInstructionsBase.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/agent_instructions.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/agent_persona.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_budget.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_checkbox.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_extract.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_freetext.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_linear_scale.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_list.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_multiple_choice.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_numerical.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/library/question_rank.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/prompt_config.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/prompts/registry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/AnswerValidatorMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionBudget.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionCheckBox.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionExtract.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionFreeText.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionFunctional.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionList.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionMultipleChoice.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionNumerical.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/QuestionRank.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/RegisterQuestionsMeta.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/SimpleAskMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/compose_questions.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/derived/QuestionLikertFive.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/derived/QuestionLinearScale.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/derived/QuestionTopK.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/derived/QuestionYesNo.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/derived/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/descriptors.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/questions/question_registry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/Result.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/ResultsDBMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/ResultsExportMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/ResultsFetchMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/ResultsGGMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/results/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/scenarios/Scenario.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/scenarios/ScenarioHtmlMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/scenarios/ScenarioImageMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/scenarios/ScenarioListPdfMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/scenarios/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/shared.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/study/ObjectEntry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/study/ProofOfWork.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/study/SnapShot.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/study/Study.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/study/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/DAG.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/Memory.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/MemoryPlan.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/Rule.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/RuleCollection.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/SurveyCSS.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/SurveyExportMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/SurveyFlowVisualizationMixin.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/base.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/surveys/descriptors.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/tools/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/tools/clusters.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/tools/embeddings.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/tools/embeddings_plotting.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/tools/summarize.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/SystemInfo.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/ast_utilities.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/data/Registry.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/data/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/data/scooter_results.json +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/decorators.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/gcp_bucket/__init__.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/gcp_bucket/cloud_storage.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/gcp_bucket/simple_example.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/interface.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/repair_functions.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/restricted_python.py +0 -0
- {edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/utilities/utilities.py +0 -0
{edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: edsl
-Version: 0.1.29.dev2
+Version: 0.1.29.dev5
 Summary: Create and analyze LLM-based surveys
 Home-page: https://www.expectedparrot.com/
 License: MIT
@@ -57,15 +57,6 @@ The Expected Parrot Domain-Specific Language (EDSL) package lets you conduct com
 - [LinkedIn](https://www.linkedin.com/company/expectedparrot/)
 - [Blog](https://blog.expectedparrot.com)

-## 💡 Contributions, feature requests & bugs
-Interested in contributing? Want us to add a new feature? Found a bug for us to squash?
-Please send us an email at [info@expectedparrot.com](mailto:info@expectedparrot.com) or message us at our [Discord channel](https://discord.com/invite/mxAYkjfy9m).
-
-## 💻 Requirements
-* EDSL is compatible with Python 3.9 - 3.12.
-* API keys for large language models that you want to use, stored in a `.env` file.
-See instructions on [storing API keys](https://docs.expectedparrot.com/en/latest/api_keys.html).
-
 ## 🌎 Hello, World!
 A quick example:

@@ -96,3 +87,13 @@ Output:
 │ Good │
 └───────────────────┘
 ```
+
+## 💻 Requirements
+* EDSL is compatible with Python 3.9 - 3.12.
+* API keys for large language models that you want to use, stored in a `.env` file.
+See instructions on [storing API keys](https://docs.expectedparrot.com/en/latest/api_keys.html).
+
+## 💡 Contributions, feature requests & bugs
+Interested in contributing? Want us to add a new feature? Found a bug for us to squash?
+Please send us an email at [info@expectedparrot.com](mailto:info@expectedparrot.com) or message us at our [Discord channel](https://discord.com/invite/mxAYkjfy9m).
+
{edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/README.md
@@ -14,15 +14,6 @@ The Expected Parrot Domain-Specific Language (EDSL) package lets you conduct com
 - [LinkedIn](https://www.linkedin.com/company/expectedparrot/)
 - [Blog](https://blog.expectedparrot.com)

-## 💡 Contributions, feature requests & bugs
-Interested in contributing? Want us to add a new feature? Found a bug for us to squash?
-Please send us an email at [info@expectedparrot.com](mailto:info@expectedparrot.com) or message us at our [Discord channel](https://discord.com/invite/mxAYkjfy9m).
-
-## 💻 Requirements
-* EDSL is compatible with Python 3.9 - 3.12.
-* API keys for large language models that you want to use, stored in a `.env` file.
-See instructions on [storing API keys](https://docs.expectedparrot.com/en/latest/api_keys.html).
-
 ## 🌎 Hello, World!
 A quick example:

@@ -52,4 +43,13 @@ Output:
 ┡━━━━━━━━━━━━━━━━━━━┩
 │ Good │
 └───────────────────┘
-```
+```
+
+## 💻 Requirements
+* EDSL is compatible with Python 3.9 - 3.12.
+* API keys for large language models that you want to use, stored in a `.env` file.
+See instructions on [storing API keys](https://docs.expectedparrot.com/en/latest/api_keys.html).
+
+## 💡 Contributions, feature requests & bugs
+Interested in contributing? Want us to add a new feature? Found a bug for us to squash?
+Please send us an email at [info@expectedparrot.com](mailto:info@expectedparrot.com) or message us at our [Discord channel](https://discord.com/invite/mxAYkjfy9m).
edsl-0.1.29.dev5/edsl/__version__.py
@@ -0,0 +1 @@
+__version__ = "0.1.29.dev5"
{edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/Agent.py
@@ -273,6 +273,7 @@ class Agent(Base):
         *,
         question: QuestionBase,
         cache,
+        survey: Optional["Survey"] = None,
         scenario: Optional[Scenario] = None,
         model: Optional[LanguageModel] = None,
         debug: bool = False,
@@ -301,6 +302,7 @@ class Agent(Base):
         invigilator = self._create_invigilator(
             question=question,
             scenario=scenario,
+            survey=survey,
             model=model,
             debug=debug,
             memory_plan=memory_plan,
@@ -317,6 +319,7 @@ class Agent(Base):
         question: QuestionBase,
         cache: Cache,
         scenario: Optional[Scenario] = None,
+        survey: Optional["Survey"] = None,
         model: Optional[LanguageModel] = None,
         debug: bool = False,
         memory_plan: Optional[MemoryPlan] = None,
@@ -349,6 +352,7 @@ class Agent(Base):
             question=question,
             cache=cache,
             scenario=scenario,
+            survey=survey,
             model=model,
             debug=debug,
             memory_plan=memory_plan,
@@ -366,6 +370,7 @@ class Agent(Base):
         cache: Optional[Cache] = None,
         scenario: Optional[Scenario] = None,
         model: Optional[LanguageModel] = None,
+        survey: Optional["Survey"] = None,
         debug: bool = False,
         memory_plan: Optional[MemoryPlan] = None,
         current_answers: Optional[dict] = None,
@@ -404,6 +409,7 @@ class Agent(Base):
             self,
             question=question,
             scenario=scenario,
+            survey=survey,
             model=model,
             memory_plan=memory_plan,
             current_answers=current_answers,
@@ -479,6 +485,12 @@ class Agent(Base):
         """
         return self.data == other.data

+    def __getattr__(self, name):
+        # This will be called only if 'name' is not found in the usual places
+        if name in self.traits:
+            return self.traits[name]
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
+
     def print(self) -> None:
         from rich import print_json
         import json
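The __getattr__ hunk above makes agent traits readable as attributes. A minimal usage sketch of that behavior, not part of the diff, assuming an Agent built with a traits dict as in the package's own examples:

    from edsl.agents.Agent import Agent

    a = Agent(traits={"age": 22, "hair": "brown"})
    # Trait lookup now falls through __getattr__ to self.traits:
    assert a.age == 22
    assert a.hair == "brown"
    # Names not present in traits still raise AttributeError, as before.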
{edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/AgentList.py
@@ -241,16 +241,16 @@ class AgentList(UserList, Base):

         """
         return cls([Agent.example(), Agent.example()])
-
+
     @classmethod
-    def from_list(self, trait_name:str, values: List[Any]):
+    def from_list(self, trait_name: str, values: List[Any]):
         """Create an AgentList from a list of values.

         :param trait_name: The name of the trait.
         :param values: A list of values.
         """
         return AgentList([Agent({trait_name: value}) for value in values])
-
+
     def __mul__(self, other: AgentList) -> AgentList:
         """Takes the cross product of two AgentLists."""
         from itertools import product
@@ -260,7 +260,6 @@ class AgentList(UserList, Base):
             new_sl.append(s1 + s2)
         return AgentList(new_sl)

-
     def code(self, string=True) -> Union[str, list[str]]:
         """Return code to construct an AgentList.

{edsl-0.1.29.dev2 → edsl-0.1.29.dev5}/edsl/agents/InvigilatorBase.py
@@ -46,6 +46,7 @@ class InvigilatorBase(ABC):
         model: LanguageModel,
         memory_plan: MemoryPlan,
         current_answers: dict,
+        survey: Optional["Survey"],
         cache: Optional[Cache] = None,
         iteration: Optional[int] = 1,
         additional_prompt_data: Optional[dict] = None,
@@ -57,11 +58,12 @@ class InvigilatorBase(ABC):
         self.scenario = scenario
         self.model = model
         self.memory_plan = memory_plan
-        self.current_answers = current_answers
+        self.current_answers = current_answers or {}
         self.iteration = iteration
         self.additional_prompt_data = additional_prompt_data
         self.cache = cache
         self.sidecar_model = sidecar_model
+        self.survey = survey

     def __repr__(self) -> str:
         """Return a string representation of the Invigilator.
@@ -76,7 +78,7 @@ class InvigilatorBase(ABC):
         """Return an AgentResponseDict used in case the question-asking fails.

         >>> InvigilatorBase.example().get_failed_task_result()
-        {'answer': None, 'comment': 'Failed to get response',
+        {'answer': None, 'comment': 'Failed to get response', ...}
         """
         return AgentResponseDict(
             answer=None,
@@ -86,11 +88,8 @@ class InvigilatorBase(ABC):
         )

     def get_prompts(self) -> Dict[str, Prompt]:
-        """Return the prompt used.
+        """Return the prompt used."""

-        >>> InvigilatorBase.example().get_prompts()
-        {'user_prompt': Prompt(text=\"""NA\"""), 'system_prompt': Prompt(text=\"""NA\""")}
-        """
         return {
             "user_prompt": Prompt("NA"),
             "system_prompt": Prompt("NA"),
@@ -129,7 +128,7 @@ class InvigilatorBase(ABC):
         )

     @classmethod
-    def example(cls, throw_an_exception=False):
+    def example(cls, throw_an_exception=False, question=None, scenario=None):
         """Return an example invigilator.

         >>> InvigilatorBase.example()
@@ -167,15 +166,20 @@ class InvigilatorBase(ABC):
         if throw_an_exception:
             model.throw_an_exception = True
         agent = Agent.example()
-        question = QuestionMultipleChoice.example()
-
+        # question = QuestionMultipleChoice.example()
+        from edsl.surveys import Survey
+
+        survey = Survey.example()
+        question = question or survey.questions[0]
+        scenario = scenario or Scenario.example()
         # memory_plan = None #memory_plan = MemoryPlan()
         from edsl import Survey

         memory_plan = MemoryPlan(survey=Survey.example())
         current_answers = None
+        from edsl.agents.PromptConstructionMixin import PromptConstructorMixin

-        class InvigilatorExample(InvigilatorBase):
+        class InvigilatorExample(PromptConstructorMixin, InvigilatorBase):
             """An example invigilator."""

             async def async_answer_question(self):
@@ -188,6 +192,7 @@ class InvigilatorBase(ABC):
             agent=agent,
             question=question,
             scenario=scenario,
+            survey=survey,
             model=model,
             memory_plan=memory_plan,
             current_answers=current_answers,
edsl-0.1.29.dev5/edsl/agents/PromptConstructionMixin.py
@@ -0,0 +1,374 @@
+from typing import Dict, Any, Optional
+from collections import UserList
+
+# from functools import reduce
+from edsl.prompts.Prompt import Prompt
+
+# from edsl.utilities.decorators import sync_wrapper, jupyter_nb_handler
+from edsl.prompts.registry import get_classes as prompt_lookup
+from edsl.exceptions import QuestionScenarioRenderError
+
+import enum
+
+
+class PromptComponent(enum.Enum):
+    AGENT_INSTRUCTIONS = "agent_instructions"
+    AGENT_PERSONA = "agent_persona"
+    QUESTION_INSTRUCTIONS = "question_instructions"
+    PRIOR_QUESTION_MEMORY = "prior_question_memory"
+
+
+class PromptList(UserList):
+    separator = Prompt(" ")
+
+    def reduce(self):
+        """Reduce the list of prompts to a single prompt.
+
+        >>> p = PromptList([Prompt("You are a happy-go lucky agent."), Prompt("You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}")])
+        >>> p.reduce()
+        Prompt(text=\"""You are a happy-go lucky agent. You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
+
+        """
+        p = self[0]
+        for prompt in self[1:]:
+            if len(prompt) > 0:
+                p = p + self.separator + prompt
+        return p
+
+
+class PromptPlan:
+    """A plan for constructing prompts for the LLM call.
+    Every prompt plan has a user prompt order and a system prompt order.
+    It must contain each of the values in the PromptComponent enum.
+
+
+    >>> p = PromptPlan(user_prompt_order=(PromptComponent.AGENT_INSTRUCTIONS, PromptComponent.AGENT_PERSONA),system_prompt_order=(PromptComponent.QUESTION_INSTRUCTIONS, PromptComponent.PRIOR_QUESTION_MEMORY))
+    >>> p._is_valid_plan()
+    True
+
+    >>> p.arrange_components(agent_instructions=1, agent_persona=2, question_instructions=3, prior_question_memory=4)
+    {'user_prompt': ..., 'system_prompt': ...}
+
+    >>> p = PromptPlan(user_prompt_order=("agent_instructions", ), system_prompt_order=("question_instructions", "prior_question_memory"))
+    Traceback (most recent call last):
+    ...
+    ValueError: Invalid plan: must contain each value of PromptComponent exactly once.
+
+    """
+
+    def __init__(
+        self,
+        user_prompt_order: Optional[tuple] = None,
+        system_prompt_order: Optional[tuple] = None,
+    ):
+        """Initialize the PromptPlan."""
+
+        if user_prompt_order is None:
+            user_prompt_order = (
+                PromptComponent.QUESTION_INSTRUCTIONS,
+                PromptComponent.PRIOR_QUESTION_MEMORY,
+            )
+        if system_prompt_order is None:
+            system_prompt_order = (
+                PromptComponent.AGENT_INSTRUCTIONS,
+                PromptComponent.AGENT_PERSONA,
+            )
+
+        # very commmon way to screw this up given how python treats single strings as iterables
+        if isinstance(user_prompt_order, str):
+            user_prompt_order = (user_prompt_order,)
+
+        if isinstance(system_prompt_order, str):
+            system_prompt_order = (system_prompt_order,)
+
+        if not isinstance(user_prompt_order, tuple):
+            raise TypeError(
+                f"Expected a tuple, but got {type(user_prompt_order).__name__}"
+            )
+
+        if not isinstance(system_prompt_order, tuple):
+            raise TypeError(
+                f"Expected a tuple, but got {type(system_prompt_order).__name__}"
+            )
+
+        self.user_prompt_order = self._convert_to_enum(user_prompt_order)
+        self.system_prompt_order = self._convert_to_enum(system_prompt_order)
+        if not self._is_valid_plan():
+            raise ValueError(
+                "Invalid plan: must contain each value of PromptComponent exactly once."
+            )
+
+    def _convert_to_enum(self, prompt_order: tuple):
+        """Convert string names to PromptComponent enum values."""
+        return tuple(
+            PromptComponent(component) if isinstance(component, str) else component
+            for component in prompt_order
+        )
+
+    def _is_valid_plan(self):
+        """Check if the plan is valid."""
+        combined = self.user_prompt_order + self.system_prompt_order
+        return set(combined) == set(PromptComponent)
+
+    def arrange_components(self, **kwargs) -> Dict[PromptComponent, Prompt]:
+        """Arrange the components in the order specified by the plan."""
+        # check is valid components passed
+        component_strings = set([pc.value for pc in PromptComponent])
+        if not set(kwargs.keys()) == component_strings:
+            raise ValueError(
+                f"Invalid components passed: {set(kwargs.keys())} but expected {PromptComponent}"
+            )
+
+        user_prompt = PromptList(
+            [kwargs[component.value] for component in self.user_prompt_order]
+        )
+        system_prompt = PromptList(
+            [kwargs[component.value] for component in self.system_prompt_order]
+        )
+        return {"user_prompt": user_prompt, "system_prompt": system_prompt}
+
+    def get_prompts(self, **kwargs) -> Dict[str, Prompt]:
+        """Get both prompts for the LLM call."""
+        prompts = self.arrange_components(**kwargs)
+        return {
+            "user_prompt": prompts["user_prompt"].reduce(),
+            "system_prompt": prompts["system_prompt"].reduce(),
+        }
+
+
+class PromptConstructorMixin:
+    """Mixin for constructing prompts for the LLM call.
+
+    The pieces of a prompt are:
+    - The agent instructions - "You are answering questions as if you were a human. Do not break character."
+    - The persona prompt - "You are an agent with the following persona: {'age': 22, 'hair': 'brown', 'height': 5.5}"
+    - The question instructions - "You are being asked the following question: Do you like school? The options are 0: yes 1: no Return a valid JSON formatted like this, selecting only the number of the option: {"answer": <put answer code here>, "comment": "<put explanation here>"} Only 1 option may be selected."
+    - The memory prompt - "Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer"
+
+    This is mixed into the Invigilator class.
+    """
+
+    prompt_plan = PromptPlan()
+
+    @property
+    def agent_instructions_prompt(self) -> Prompt:
+        """
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example()
+        >>> i.agent_instructions_prompt
+        Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
+        """
+        if not hasattr(self, "_agent_instructions_prompt"):
+            applicable_prompts = prompt_lookup(
+                component_type="agent_instructions",
+                model=self.model.model,
+            )
+            if len(applicable_prompts) == 0:
+                raise Exception("No applicable prompts found")
+            self._agent_instructions_prompt = applicable_prompts[0](
+                text=self.agent.instruction
+            )
+        return self._agent_instructions_prompt
+
+    @property
+    def agent_persona_prompt(self) -> Prompt:
+        """
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example()
+        >>> i.agent_persona_prompt
+        Prompt(text=\"""You are an agent with the following persona:
+        {'age': 22, 'hair': 'brown', 'height': 5.5}\""")
+
+        """
+        if not hasattr(self, "_agent_persona_prompt"):
+            if not hasattr(self.agent, "agent_persona"):
+                applicable_prompts = prompt_lookup(
+                    component_type="agent_persona",
+                    model=self.model.model,
+                )
+                persona_prompt_template = applicable_prompts[0]()
+            else:
+                persona_prompt_template = self.agent.agent_persona
+
+            # TODO: This multiple passing of agent traits - not sure if it is necessary. Not harmful.
+            if undefined := persona_prompt_template.undefined_template_variables(
+                self.agent.traits
+                | {"traits": self.agent.traits}
+                | {"codebook": self.agent.codebook}
+                | {"traits": self.agent.traits}
+            ):
+                raise QuestionScenarioRenderError(
+                    f"Agent persona still has variables that were not rendered: {undefined}"
+                )
+
+            persona_prompt = persona_prompt_template.render(
+                self.agent.traits | {"traits": self.agent.traits},
+                codebook=self.agent.codebook,
+                traits=self.agent.traits,
+            )
+            if persona_prompt.has_variables:
+                raise QuestionScenarioRenderError(
+                    "Agent persona still has variables that were not rendered."
+                )
+            self._agent_persona_prompt = persona_prompt
+
+        return self._agent_persona_prompt
+
+    @property
+    def question_instructions_prompt(self) -> Prompt:
+        """
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example()
+        >>> i.question_instructions_prompt
+        Prompt(text=\"""You are being asked the following question: Do you like school?
+        The options are
+        <BLANKLINE>
+        0: yes
+        <BLANKLINE>
+        1: no
+        <BLANKLINE>
+        Return a valid JSON formatted like this, selecting only the number of the option:
+        {"answer": <put answer code here>, "comment": "<put explanation here>"}
+        Only 1 option may be selected.\""")
+
+        >>> from edsl import QuestionFreeText
+        >>> q = QuestionFreeText(question_text = "Consider {{ X }}. What is your favorite color?", question_name = "q_color")
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example(question = q)
+        >>> i.question_instructions_prompt
+        Traceback (most recent call last):
+        ...
+        edsl.exceptions.questions.QuestionScenarioRenderError: Question instructions still has variables: ['X'].
+
+
+        >>> from edsl import QuestionFreeText
+        >>> q = QuestionFreeText(question_text = "You were asked the question '{{ q0.question_text }}'. What is your favorite color?", question_name = "q_color")
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example(question = q)
+        >>> i.question_instructions_prompt
+        Prompt(text=\"""You are being asked the following question: You were asked the question 'Do you like school?'. What is your favorite color?
+        Return a valid JSON formatted like this:
+        {"answer": "<put free text answer here>"}\""")
+
+        >>> from edsl import QuestionFreeText
+        >>> q = QuestionFreeText(question_text = "You stated '{{ q0.answer }}'. What is your favorite color?", question_name = "q_color")
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> i = InvigilatorBase.example(question = q)
+        >>> i.current_answers = {"q0": "I like school"}
+        >>> i.question_instructions_prompt
+        Prompt(text=\"""You are being asked the following question: You stated 'I like school'. What is your favorite color?
+        Return a valid JSON formatted like this:
+        {"answer": "<put free text answer here>"}\""")
+
+
+        """
+        if not hasattr(self, "_question_instructions_prompt"):
+            question_prompt = self.question.get_instructions(model=self.model.model)
+
+            # TODO: Try to populate the answers in the question object if they are available
+            d = self.survey.question_names_to_questions()
+            for question, answer in self.current_answers.items():
+                if question in d:
+                    d[question].answer = answer
+                else:
+                    # adds a comment to the question
+                    if (new_question := question.split("_comment")[0]) in d:
+                        d[new_question].comment = answer
+
+            rendered_instructions = question_prompt.render(self.question.data | self.scenario | d | {'agent': self.agent})
+
+            undefined_template_variables = (
+                rendered_instructions.undefined_template_variables({})
+            )
+
+            # Check if it's the name of a question in the survey
+            for question_name in self.survey.question_names:
+                if question_name in undefined_template_variables:
+                    print(
+                        "Question name found in undefined_template_variables: ",
+                        question_name,
+                    )
+
+            if undefined_template_variables:
+                print(undefined_template_variables)
+                raise QuestionScenarioRenderError(
+                    f"Question instructions still has variables: {undefined_template_variables}."
+                )
+
+            self._question_instructions_prompt = rendered_instructions
+        return self._question_instructions_prompt
+
+    @property
+    def prior_question_memory_prompt(self) -> Prompt:
+        if not hasattr(self, "_prior_question_memory_prompt"):
+            from edsl.prompts.Prompt import Prompt
+
+            memory_prompt = Prompt(text="")
+            if self.memory_plan is not None:
+                memory_prompt += self.create_memory_prompt(
+                    self.question.question_name
+                ).render(self.scenario)
+            self._prior_question_memory_prompt = memory_prompt
+        return self._prior_question_memory_prompt
+
+    def construct_system_prompt(self) -> Prompt:
+        """Construct the system prompt for the LLM call."""
+        import warnings
+
+        warnings.warn(
+            "This method is deprecated. Use get_prompts instead.", DeprecationWarning
+        )
+        return self.get_prompts()["system_prompt"]
+
+    def construct_user_prompt(self) -> Prompt:
+        """Construct the user prompt for the LLM call."""
+        import warnings
+
+        warnings.warn(
+            "This method is deprecated. Use get_prompts instead.", DeprecationWarning
+        )
+        return self.get_prompts()["user_prompt"]
+
+    def get_prompts(self) -> Dict[str, Prompt]:
+        """Get both prompts for the LLM call.
+
+        >>> from edsl import QuestionFreeText
+        >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+        >>> q = QuestionFreeText(question_text="How are you today?", question_name="q0")
+        >>> i = InvigilatorBase.example(question = q)
+        >>> i.get_prompts()
+        {'user_prompt': ..., 'system_prompt': ...}
+        >>> scenario = i._get_scenario_with_image()
+        >>> scenario.has_image
+        True
+        >>> q = QuestionFreeText(question_text="How are you today?", question_name="q0")
+        >>> i = InvigilatorBase.example(question = q, scenario = scenario)
+        >>> i.get_prompts()
+        {'user_prompt': ..., 'system_prompt': ..., 'encoded_image': ...'}
+        """
+        prompts = self.prompt_plan.get_prompts(
+            agent_instructions=self.agent_instructions_prompt,
+            agent_persona=self.agent_persona_prompt,
+            question_instructions=self.question_instructions_prompt,
+            prior_question_memory=self.prior_question_memory_prompt,
+        )
+
+        if hasattr(self.scenario, "has_image") and self.scenario.has_image:
+            prompts["encoded_image"] = self.scenario["encoded_image"]
+        return prompts
+
+    def _get_scenario_with_image(self) -> Dict[str, Any]:
+        """This is a helper function to get a scenario with an image, for testing purposes."""
+        from edsl import Scenario
+
+        try:
+            scenario = Scenario.from_image("../../static/logo.png")
+        except FileNotFoundError:
+            scenario = Scenario.from_image("static/logo.png")
+        return scenario
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
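The PromptPlan added above is exercised by its own doctests; a compact sketch of how it can be driven, using placeholder Prompt texts (not part of the diff):

    from edsl.prompts.Prompt import Prompt
    from edsl.agents.PromptConstructionMixin import PromptPlan

    # Mirror the order used in the PromptPlan doctest; strings are converted to PromptComponent values.
    plan = PromptPlan(
        user_prompt_order=("agent_instructions", "agent_persona"),
        system_prompt_order=("question_instructions", "prior_question_memory"),
    )
    prompts = plan.get_prompts(
        agent_instructions=Prompt("You are answering questions as if you were a human."),
        agent_persona=Prompt("You are an agent with the following persona: {'age': 22}"),
        question_instructions=Prompt("You are being asked the following question: Do you like school?"),
        prior_question_memory=Prompt(""),
    )
    # prompts["user_prompt"] and prompts["system_prompt"] are single Prompt objects,
    # each reduced from its PromptList; empty components are skipped during reduce().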