edsl 0.1.30.dev4__tar.gz → 0.1.30.dev5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/PKG-INFO +1 -1
- edsl-0.1.30.dev5/edsl/__version__.py +1 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/buckets/TokenBucket.py +12 -1
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/Interview.py +95 -70
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewTaskBuildingMixin.py +7 -19
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/QuestionTaskCreator.py +4 -7
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/LanguageModel.py +3 -1
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionFunctional.py +34 -3
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionMultipleChoice.py +55 -8
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/descriptors.py +40 -2
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/DatasetExportMixin.py +82 -6
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/Result.py +52 -6
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/Results.py +64 -28
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/study/Study.py +2 -2
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/Survey.py +32 -1
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/pyproject.toml +19 -18
- edsl-0.1.30.dev4/edsl/__version__.py +0 -1
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/LICENSE +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/README.md +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/Base.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/BaseDiff.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/Agent.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/AgentList.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/Invigilator.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/InvigilatorBase.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/PromptConstructionMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/agents/descriptors.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/base/Base.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/config.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/AgentConstructionMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/Conjure.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/InputData.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/InputDataCSV.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/InputDataMixinQuestionStats.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/InputDataPyRead.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/InputDataSPSS.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/InputDataStata.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/QuestionOptionMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/QuestionTypeMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/RawQuestion.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/SurveyResponses.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/examples/placeholder.txt +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/naming_utilities.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conjure/utilities.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conversation/Conversation.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conversation/car_buying.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conversation/mug_negotiation.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/conversation/next_speaker_utilities.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/coop/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/coop/coop.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/coop/utils.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data/Cache.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data/CacheEntry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data/CacheHandler.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data/SQLiteDict.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data/orm.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/data_transfer_models.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/enums.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/agents.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/configuration.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/coop.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/data.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/general.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/jobs.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/language_models.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/prompts.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/questions.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/results.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/exceptions/surveys.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/AnthropicService.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/DeepInfraService.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/GoogleService.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/InferenceServiceABC.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/InferenceServicesCollection.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/OpenAIService.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/models_available_cache.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/rate_limits_cache.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/registry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/inference_services/write_available.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/Answers.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/Jobs.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/buckets/BucketCollection.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/buckets/ModelBuckets.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewStatistic.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewStatusDictionary.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewStatusLog.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewStatusMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/ReportErrors.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/interview_exception_tracking.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/interview_status_enum.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/retry_management.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/runners/JobsRunnerAsyncio.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/runners/JobsRunnerStatusMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/TaskCreators.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/TaskHistory.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/TaskStatusLog.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/task_management.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/task_status_enum.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tokens/InterviewTokenUsage.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tokens/TokenUsage.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/ModelList.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/RegisterLanguageModelsMeta.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/registry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/repair.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/unused/ReplicateBase.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/notebooks/Notebook.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/notebooks/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/Prompt.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/QuestionInstructionsBase.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/agent_instructions.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/agent_persona.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_budget.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_checkbox.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_extract.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_freetext.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_linear_scale.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_list.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_multiple_choice.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_numerical.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/library/question_rank.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/prompt_config.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/prompts/registry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/AnswerValidatorMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionBase.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionBudget.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionCheckBox.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionExtract.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionFreeText.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionList.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionNumerical.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionRank.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/RegisterQuestionsMeta.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/SimpleAskMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/compose_questions.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/derived/QuestionLikertFive.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/derived/QuestionLinearScale.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/derived/QuestionTopK.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/derived/QuestionYesNo.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/derived/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/question_registry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/settings.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/Dataset.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/ResultsDBMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/ResultsExportMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/ResultsFetchMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/ResultsGGMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/ResultsToolsMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/results/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/FileStore.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/Scenario.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/ScenarioHtmlMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/ScenarioImageMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/ScenarioList.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/ScenarioListExportMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/ScenarioListPdfMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/scenarios/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/shared.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/study/ObjectEntry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/study/ProofOfWork.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/study/SnapShot.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/study/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/DAG.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/Memory.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/MemoryPlan.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/Rule.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/RuleCollection.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/SurveyCSS.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/SurveyExportMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/SurveyFlowVisualizationMixin.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/base.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/surveys/descriptors.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/tools/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/tools/clusters.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/tools/embeddings.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/tools/embeddings_plotting.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/tools/plotting.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/tools/summarize.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/SystemInfo.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/ast_utilities.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/data/Registry.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/data/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/data/scooter_results.json +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/decorators.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/gcp_bucket/__init__.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/gcp_bucket/cloud_storage.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/gcp_bucket/simple_example.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/interface.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/repair_functions.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/restricted_python.py +0 -0
- {edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/utilities/utilities.py +0 -0
edsl-0.1.30.dev5/edsl/__version__.py

@@ -0,0 +1 @@
+__version__ = "0.1.30.dev5"
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/buckets/TokenBucket.py

@@ -30,6 +30,7 @@ class TokenBucket:
         if self.turbo_mode:
             pass
         else:
+            #pass
             self.turbo_mode = True
             self.capacity = float("inf")
             self.refill_rate = float("inf")
@@ -72,7 +73,17 @@ class TokenBucket:
         self.log.append((time.monotonic(), self.tokens))

     def refill(self) -> None:
-        """Refill the bucket with new tokens based on elapsed time.
+        """Refill the bucket with new tokens based on elapsed time.
+
+
+
+        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
+        >>> bucket.tokens = 0
+        >>> bucket.refill()
+        >>> bucket.tokens > 0
+        True
+
+        """
         now = time.monotonic()
         elapsed = now - self.last_refill
         refill_amount = elapsed * self.refill_rate
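
Note: the new refill() doctest exercises the elapsed-time refill pattern visible in the surrounding context lines (elapsed = now - self.last_refill, refill_amount = elapsed * self.refill_rate). Below is a minimal standalone sketch of that pattern, using a hypothetical MiniBucket rather than the package's TokenBucket class:

    import time

    class MiniBucket:
        """Toy token bucket: tokens accrue in proportion to elapsed time."""

        def __init__(self, capacity: float, refill_rate: float):
            self.capacity = capacity
            self.refill_rate = refill_rate
            self.tokens = capacity
            self.last_refill = time.monotonic()

        def refill(self) -> None:
            # add elapsed * rate tokens, capped at capacity
            elapsed = time.monotonic() - self.last_refill
            self.tokens = min(self.capacity, self.tokens + elapsed * self.refill_rate)
            self.last_refill = time.monotonic()

    bucket = MiniBucket(capacity=10, refill_rate=1)
    bucket.tokens = 0
    time.sleep(0.01)   # let some time elapse so the refill is visible
    bucket.refill()
    assert bucket.tokens > 0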
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/Interview.py

@@ -20,6 +20,10 @@ from edsl.jobs.interviews.retry_management import retry_strategy
 from edsl.jobs.interviews.InterviewTaskBuildingMixin import InterviewTaskBuildingMixin
 from edsl.jobs.interviews.InterviewStatusMixin import InterviewStatusMixin

+import asyncio
+
+def run_async(coro):
+    return asyncio.run(coro)

 class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
     """
@@ -36,8 +40,8 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         model: Type["LanguageModel"],
         debug: Optional[bool] = False,
         iteration: int = 0,
-        cache: "Cache" = None,
-        sidecar_model:
+        cache: Optional["Cache"] = None,
+        sidecar_model: Optional['LanguageModel'] = None,
     ):
         """Initialize the Interview instance.

@@ -45,6 +49,24 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         :param survey: the survey being administered to the agent.
         :param scenario: the scenario that populates the survey questions.
         :param model: the language model used to answer the questions.
+        :param debug: if True, run without calls to the language model.
+        :param iteration: the iteration number of the interview.
+        :param cache: the cache used to store the answers.
+        :param sidecar_model: a sidecar model used to answer questions.
+
+        >>> i = Interview.example()
+        >>> i.task_creators
+        {}
+
+        >>> i.exceptions
+        {}
+
+        >>> _ = asyncio.run(i.async_conduct_interview())
+        >>> i.task_status_logs['q0']
+        [{'log_time': ..., 'value': <TaskStatus.NOT_STARTED: 1>}, {'log_time': ..., 'value': <TaskStatus.WAITING_FOR_DEPENDENCIES: 2>}, {'log_time': ..., 'value': <TaskStatus.API_CALL_IN_PROGRESS: 7>}, {'log_time': ..., 'value': <TaskStatus.SUCCESS: 8>}]
+
+        >>> i.to_index
+        {'q0': 0, 'q1': 1, 'q2': 2}

         """
         self.agent = agent
@@ -64,7 +86,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         self.exceptions = InterviewExceptionCollection()
         self._task_status_log_dict = InterviewStatusLog()

-        # dictionary mapping question names to their index in the survey.
+        # dictionary mapping question names to their index in the survey.
         self.to_index = {
             question_name: index
             for index, question_name in enumerate(self.survey.question_names)
@@ -76,14 +98,16 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         model_buckets: ModelBuckets = None,
         debug: bool = False,
         stop_on_exception: bool = False,
-        sidecar_model: Optional[LanguageModel] = None,
+        sidecar_model: Optional['LanguageModel'] = None,
     ) -> tuple["Answers", List[dict[str, Any]]]:
         """
         Conduct an Interview asynchronously.
+        It returns a tuple with the answers and a list of valid results.

         :param model_buckets: a dictionary of token buckets for the model.
         :param debug: run without calls to LLM.
         :param stop_on_exception: if True, stops the interview if an exception is raised.
+        :param sidecar_model: a sidecar model used to answer questions.

         Example usage:

@@ -91,22 +115,44 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         >>> result, _ = asyncio.run(i.async_conduct_interview())
         >>> result['q0']
         'yes'
+
+        >>> i = Interview.example(throw_exception = True)
+        >>> result, _ = asyncio.run(i.async_conduct_interview())
+        Attempt 1 failed with exception:This is a test error now waiting 1.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
+        <BLANKLINE>
+        <BLANKLINE>
+        Attempt 2 failed with exception:This is a test error now waiting 2.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
+        <BLANKLINE>
+        <BLANKLINE>
+        Attempt 3 failed with exception:This is a test error now waiting 4.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
+        <BLANKLINE>
+        <BLANKLINE>
+        Attempt 4 failed with exception:This is a test error now waiting 8.00 seconds before retrying.Parameters: start=1.0, max=60.0, max_attempts=5.
+        <BLANKLINE>
+        <BLANKLINE>
+
+        >>> i.exceptions
+        {'q0': [{'exception': "Exception('This is a test error')", 'time': ..., 'traceback': ...
+
+        >>> i = Interview.example()
+        >>> result, _ = asyncio.run(i.async_conduct_interview(stop_on_exception = True))
+        Traceback (most recent call last):
+        ...
+        asyncio.exceptions.CancelledError
         """
         self.sidecar_model = sidecar_model

         # if no model bucket is passed, create an 'infinity' bucket with no rate limits
-        # print("model_buckets", model_buckets)
         if model_buckets is None or hasattr(self.agent, "answer_question_directly"):
             model_buckets = ModelBuckets.infinity_bucket()

-
-        # model_buckets = ModelBuckets.infinity_bucket()
-
+
         ## build the tasks using the InterviewTaskBuildingMixin
         ## This is the key part---it creates a task for each question,
         ## with dependencies on the questions that must be answered before this one can be answered.
         self.tasks = self._build_question_tasks(
-            debug=debug,
+            debug=debug,
+            model_buckets=model_buckets
         )

         ## 'Invigilators' are used to administer the survey
@@ -123,6 +169,14 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         It iterates through the tasks and invigilators, and yields the results of the tasks that are done.
         If a task is not done, it raises a ValueError.
         If an exception is raised in the task, it records the exception in the Interview instance except if the task was cancelled, which is expected behavior.
+
+        >>> i = Interview.example()
+        >>> result, _ = asyncio.run(i.async_conduct_interview())
+        >>> results = list(i._extract_valid_results())
+        >>> len(results) == len(i.survey)
+        True
+        >>> type(results[0])
+        <class 'edsl.data_transfer_models.AgentResponseDict'>
         """
         assert len(self.tasks) == len(self.invigilators)

@@ -140,7 +194,18 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
             yield result

     def _record_exception(self, task, exception: Exception) -> None:
-        """Record an exception in the Interview instance.
+        """Record an exception in the Interview instance.
+
+        It records the exception in the Interview instance, with the task name and the exception entry.
+
+        >>> i = Interview.example()
+        >>> result, _ = asyncio.run(i.async_conduct_interview())
+        >>> i.exceptions
+        {}
+        >>> i._record_exception(i.tasks[0], Exception("An exception occurred."))
+        >>> i.exceptions
+        {'q0': [{'exception': "Exception('An exception occurred.')", 'time': ..., 'traceback': 'NoneType: None\\n'}]}
+        """
         exception_entry = InterviewExceptionEntry(
             exception=repr(exception),
             time=time.time(),
@@ -156,6 +221,10 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         It is used to determine the order in which questions should be answered.
         This reflects both agent 'memory' considerations and 'skip' logic.
         The 'textify' parameter is set to True, so that the question names are returned as strings rather than integer indices.
+
+        >>> i = Interview.example()
+        >>> i.dag == {'q2': {'q0'}, 'q1': {'q0'}}
+        True
         """
         return self.survey.dag(textify=True)

@@ -166,8 +235,15 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         """Return a string representation of the Interview instance."""
         return f"Interview(agent = {repr(self.agent)}, survey = {repr(self.survey)}, scenario = {repr(self.scenario)}, model = {repr(self.model)})"

-    def duplicate(self, iteration: int, cache: Cache) -> Interview:
-        """Duplicate the interview, but with a new iteration number and cache.
+    def duplicate(self, iteration: int, cache: 'Cache') -> Interview:
+        """Duplicate the interview, but with a new iteration number and cache.
+
+        >>> i = Interview.example()
+        >>> i2 = i.duplicate(1, None)
+        >>> i.iteration + 1 == i2.iteration
+        True
+
+        """
         return Interview(
             agent=self.agent,
             survey=self.survey,
@@ -178,7 +254,7 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         )

     @classmethod
-    def example(self):
+    def example(self, throw_exception: bool = False) -> Interview:
         """Return an example Interview instance."""
         from edsl.agents import Agent
         from edsl.surveys import Survey
@@ -193,66 +269,15 @@ class Interview(InterviewStatusMixin, InterviewTaskBuildingMixin):
         survey = Survey.example()
         scenario = Scenario.example()
         model = LanguageModel.example()
+        if throw_exception:
+            model = LanguageModel.example(test_model = True, throw_exception=True)
+            agent = Agent.example()
+            return Interview(agent=agent, survey=survey, scenario=scenario, model=model)
         return Interview(agent=agent, survey=survey, scenario=scenario, model=model)


 if __name__ == "__main__":
     import doctest

-
-
-    # from edsl.agents import Agent
-    # from edsl.surveys import Survey
-    # from edsl.scenarios import Scenario
-    # from edsl.questions import QuestionMultipleChoice
-
-    # # from edsl.jobs.Interview import Interview
-
-    # # a survey with skip logic
-    # q0 = QuestionMultipleChoice(
-    #     question_text="Do you like school?",
-    #     question_options=["yes", "no"],
-    #     question_name="q0",
-    # )
-    # q1 = QuestionMultipleChoice(
-    #     question_text="Why not?",
-    #     question_options=["killer bees in cafeteria", "other"],
-    #     question_name="q1",
-    # )
-    # q2 = QuestionMultipleChoice(
-    #     question_text="Why?",
-    #     question_options=["**lack*** of killer bees in cafeteria", "other"],
-    #     question_name="q2",
-    # )
-    # s = Survey(questions=[q0, q1, q2])
-    # s = s.add_rule(q0, "q0 == 'yes'", q2)
-
-    # # create an interview
-    # a = Agent(traits=None)
-
-    # def direct_question_answering_method(self, question, scenario):
-    #     """Answer a question directly."""
-    #     raise Exception("Error!")
-    #     # return "yes"
-
-    # a.add_direct_question_answering_method(direct_question_answering_method)
-    # scenario = Scenario()
-    # m = Model()
-    # I = Interview(agent=a, survey=s, scenario=scenario, model=m)
-
-    # result = asyncio.run(I.async_conduct_interview())
-    # # # conduct five interviews
-    # # for _ in range(5):
-    # #     I.conduct_interview(debug=True)
-
-    # # # replace missing answers
-    # # I
-    # # repr(I)
-    # # eval(repr(I))
-    # # print(I.task_status_logs.status_matrix(20))
-    # status_matrix = I.task_status_logs.status_matrix(20)
-    # numerical_matrix = I.task_status_logs.numerical_matrix(20)
-    # I.task_status_logs.visualize()
-
-    # I.exceptions.print()
-    # I.exceptions.ascii_table()
+    # add ellipsis
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
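
Note: the new Interview doctests depend on doctest's ELLIPSIS option so that a literal ... in expected output matches run-dependent values such as timestamps and tracebacks. A self-contained illustration of that flag (the stamp() helper here is hypothetical, not part of edsl):

    import doctest
    import time

    def stamp() -> dict:
        """
        >>> stamp()
        {'log_time': ..., 'value': 1}
        """
        # log_time changes on every run; ELLIPSIS lets '...' absorb it
        return {"log_time": time.monotonic(), "value": 1}

    if __name__ == "__main__":
        doctest.testmod(optionflags=doctest.ELLIPSIS)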
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/interviews/InterviewTaskBuildingMixin.py

@@ -25,7 +25,7 @@ TIMEOUT = float(CONFIG.get("EDSL_API_TIMEOUT"))
 class InterviewTaskBuildingMixin:
     def _build_invigilators(
         self, debug: bool
-    ) -> Generator[InvigilatorBase, None, None]:
+    ) -> Generator['InvigilatorBase', None, None]:
         """Create an invigilator for each question.

         :param debug: whether to use debug mode, in which case `InvigilatorDebug` is used.
@@ -35,7 +35,7 @@ class InterviewTaskBuildingMixin:
         for question in self.survey.questions:
             yield self._get_invigilator(question=question, debug=debug)

-    def _get_invigilator(self, question: QuestionBase, debug: bool) -> "Invigilator":
+    def _get_invigilator(self, question: 'QuestionBase', debug: bool) -> "Invigilator":
         """Return an invigilator for the given question.

         :param question: the question to be answered
@@ -84,7 +84,7 @@ class InterviewTaskBuildingMixin:
         return tuple(tasks)  # , invigilators

     def _get_tasks_that_must_be_completed_before(
-        self, *, tasks: list[asyncio.Task], question: QuestionBase
+        self, *, tasks: list[asyncio.Task], question: 'QuestionBase'
     ) -> Generator[asyncio.Task, None, None]:
         """Return the tasks that must be completed before the given question can be answered.

@@ -100,7 +100,7 @@ class InterviewTaskBuildingMixin:
     def _create_question_task(
         self,
         *,
-        question: QuestionBase,
+        question: 'QuestionBase',
         tasks_that_must_be_completed_before: list[asyncio.Task],
         model_buckets: ModelBuckets,
         debug: bool,
@@ -175,24 +175,12 @@ class InterviewTaskBuildingMixin:

             self._add_answer(response=response, question=question)

-            # With the answer to the question, we can now cancel any skipped questions
             self._cancel_skipped_questions(question)
             return AgentResponseDict(**response)
         except Exception as e:
             raise e
-
-
-            # traceback.print_exc()
-
-            # # Extract and print the traceback info
-            # tb = e.__traceback__
-            # while tb is not None:
-            #     print(f"File {tb.tb_frame.f_code.co_filename}, line {tb.tb_lineno}, in {tb.tb_frame.f_code.co_name}")
-            #     tb = tb.tb_next
-            # breakpoint()
-            # raise e
-
-    def _add_answer(self, response: AgentResponseDict, question: QuestionBase) -> None:
+
+    def _add_answer(self, response: 'AgentResponseDict', question: 'QuestionBase') -> None:
         """Add the answer to the answers dictionary.

         :param response: the response to the question.
@@ -200,7 +188,7 @@ class InterviewTaskBuildingMixin:
         """
         self.answers.add_answer(response=response, question=question)

-    def _skip_this_question(self, current_question: QuestionBase) -> bool:
+    def _skip_this_question(self, current_question: 'QuestionBase') -> bool:
         """Determine if the current question should be skipped.

         :param current_question: the question to be answered.
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/jobs/tasks/QuestionTaskCreator.py

@@ -88,7 +88,8 @@ class QuestionTaskCreator(UserList):
         self.append(task)

     def generate_task(self, debug: bool) -> asyncio.Task:
-        """Create a task that depends on the passed-in dependencies.
+        """Create a task that depends on the passed-in dependencies.
+        """
         task = asyncio.create_task(
             self._run_task_async(debug), name=self.question.question_name
         )
@@ -144,18 +145,14 @@ class QuestionTaskCreator(UserList):
             self.task_status = TaskStatus.FAILED
             raise e

-
-        # breakpoint()
-        if results.get("cache_used", False):
+        if results.get('cache_used', False):
             self.tokens_bucket.add_tokens(requested_tokens)
             self.requests_bucket.add_tokens(1)
             self.from_cache = True
-            #
+            # Turbo mode means that we don't wait for tokens or requests.
             self.tokens_bucket.turbo_mode_on()
             self.requests_bucket.turbo_mode_on()
         else:
-            # breakpoint()
-            # print("Turning off turbo!")
             self.tokens_bucket.turbo_mode_off()
             self.requests_bucket.turbo_mode_off()

{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/language_models/LanguageModel.py

@@ -494,7 +494,7 @@ class LanguageModel(
         return table

     @classmethod
-    def example(cls, test_model: bool = False, canned_response: str = "Hello world"):
+    def example(cls, test_model: bool = False, canned_response: str = "Hello world", throw_exception: bool = False):
         """Return a default instance of the class.

         >>> from edsl.language_models import LanguageModel
@@ -519,6 +519,8 @@ class LanguageModel(
             ) -> dict[str, Any]:
                 await asyncio.sleep(0.1)
                 # return {"message": """{"answer": "Hello, world"}"""}
+                if throw_exception:
+                    raise Exception("This is a test error")
                 return {"message": f'{{"answer": "{canned_response}"}}'}

     def parse_response(self, raw_response: dict[str, Any]) -> str:
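
Note: a hedged usage sketch tying this change to the Interview.example(throw_exception=True) doctest above; the test model can now be asked to raise, which drives the retry and exception-logging paths:

    from edsl.language_models import LanguageModel

    # Canned-response test model that raises "This is a test error" on every
    # call; per the Interview.py hunks above, this is what exercises the retry
    # strategy and the Interview.exceptions log.
    model = LanguageModel.example(test_model=True, throw_exception=True)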
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionFunctional.py

@@ -4,10 +4,33 @@ import inspect
 from edsl.questions.QuestionBase import QuestionBase

 from edsl.utilities.restricted_python import create_restricted_function
-
+from edsl.utilities.decorators import add_edsl_version, remove_edsl_version

 class QuestionFunctional(QuestionBase):
-    """A special type of question that is *not* answered by an LLM.
+    """A special type of question that is *not* answered by an LLM.
+
+    >>> from edsl import Scenario, Agent
+
+    # Create an instance of QuestionFunctional with the new function
+    >>> question = QuestionFunctional.example()
+
+    # Activate and test the function
+    >>> question.activate()
+    >>> scenario = Scenario({"numbers": [1, 2, 3, 4, 5]})
+    >>> agent = Agent(traits={"multiplier": 10})
+    >>> results = question.by(scenario).by(agent).run()
+    >>> results.select("answer.*").to_list()[0] == 150
+    True
+
+    # Serialize the question to a dictionary
+
+    >>> from edsl.questions.QuestionBase import QuestionBase
+    >>> new_question = QuestionBase.from_dict(question.to_dict())
+    >>> results = new_question.by(scenario).by(agent).run()
+    >>> results.select("answer.*").to_list()[0] == 150
+    True
+
+    """

     question_type = "functional"
     default_instructions = ""
@@ -73,6 +96,7 @@ class QuestionFunctional(QuestionBase):
         """Required by Question, but not used by QuestionFunctional."""
         raise NotImplementedError

+    @add_edsl_version
     def to_dict(self):
         return {
             "question_name": self.question_name,
@@ -81,6 +105,8 @@ class QuestionFunctional(QuestionBase):
             "requires_loop": self.requires_loop,
             "function_name": self.function_name,
         }
+
+

     @classmethod
     def example(cls):
@@ -113,4 +139,9 @@ def main():
     scenario = Scenario({"numbers": [1, 2, 3, 4, 5]})
     agent = Agent(traits={"multiplier": 10})
     results = question.by(scenario).by(agent).run()
-
+    assert results.select("answer.*").to_list()[0] == 150
+
+if __name__ == "__main__":
+    #main()
+    import doctest
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
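
Note: a short sketch of the serialization round trip the new class-level doctest covers; the import paths follow the file list above, and to_dict() is now version-stamped via the add_edsl_version decorator:

    from edsl.questions.QuestionFunctional import QuestionFunctional
    from edsl.questions.QuestionBase import QuestionBase

    question = QuestionFunctional.example()
    # from_dict() rebuilds an equivalent question from the stamped dictionary
    new_question = QuestionBase.from_dict(question.to_dict())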
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/QuestionMultipleChoice.py

@@ -2,7 +2,7 @@ from __future__ import annotations
 import time
 from typing import Union
 import random
-
+from typing import Optional
 from jinja2 import Template

 from edsl.questions.QuestionBase import QuestionBase
@@ -10,7 +10,11 @@ from edsl.questions.descriptors import QuestionOptionsDescriptor


 class QuestionMultipleChoice(QuestionBase):
-    """This question prompts the agent to select one option from a list of options.
+    """This question prompts the agent to select one option from a list of options.
+
+    https://docs.expectedparrot.com/en/latest/questions.html#questionmultiplechoice-class
+
+    """

     question_type = "multiple_choice"
     purpose = "When options are known and limited"
@@ -35,27 +39,69 @@ class QuestionMultipleChoice(QuestionBase):
         self.question_text = question_text
         self.question_options = question_options

+    # @property
+    # def question_options(self) -> Union[list[str], list[list], list[float], list[int]]:
+    #     """Return the question options."""
+    #     return self._question_options
+
     ################
     # Answer methods
     ################
     def _validate_answer(
         self, answer: dict[str, Union[str, int]]
     ) -> dict[str, Union[str, int]]:
-        """Validate the answer.
+        """Validate the answer.
+
+        >>> q = QuestionMultipleChoice.example()
+        >>> q._validate_answer({"answer": 0, "comment": "I like custard"})
+        {'answer': 0, 'comment': 'I like custard'}
+
+        >>> q = QuestionMultipleChoice(question_name="how_feeling", question_text="How are you?", question_options=["Good", "Great", "OK", "Bad"])
+        >>> q._validate_answer({"answer": -1, "comment": "I like custard"})
+        Traceback (most recent call last):
+        ...
+        edsl.exceptions.questions.QuestionAnswerValidationError: Answer code must be a non-negative integer (got -1).
+        """
         self._validate_answer_template_basic(answer)
         self._validate_answer_multiple_choice(answer)
         return answer

     def _translate_answer_code_to_answer(
-        self,
+        self,
+        answer_code: int,
+        scenario: Optional["Scenario"] = None
     ):
-        """Translate the answer code to the actual answer.
+        """Translate the answer code to the actual answer.
+
+        It is used to translate the answer code to the actual answer.
+        The question options might be templates, so they need to be rendered with the scenario.
+
+        >>> q = QuestionMultipleChoice.example()
+        >>> q._translate_answer_code_to_answer(0, {})
+        'Good'
+
+        >>> q = QuestionMultipleChoice(question_name="how_feeling", question_text="How are you?", question_options=["{{emotion[0]}}", "emotion[1]"])
+        >>> q._translate_answer_code_to_answer(0, {"emotion": ["Happy", "Sad"]})
+        'Happy'
+
+        """
         from edsl.scenarios.Scenario import Scenario

         scenario = scenario or Scenario()
-
-
-
+
+        if isinstance(self.question_options, str):
+            # If dynamic options are provided like {{ options }}, render them with the scenario
+            from jinja2 import Environment, meta
+            env = Environment()
+            parsed_content = env.parse(self.question_options)
+            question_option_key = list(meta.find_undeclared_variables(parsed_content))[0]
+            translated_options = scenario.get(question_option_key)
+        else:
+            translated_options = [
+                Template(str(option)).render(scenario) for option in self.question_options
+            ]
+        #print("Translated options:", translated_options)
+        #breakpoint()
         return translated_options[int(answer_code)]

     def _simulate_answer(
@@ -75,6 +121,7 @@ class QuestionMultipleChoice(QuestionBase):

     @property
     def question_html_content(self) -> str:
+        """Return the HTML version of the question."""
         if hasattr(self, "option_labels"):
             option_labels = self.option_labels
         else:
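
Note: the dynamic-options branch added above uses jinja2's meta API to recover the variable name from a {{ ... }} placeholder and then looks that key up in the scenario. A standalone sketch of that mechanic, with a plain dict standing in for a Scenario:

    from jinja2 import Environment, Template, meta

    question_options = "{{ dynamic_options }}"
    scenario = {"dynamic_options": ["Good", "Great", "OK", "Bad"]}

    if isinstance(question_options, str):
        # pull the free variable name out of the placeholder, then resolve it
        parsed = Environment().parse(question_options)
        key = list(meta.find_undeclared_variables(parsed))[0]
        translated_options = scenario.get(key)
    else:
        # static options may still be templates; render each against the scenario
        translated_options = [Template(str(o)).render(scenario) for o in question_options]

    print(translated_options[0])  # Good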
{edsl-0.1.30.dev4 → edsl-0.1.30.dev5}/edsl/questions/descriptors.py

@@ -2,7 +2,7 @@

 from abc import ABC, abstractmethod
 import re
-from typing import Any, Callable
+from typing import Any, Callable, List, Optional
 from edsl.exceptions import (
     QuestionCreationValidationError,
     QuestionAnswerValidationError,
@@ -242,6 +242,15 @@ class QuestionNameDescriptor(BaseDescriptor):
 class QuestionOptionsDescriptor(BaseDescriptor):
     """Validate that `question_options` is a list, does not exceed the min/max lengths, and has unique items."""

+    @classmethod
+    def example(cls):
+        class TestQuestion:
+            question_options = QuestionOptionsDescriptor()
+
+            def __init__(self, question_options: List[str]):
+                self.question_options = question_options
+        return TestQuestion
+
     def __init__(
         self,
         num_choices: int = None,
@@ -254,7 +263,31 @@ class QuestionOptionsDescriptor(BaseDescriptor):
         self.q_budget = q_budget

     def validate(self, value: Any, instance) -> None:
-        """Validate the question options.
+        """Validate the question options.
+
+        >>> q_class = QuestionOptionsDescriptor.example()
+        >>> _ = q_class(["a", "b", "c"])
+        >>> _ = q_class(["a", "b", "c", "d", "d"])
+        Traceback (most recent call last):
+        ...
+        edsl.exceptions.questions.QuestionCreationValidationError: Question options must be unique (got ['a', 'b', 'c', 'd', 'd']).
+
+        We allow dynamic question options, which are strings of the form '{{ question_options }}'.
+
+        >>> _ = q_class("{{dynamic_options}}")
+        >>> _ = q_class("dynamic_options")
+        Traceback (most recent call last):
+        ...
+        edsl.exceptions.questions.QuestionCreationValidationError: Dynamic question options must be of the form: '{{ question_options }}'.
+        """
+        if isinstance(value, str):
+            # Check if the string is a dynamic question option
+            if "{{" in value and "}}" in value:
+                return None
+            else:
+                raise QuestionCreationValidationError(
+                    "Dynamic question options must be of the form: '{{ question_options }}'."
+                )
         if not isinstance(value, list):
             raise QuestionCreationValidationError(
                 f"Question options must be a list (got {value})."
@@ -339,3 +372,8 @@ class QuestionTextDescriptor(BaseDescriptor):
                 f"WARNING: Question text contains a single-braced substring: If you intended to parameterize the question with a Scenario this should be changed to a double-braced substring, e.g. {{variable}}.\nSee details on constructing Scenarios in the docs: https://docs.expectedparrot.com/en/latest/scenarios.html",
                 UserWarning,
             )
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
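
Note: a self-contained sketch of the descriptor pattern the new example()/validate() doctests exercise, with a simplified validator in place of edsl's BaseDescriptor machinery (the names here are hypothetical):

    from typing import Any, List, Union

    class OptionsDescriptor:
        """Simplified stand-in for QuestionOptionsDescriptor's validation."""

        def __set_name__(self, owner, name):
            self.name = "_" + name

        def __set__(self, instance, value: Union[str, List[Any]]):
            if isinstance(value, str):
                # only '{{ ... }}' placeholders count as dynamic options
                if not ("{{" in value and "}}" in value):
                    raise ValueError(
                        "Dynamic question options must be of the form: '{{ question_options }}'."
                    )
            elif len(value) != len(set(value)):
                raise ValueError(f"Question options must be unique (got {value}).")
            setattr(instance, self.name, value)

    class TestQuestion:
        question_options = OptionsDescriptor()

        def __init__(self, question_options):
            self.question_options = question_options

    TestQuestion(["a", "b", "c"])           # accepted
    TestQuestion("{{ dynamic_options }}")   # accepted as a dynamic placeholder
    # TestQuestion(["a", "a"]) or TestQuestion("dynamic_options") would raise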