edsl 0.1.30__tar.gz → 0.1.30.dev1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {edsl-0.1.30 → edsl-0.1.30.dev1}/PKG-INFO +1 -1
- edsl-0.1.30.dev1/edsl/__version__.py +1 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/Agent.py +6 -8
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/AgentList.py +19 -9
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/Invigilator.py +5 -4
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conversation/car_buying.py +1 -1
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/Cache.py +16 -25
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/CacheEntry.py +7 -6
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data_transfer_models.py +0 -4
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/Jobs.py +2 -17
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/buckets/ModelBuckets.py +0 -10
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/buckets/TokenBucket.py +3 -31
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/Interview.py +73 -99
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/InterviewTaskBuildingMixin.py +19 -9
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/runners/JobsRunnerAsyncio.py +0 -4
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tasks/QuestionTaskCreator.py +6 -10
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/LanguageModel.py +6 -12
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/notebooks/Notebook.py +9 -9
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionFreeText.py +2 -4
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionFunctional.py +2 -34
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionMultipleChoice.py +8 -57
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/descriptors.py +2 -42
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/DatasetExportMixin.py +5 -84
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/Result.py +5 -53
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/Results.py +30 -70
- edsl-0.1.30.dev1/edsl/scenarios/FileStore.py +140 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/Scenario.py +19 -12
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/ScenarioList.py +6 -8
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/study/Study.py +7 -5
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/Survey.py +12 -44
- {edsl-0.1.30 → edsl-0.1.30.dev1}/pyproject.toml +1 -1
- edsl-0.1.30/edsl/__version__.py +0 -1
- edsl-0.1.30/edsl/scenarios/FileStore.py +0 -299
- {edsl-0.1.30 → edsl-0.1.30.dev1}/LICENSE +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/README.md +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/Base.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/BaseDiff.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/InvigilatorBase.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/PromptConstructionMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/descriptors.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/base/Base.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/config.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/AgentConstructionMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/Conjure.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/InputData.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/InputDataCSV.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/InputDataMixinQuestionStats.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/InputDataPyRead.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/InputDataSPSS.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/InputDataStata.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/QuestionOptionMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/QuestionTypeMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/RawQuestion.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/SurveyResponses.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/examples/placeholder.txt +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/naming_utilities.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conjure/utilities.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conversation/Conversation.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conversation/mug_negotiation.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conversation/next_speaker_utilities.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/coop/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/coop/coop.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/coop/utils.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/CacheHandler.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/SQLiteDict.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/orm.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/enums.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/agents.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/configuration.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/coop.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/data.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/general.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/jobs.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/language_models.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/prompts.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/questions.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/results.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/exceptions/surveys.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/AnthropicService.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/DeepInfraService.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/GoogleService.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/InferenceServiceABC.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/InferenceServicesCollection.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/OpenAIService.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/models_available_cache.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/rate_limits_cache.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/registry.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/inference_services/write_available.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/Answers.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/buckets/BucketCollection.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/InterviewStatistic.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/InterviewStatusDictionary.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/InterviewStatusLog.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/InterviewStatusMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/ReportErrors.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/interview_exception_tracking.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/interview_status_enum.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/interviews/retry_management.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/runners/JobsRunnerStatusMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tasks/TaskCreators.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tasks/TaskHistory.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tasks/TaskStatusLog.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tasks/task_management.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tasks/task_status_enum.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tokens/InterviewTokenUsage.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/tokens/TokenUsage.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/ModelList.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/RegisterLanguageModelsMeta.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/registry.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/repair.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/language_models/unused/ReplicateBase.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/notebooks/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/Prompt.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/QuestionInstructionsBase.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/agent_instructions.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/agent_persona.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_budget.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_checkbox.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_extract.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_freetext.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_linear_scale.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_list.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_multiple_choice.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_numerical.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/library/question_rank.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/prompt_config.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/prompts/registry.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/AnswerValidatorMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionBase.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionBudget.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionCheckBox.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionExtract.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionList.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionNumerical.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/QuestionRank.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/RegisterQuestionsMeta.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/SimpleAskMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/compose_questions.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/derived/QuestionLikertFive.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/derived/QuestionLinearScale.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/derived/QuestionTopK.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/derived/QuestionYesNo.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/derived/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/question_registry.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/questions/settings.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/Dataset.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/ResultsDBMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/ResultsExportMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/ResultsFetchMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/ResultsGGMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/ResultsToolsMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/results/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/ScenarioHtmlMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/ScenarioImageMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/ScenarioListExportMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/ScenarioListPdfMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/scenarios/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/shared.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/study/ObjectEntry.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/study/ProofOfWork.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/study/SnapShot.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/study/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/DAG.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/Memory.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/MemoryPlan.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/Rule.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/RuleCollection.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/SurveyCSS.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/SurveyExportMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/SurveyFlowVisualizationMixin.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/base.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/surveys/descriptors.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/tools/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/tools/clusters.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/tools/embeddings.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/tools/embeddings_plotting.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/tools/plotting.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/tools/summarize.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/SystemInfo.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/ast_utilities.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/data/Registry.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/data/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/data/scooter_results.json +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/decorators.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/gcp_bucket/__init__.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/gcp_bucket/cloud_storage.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/gcp_bucket/simple_example.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/interface.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/repair_functions.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/restricted_python.py +0 -0
- {edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/utilities/utilities.py +0 -0
edsl-0.1.30.dev1/edsl/__version__.py

@@ -0,0 +1 @@
+__version__ = "0.1.30.dev1"
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/Agent.py

@@ -4,8 +4,7 @@ from __future__ import annotations
 import copy
 import inspect
 import types
-from typing import Callable, Optional, Union
-from uuid import uuid4
+from typing import Any, Callable, Optional, Union, Dict, Sequence
 from edsl.Base import Base

 from edsl.exceptions.agents import (
@@ -689,14 +688,13 @@ class Agent(Base):
         return table

     @classmethod
-    def example(cls
-        """
-        Returns an example Agent instance.
+    def example(cls) -> Agent:
+        """Return an example agent.

-
+        >>> Agent.example()
+        Agent(traits = {'age': 22, 'hair': 'brown', 'height': 5.5})
         """
-
-        return cls(traits={"age": 22, "hair": f"brown{addition}", "height": 5.5})
+        return cls(traits={"age": 22, "hair": "brown", "height": 5.5})

     def code(self) -> str:
         """Return the code for the agent.
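For orientation, a minimal usage sketch based on the `Agent.example()` doctest added in the hunk above (the `traits` attribute is assumed from the repr shown there); this is an illustration, not code from either package version:

```python
# Illustrative only -- derived from the Agent.example() doctest above.
from edsl.agents.Agent import Agent

a = Agent.example()  # 0.1.30.dev1 signature: no extra arguments
print(a.traits)      # expected per the doctest: {'age': 22, 'hair': 'brown', 'height': 5.5}
```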
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/AgentList.py

@@ -11,15 +11,23 @@ Example usage:
 """

 from __future__ import annotations
-import csv
-import json
 from collections import UserList
-from typing import
+from typing import Optional, Union, Sequence, List, Any
 from rich import print_json
 from rich.table import Table
+import json
+import csv
+
+
 from simpleeval import EvalWithCompoundTypes
+
 from edsl.Base import Base
-
+
+# from edsl.agents import Agent
+from edsl.utilities.decorators import (
+    add_edsl_version,
+    remove_edsl_version,
+)


 class AgentList(UserList, Base):
@@ -231,15 +239,17 @@ class AgentList(UserList, Base):
         return cls(agents)

     @classmethod
-    def example(cls
-        """
-
+    def example(cls) -> "AgentList":
+        """Return an example AgentList.
+
+        >>> al = AgentList.example()
+        >>> len(al)
+        2

-        :param randomize: If True, uses Agent's randomize method.
         """
         from edsl.agents.Agent import Agent

-        return cls([Agent.example(
+        return cls([Agent.example(), Agent.example()])

     @classmethod
     def from_list(self, trait_name: str, values: List[Any]):
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/agents/Invigilator.py

@@ -74,14 +74,15 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):

         This cleans up the raw response to make it suitable to pass to AgentResponseDict.
         """
+        # not actually used, but this removes the temptation to delete agent from the signature
         _ = agent
         try:
             response = question._validate_answer(raw_response)
         except Exception as e:
-            """If the response is invalid, remove it from the cache and raise the exception."""
             self._remove_from_cache(raw_response)
             raise e

+        # breakpoint()
         question_dict = self.survey.question_names_to_questions()
         for other_question, answer in self.current_answers.items():
             if other_question in question_dict:
@@ -94,10 +95,12 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):
                     question_dict[new_question].comment = answer

         combined_dict = {**question_dict, **scenario}
+        # print("combined_dict: ", combined_dict)
+        # print("response: ", response)
+        # breakpoint()
         answer = question._translate_answer_code_to_answer(
             response["answer"], combined_dict
         )
-        # breakpoint()
         data = {
             "answer": answer,
             "comment": response.get(
@@ -108,8 +111,6 @@ class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):
             "cached_response": raw_response.get("cached_response", None),
             "usage": raw_response.get("usage", {}),
             "raw_model_response": raw_model_response,
-            "cache_used": raw_response.get("cache_used", False),
-            "cache_key": raw_response.get("cache_key", None),
         }
         return AgentResponseDict(**data)

{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/conversation/car_buying.py

@@ -30,7 +30,7 @@ c1 = Conversation(agent_list=AgentList([a1, a3, a2]), max_turns=5, verbose=True)
 c2 = Conversation(agent_list=AgentList([a1, a2]), max_turns=5, verbose=True)

 c = Cache.load("car_talk.json.gz")
-
+breakpoint()
 combo = ConversationList([c1, c2], cache=c)
 combo.run()
 results = combo.to_results()
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/Cache.py

@@ -7,10 +7,17 @@ import json
 import os
 import warnings
 from typing import Optional, Union
-
+import time
+from edsl.config import CONFIG
 from edsl.data.CacheEntry import CacheEntry
+
+# from edsl.data.SQLiteDict import SQLiteDict
+from edsl.Base import Base
 from edsl.utilities.utilities import dict_hash
-from edsl.utilities.decorators import
+from edsl.utilities.decorators import (
+    add_edsl_version,
+    remove_edsl_version,
+)


 class Cache(Base):
@@ -34,7 +41,6 @@ class Cache(Base):
         data: Optional[Union["SQLiteDict", dict]] = None,
         immediate_write: bool = True,
         method=None,
-        verbose=False,
     ):
         """
         Create two dictionaries to store the cache data.
@@ -53,7 +59,6 @@ class Cache(Base):
         self.new_entries = {}
         self.new_entries_to_write_later = {}
         self.coop = None
-        self.verbose = verbose

         self.filename = filename
         if filename and data:
@@ -117,7 +122,7 @@ class Cache(Base):
         system_prompt: str,
         user_prompt: str,
         iteration: int,
-    ) ->
+    ) -> Union[None, str]:
         """
         Fetch a value (LLM output) from the cache.

@@ -130,7 +135,7 @@ class Cache(Base):
         Return None if the response is not found.

         >>> c = Cache()
-        >>> c.fetch(model="gpt-3", parameters="default", system_prompt="Hello", user_prompt="Hi", iteration=1)
+        >>> c.fetch(model="gpt-3", parameters="default", system_prompt="Hello", user_prompt="Hi", iteration=1) is None
         True


@@ -146,13 +151,8 @@ class Cache(Base):
         )
         entry = self.data.get(key, None)
         if entry is not None:
-            if self.verbose:
-                print(f"Cache hit for key: {key}")
             self.fetched_data[key] = entry
-        else
-            if self.verbose:
-                print(f"Cache miss for key: {key}")
-        return None if entry is None else entry.output, key
+        return None if entry is None else entry.output

     def store(
         self,
@@ -354,9 +354,6 @@ class Cache(Base):
         for key, entry in self.new_entries_to_write_later.items():
             self.data[key] = entry

-        if self.filename:
-            self.write(self.filename)
-
     ####################
     # DUNDER / USEFUL
     ####################
@@ -473,18 +470,12 @@ class Cache(Base):
         webbrowser.open("file://" + filepath)

     @classmethod
-    def example(cls
+    def example(cls) -> Cache:
         """
-
-
-        :param randomize: If True, uses CacheEntry's randomize method.
+        Return an example Cache.
+        The example Cache has one entry.
         """
-        return cls(
-            data={
-                CacheEntry.example(randomize).key: CacheEntry.example(),
-                CacheEntry.example(randomize).key: CacheEntry.example(),
-            }
-        )
+        return cls(data={CacheEntry.example().key: CacheEntry.example()})


 if __name__ == "__main__":
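For context, a hedged sketch of what the `fetch` change above means for calling code. The keyword arguments mirror the doctest in the hunk, and the empty `Cache()` makes a miss the expected outcome; this is an illustration, not code from either package version:

```python
# Illustrative only: how Cache.fetch return values differ between the two versions above.
from edsl.data.Cache import Cache

c = Cache()

# edsl 0.1.30: fetch returns an (output, key) tuple, so callers unpack it:
# output, key = c.fetch(model="gpt-3", parameters="default",
#                       system_prompt="Hello", user_prompt="Hi", iteration=1)

# edsl 0.1.30.dev1: fetch returns only the output string, or None on a miss.
output = c.fetch(model="gpt-3", parameters="default",
                 system_prompt="Hello", user_prompt="Hi", iteration=1)
assert output is None  # the cache is empty, so this is a miss
```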
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data/CacheEntry.py

@@ -2,8 +2,11 @@ from __future__ import annotations
 import json
 import datetime
 import hashlib
+import random
 from typing import Optional
-
+
+
+# TODO: Timestamp should probably be float?


 class CacheEntry:
@@ -148,12 +151,10 @@ class CacheEntry:
     @classmethod
     def example(cls, randomize: bool = False) -> CacheEntry:
         """
-        Returns
-
-        :param randomize: If True, adds a random string to the system prompt.
+        Returns a CacheEntry example.
         """
-        # if random, create a
-        addition = "" if not randomize else str(
+        # if random, create a random number for 0-100
+        addition = "" if not randomize else str(random.randint(0, 1000))
         return CacheEntry(
             model="gpt-3.5-turbo",
             parameters={"temperature": 0.5},
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/data_transfer_models.py

@@ -17,8 +17,6 @@ class AgentResponseDict(UserDict):
         cached_response=None,
         raw_model_response=None,
         simple_model_raw_response=None,
-        cache_used=None,
-        cache_key=None,
     ):
         """Initialize the AgentResponseDict object."""
         usage = usage or {"prompt_tokens": 0, "completion_tokens": 0}
@@ -32,7 +30,5 @@ class AgentResponseDict(UserDict):
                 "cached_response": cached_response,
                 "raw_model_response": raw_model_response,
                 "simple_model_raw_response": simple_model_raw_response,
-                "cache_used": cache_used,
-                "cache_key": cache_key,
             }
         )
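Because `AgentResponseDict` is a `UserDict` and the 0.1.30.dev1 payload no longer carries `cache_used`/`cache_key`, downstream code that must run against both versions can read those keys defensively. A minimal sketch (the helper name is hypothetical, not part of either package):

```python
# Hypothetical compatibility helper, not part of either package version.
def cache_info(response) -> tuple:
    """Return (cache_used, cache_key) whether or not the keys exist in the response dict."""
    # .get() works because AgentResponseDict subclasses UserDict (see the hunk above).
    return response.get("cache_used", False), response.get("cache_key")
```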
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/Jobs.py

@@ -461,13 +461,6 @@ class Jobs(Base):
             remote_inference = False

         if remote_inference:
-            from edsl.agents.Agent import Agent
-            from edsl.language_models.registry import Model
-            from edsl.results.Result import Result
-            from edsl.results.Results import Results
-            from edsl.scenarios.Scenario import Scenario
-            from edsl.surveys.Survey import Survey
-
             self._output("Remote inference activated. Sending job to server...")
             if remote_cache:
                 self._output(
@@ -687,9 +680,7 @@ class Jobs(Base):
     # Example methods
     #######################
     @classmethod
-    def example(
-        cls, throw_exception_probability: int = 0, randomize: bool = False
-    ) -> Jobs:
+    def example(cls, throw_exception_probability=0) -> Jobs:
         """Return an example Jobs instance.

         :param throw_exception_probability: the probability that an exception will be thrown when answering a question. This is useful for testing error handling.
@@ -699,13 +690,10 @@ class Jobs(Base):

         """
         import random
-        from uuid import uuid4
         from edsl.questions import QuestionMultipleChoice
         from edsl.agents.Agent import Agent
         from edsl.scenarios.Scenario import Scenario

-        addition = "" if not randomize else str(uuid4())
-
         # (status, question, period)
         agent_answers = {
             ("Joyful", "how_feeling", "morning"): "OK",
@@ -748,10 +736,7 @@ class Jobs(Base):
         base_survey = Survey(questions=[q1, q2])

         scenario_list = ScenarioList(
-            [
-                Scenario({"period": f"morning{addition}"}),
-                Scenario({"period": "afternoon"}),
-            ]
+            [Scenario({"period": "morning"}), Scenario({"period": "afternoon"})]
         )
         job = base_survey.by(scenario_list).by(joy_agent, sad_agent)

{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/buckets/ModelBuckets.py

@@ -25,16 +25,6 @@ class ModelBuckets:
             tokens_bucket=self.tokens_bucket + other.tokens_bucket,
         )

-    def turbo_mode_on(self):
-        """Set the refill rate to infinity for both buckets."""
-        self.requests_bucket.turbo_mode_on()
-        self.tokens_bucket.turbo_mode_on()
-
-    def turbo_mode_off(self):
-        """Restore the refill rate to its original value for both buckets."""
-        self.requests_bucket.turbo_mode_off()
-        self.tokens_bucket.turbo_mode_off()
-
     @classmethod
     def infinity_bucket(cls, model_name: str = "not_specified") -> "ModelBuckets":
         """Create a bucket with infinite capacity and refill rate."""
{edsl-0.1.30 → edsl-0.1.30.dev1}/edsl/jobs/buckets/TokenBucket.py

@@ -17,29 +17,11 @@ class TokenBucket:
         self.bucket_name = bucket_name
         self.bucket_type = bucket_type
         self.capacity = capacity  # Maximum number of tokens
-        self._old_capacity = capacity
         self.tokens = capacity  # Current number of available tokens
         self.refill_rate = refill_rate  # Rate at which tokens are refilled
-        self._old_refill_rate = refill_rate
         self.last_refill = time.monotonic()  # Last refill time
+
         self.log: List[Any] = []
-        self.turbo_mode = False
-
-    def turbo_mode_on(self):
-        """Set the refill rate to infinity."""
-        if self.turbo_mode:
-            pass
-        else:
-            # pass
-            self.turbo_mode = True
-            self.capacity = float("inf")
-            self.refill_rate = float("inf")
-
-    def turbo_mode_off(self):
-        """Restore the refill rate to its original value."""
-        self.turbo_mode = False
-        self.capacity = self._old_capacity
-        self.refill_rate = self._old_refill_rate

     def __add__(self, other) -> "TokenBucket":
         """Combine two token buckets.
@@ -73,17 +55,7 @@ class TokenBucket:
         self.log.append((time.monotonic(), self.tokens))

     def refill(self) -> None:
-        """Refill the bucket with new tokens based on elapsed time.
-
-
-
-        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
-        >>> bucket.tokens = 0
-        >>> bucket.refill()
-        >>> bucket.tokens > 0
-        True
-
-        """
+        """Refill the bucket with new tokens based on elapsed time."""
         now = time.monotonic()
         elapsed = now - self.last_refill
         refill_amount = elapsed * self.refill_rate
@@ -126,7 +98,7 @@ class TokenBucket:
             raise ValueError(msg)
         while self.tokens < amount:
             self.refill()
-            await asyncio.sleep(0.
+            await asyncio.sleep(0.1)  # Sleep briefly to prevent busy waiting
         self.tokens -= amount

         now = time.monotonic()