edsl 0.1.14__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +348 -38
- edsl/BaseDiff.py +260 -0
- edsl/TemplateLoader.py +24 -0
- edsl/__init__.py +46 -10
- edsl/__version__.py +1 -0
- edsl/agents/Agent.py +842 -144
- edsl/agents/AgentList.py +521 -25
- edsl/agents/Invigilator.py +250 -374
- edsl/agents/InvigilatorBase.py +257 -0
- edsl/agents/PromptConstructor.py +272 -0
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/descriptors.py +43 -13
- edsl/agents/prompt_helpers.py +129 -0
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -0
- edsl/auto/StageBase.py +243 -0
- edsl/auto/StageGenerateSurvey.py +178 -0
- edsl/auto/StageLabelQuestions.py +125 -0
- edsl/auto/StagePersona.py +61 -0
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
- edsl/auto/StagePersonaDimensionValues.py +74 -0
- edsl/auto/StagePersonaDimensions.py +69 -0
- edsl/auto/StageQuestions.py +74 -0
- edsl/auto/SurveyCreatorPipeline.py +21 -0
- edsl/auto/utilities.py +218 -0
- edsl/base/Base.py +279 -0
- edsl/config.py +121 -104
- edsl/conversation/Conversation.py +290 -0
- edsl/conversation/car_buying.py +59 -0
- edsl/conversation/chips.py +95 -0
- edsl/conversation/mug_negotiation.py +81 -0
- edsl/conversation/next_speaker_utilities.py +93 -0
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -0
- edsl/coop/__init__.py +1 -0
- edsl/coop/coop.py +1029 -134
- edsl/coop/utils.py +131 -0
- edsl/data/Cache.py +560 -89
- edsl/data/CacheEntry.py +230 -0
- edsl/data/CacheHandler.py +168 -0
- edsl/data/RemoteCacheSync.py +186 -0
- edsl/data/SQLiteDict.py +292 -0
- edsl/data/__init__.py +5 -3
- edsl/data/orm.py +6 -33
- edsl/data_transfer_models.py +74 -27
- edsl/enums.py +165 -8
- edsl/exceptions/BaseException.py +21 -0
- edsl/exceptions/__init__.py +52 -46
- edsl/exceptions/agents.py +33 -15
- edsl/exceptions/cache.py +5 -0
- edsl/exceptions/coop.py +8 -0
- edsl/exceptions/general.py +34 -0
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +15 -0
- edsl/exceptions/language_models.py +46 -1
- edsl/exceptions/questions.py +80 -5
- edsl/exceptions/results.py +16 -5
- edsl/exceptions/scenarios.py +29 -0
- edsl/exceptions/surveys.py +13 -10
- edsl/inference_services/AnthropicService.py +106 -0
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -0
- edsl/inference_services/AzureAI.py +215 -0
- edsl/inference_services/DeepInfraService.py +18 -0
- edsl/inference_services/GoogleService.py +143 -0
- edsl/inference_services/GroqService.py +20 -0
- edsl/inference_services/InferenceServiceABC.py +80 -0
- edsl/inference_services/InferenceServicesCollection.py +138 -0
- edsl/inference_services/MistralAIService.py +120 -0
- edsl/inference_services/OllamaService.py +18 -0
- edsl/inference_services/OpenAIService.py +236 -0
- edsl/inference_services/PerplexityService.py +160 -0
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -0
- edsl/inference_services/TogetherAIService.py +172 -0
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -0
- edsl/inference_services/rate_limits_cache.py +25 -0
- edsl/inference_services/registry.py +41 -0
- edsl/inference_services/write_available.py +10 -0
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +21 -20
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +684 -204
- edsl/jobs/JobsChecks.py +172 -0
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -0
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -0
- edsl/jobs/buckets/ModelBuckets.py +65 -0
- edsl/jobs/buckets/TokenBucket.py +283 -0
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +392 -0
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -0
- edsl/jobs/interviews/InterviewStatistic.py +63 -0
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -0
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -0
- edsl/jobs/interviews/InterviewStatusLog.py +92 -0
- edsl/jobs/interviews/ReportErrors.py +66 -0
- edsl/jobs/interviews/interview_status_enum.py +9 -0
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -110
- edsl/jobs/runners/JobsRunnerStatus.py +298 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -0
- edsl/jobs/tasks/TaskCreators.py +64 -0
- edsl/jobs/tasks/TaskHistory.py +470 -0
- edsl/jobs/tasks/TaskStatusLog.py +23 -0
- edsl/jobs/tasks/task_status_enum.py +161 -0
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -0
- edsl/jobs/tokens/TokenUsage.py +34 -0
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +507 -386
- edsl/language_models/ModelList.py +164 -0
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -0
- edsl/language_models/__init__.py +1 -8
- edsl/language_models/fake_openai_call.py +15 -0
- edsl/language_models/fake_openai_service.py +61 -0
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +109 -41
- edsl/language_models/utilities.py +65 -0
- edsl/notebooks/Notebook.py +263 -0
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -0
- edsl/prompts/Prompt.py +222 -93
- edsl/prompts/__init__.py +1 -1
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -0
- edsl/questions/QuestionBasePromptsMixin.py +221 -0
- edsl/questions/QuestionBudget.py +164 -67
- edsl/questions/QuestionCheckBox.py +281 -62
- edsl/questions/QuestionDict.py +343 -0
- edsl/questions/QuestionExtract.py +136 -50
- edsl/questions/QuestionFreeText.py +79 -55
- edsl/questions/QuestionFunctional.py +138 -41
- edsl/questions/QuestionList.py +184 -57
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +293 -69
- edsl/questions/QuestionNumerical.py +109 -56
- edsl/questions/QuestionRank.py +244 -49
- edsl/questions/Quick.py +41 -0
- edsl/questions/SimpleAskMixin.py +74 -0
- edsl/questions/__init__.py +9 -6
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +153 -38
- edsl/questions/compose_questions.py +13 -7
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -0
- edsl/questions/derived/QuestionLikertFive.py +28 -26
- edsl/questions/derived/QuestionLinearScale.py +41 -28
- edsl/questions/derived/QuestionTopK.py +34 -26
- edsl/questions/derived/QuestionYesNo.py +40 -27
- edsl/questions/descriptors.py +228 -74
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -0
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
- edsl/questions/prompt_templates/question_extract.jinja +11 -0
- edsl/questions/prompt_templates/question_free_text.jinja +3 -0
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
- edsl/questions/prompt_templates/question_list.jinja +17 -0
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
- edsl/questions/prompt_templates/question_numerical.jinja +37 -0
- edsl/questions/question_base_gen_mixin.py +168 -0
- edsl/questions/question_registry.py +130 -46
- edsl/questions/register_questions_meta.py +71 -0
- edsl/questions/response_validator_abc.py +188 -0
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +5 -2
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +7 -0
- edsl/questions/templates/budget/question_presentation.jinja +7 -0
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
- edsl/questions/templates/dict/__init__.py +0 -0
- edsl/questions/templates/dict/answering_instructions.jinja +21 -0
- edsl/questions/templates/dict/question_presentation.jinja +1 -0
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +7 -0
- edsl/questions/templates/extract/question_presentation.jinja +1 -0
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +1 -0
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
- edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +4 -0
- edsl/questions/templates/list/question_presentation.jinja +5 -0
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +7 -0
- edsl/questions/templates/numerical/question_presentation.jinja +7 -0
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +11 -0
- edsl/questions/templates/rank/question_presentation.jinja +15 -0
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
- edsl/questions/templates/top_k/question_presentation.jinja +22 -0
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
- edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
- edsl/results/CSSParameterizer.py +108 -0
- edsl/results/Dataset.py +550 -19
- edsl/results/DatasetExportMixin.py +594 -0
- edsl/results/DatasetTree.py +295 -0
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +477 -173
- edsl/results/Results.py +987 -269
- edsl/results/ResultsExportMixin.py +28 -125
- edsl/results/ResultsGGMixin.py +83 -15
- edsl/results/TableDisplay.py +125 -0
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +1 -1
- edsl/results/file_exports.py +252 -0
- edsl/results/results_fetch_mixin.py +33 -0
- edsl/results/results_selector.py +145 -0
- edsl/results/results_tools_mixin.py +98 -0
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +78 -0
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -0
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +543 -0
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +431 -62
- edsl/scenarios/ScenarioHtmlMixin.py +65 -0
- edsl/scenarios/ScenarioList.py +1415 -45
- edsl/scenarios/ScenarioListExportMixin.py +45 -0
- edsl/scenarios/ScenarioListPdfMixin.py +239 -0
- edsl/scenarios/__init__.py +2 -0
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +49 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/scenario_join.py +131 -0
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -0
- edsl/study/ObjectEntry.py +173 -0
- edsl/study/ProofOfWork.py +113 -0
- edsl/study/SnapShot.py +80 -0
- edsl/study/Study.py +521 -0
- edsl/study/__init__.py +4 -0
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +92 -11
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +9 -4
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +156 -35
- edsl/surveys/Rule.py +221 -74
- edsl/surveys/RuleCollection.py +241 -61
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1079 -339
- edsl/surveys/SurveyCSS.py +273 -0
- edsl/surveys/SurveyExportMixin.py +235 -40
- edsl/surveys/SurveyFlowVisualization.py +181 -0
- edsl/surveys/SurveyQualtricsImport.py +284 -0
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +4 -2
- edsl/surveys/base.py +19 -3
- edsl/surveys/descriptors.py +17 -6
- edsl/surveys/instructions/ChangeInstruction.py +48 -0
- edsl/surveys/instructions/Instruction.py +56 -0
- edsl/surveys/instructions/InstructionCollection.py +82 -0
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +24 -0
- edsl/templates/error_reporting/exceptions_by_model.html +35 -0
- edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
- edsl/templates/error_reporting/exceptions_by_type.html +17 -0
- edsl/templates/error_reporting/interview_details.html +116 -0
- edsl/templates/error_reporting/interviews.html +19 -0
- edsl/templates/error_reporting/overview.html +5 -0
- edsl/templates/error_reporting/performance_plot.html +2 -0
- edsl/templates/error_reporting/report.css +74 -0
- edsl/templates/error_reporting/report.html +118 -0
- edsl/templates/error_reporting/report.js +25 -0
- edsl/tools/__init__.py +1 -0
- edsl/tools/clusters.py +192 -0
- edsl/tools/embeddings.py +27 -0
- edsl/tools/embeddings_plotting.py +118 -0
- edsl/tools/plotting.py +112 -0
- edsl/tools/summarize.py +18 -0
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +5 -0
- edsl/utilities/__init__.py +21 -20
- edsl/utilities/ast_utilities.py +3 -0
- edsl/utilities/data/Registry.py +2 -0
- edsl/utilities/decorators.py +41 -0
- edsl/utilities/gcp_bucket/__init__.py +0 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
- edsl/utilities/interface.py +310 -60
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -0
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -0
- edsl/utilities/restricted_python.py +70 -0
- edsl/utilities/utilities.py +203 -13
- edsl-0.1.40.dist-info/METADATA +111 -0
- edsl-0.1.40.dist-info/RECORD +362 -0
- {edsl-0.1.14.dist-info → edsl-0.1.40.dist-info}/WHEEL +1 -1
- edsl/agents/AgentListExportMixin.py +0 -24
- edsl/coop/old.py +0 -31
- edsl/data/Database.py +0 -141
- edsl/data/crud.py +0 -121
- edsl/jobs/Interview.py +0 -417
- edsl/jobs/JobsRunner.py +0 -63
- edsl/jobs/JobsRunnerStatusMixin.py +0 -115
- edsl/jobs/base.py +0 -47
- edsl/jobs/buckets.py +0 -166
- edsl/jobs/runners/JobsRunnerDryRun.py +0 -19
- edsl/jobs/runners/JobsRunnerStreaming.py +0 -54
- edsl/jobs/task_management.py +0 -218
- edsl/jobs/token_tracking.py +0 -78
- edsl/language_models/DeepInfra.py +0 -69
- edsl/language_models/OpenAI.py +0 -98
- edsl/language_models/model_interfaces/GeminiPro.py +0 -66
- edsl/language_models/model_interfaces/LanguageModelOpenAIFour.py +0 -8
- edsl/language_models/model_interfaces/LanguageModelOpenAIThreeFiveTurbo.py +0 -8
- edsl/language_models/model_interfaces/LlamaTwo13B.py +0 -21
- edsl/language_models/model_interfaces/LlamaTwo70B.py +0 -21
- edsl/language_models/model_interfaces/Mixtral8x7B.py +0 -24
- edsl/language_models/registry.py +0 -81
- edsl/language_models/schemas.py +0 -15
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/prompts/QuestionInstructionsBase.py +0 -6
- edsl/prompts/library/agent_instructions.py +0 -29
- edsl/prompts/library/agent_persona.py +0 -17
- edsl/prompts/library/question_budget.py +0 -26
- edsl/prompts/library/question_checkbox.py +0 -32
- edsl/prompts/library/question_extract.py +0 -19
- edsl/prompts/library/question_freetext.py +0 -14
- edsl/prompts/library/question_linear_scale.py +0 -20
- edsl/prompts/library/question_list.py +0 -22
- edsl/prompts/library/question_multiple_choice.py +0 -44
- edsl/prompts/library/question_numerical.py +0 -31
- edsl/prompts/library/question_rank.py +0 -21
- edsl/prompts/prompt_config.py +0 -33
- edsl/prompts/registry.py +0 -185
- edsl/questions/Question.py +0 -240
- edsl/report/InputOutputDataTypes.py +0 -134
- edsl/report/RegressionMixin.py +0 -28
- edsl/report/ReportOutputs.py +0 -1228
- edsl/report/ResultsFetchMixin.py +0 -106
- edsl/report/ResultsOutputMixin.py +0 -14
- edsl/report/demo.ipynb +0 -645
- edsl/results/ResultsDBMixin.py +0 -184
- edsl/surveys/SurveyFlowVisualizationMixin.py +0 -92
- edsl/trackers/Tracker.py +0 -91
- edsl/trackers/TrackerAPI.py +0 -196
- edsl/trackers/TrackerTasks.py +0 -70
- edsl/utilities/pastebin.py +0 -141
- edsl-0.1.14.dist-info/METADATA +0 -69
- edsl-0.1.14.dist-info/RECORD +0 -141
- /edsl/{language_models/model_interfaces → inference_services}/__init__.py +0 -0
- /edsl/{report/__init__.py → jobs/runners/JobsRunnerStatusData.py} +0 -0
- /edsl/{trackers/__init__.py → language_models/ServiceDataSources.py} +0 -0
- {edsl-0.1.14.dist-info → edsl-0.1.40.dist-info}/LICENSE +0 -0
@@ -0,0 +1,138 @@
|
|
1
|
+
from collections.abc import AsyncGenerator
|
2
|
+
from typing import List, TypeVar, Generator, Tuple, TYPE_CHECKING
|
3
|
+
from dataclasses import dataclass
|
4
|
+
import asyncio
|
5
|
+
from contextlib import asynccontextmanager
|
6
|
+
from edsl.data_transfer_models import EDSLResultObjectInput
|
7
|
+
|
8
|
+
from edsl.results.Result import Result
|
9
|
+
from edsl.jobs.interviews.Interview import Interview
|
10
|
+
|
11
|
+
if TYPE_CHECKING:
|
12
|
+
from edsl.jobs.Jobs import Jobs
|
13
|
+
|
14
|
+
|
15
|
+
@dataclass
class InterviewResult:
    # Bundles a completed interview's outcome with its submission index so
    # results can be matched back to their original order after concurrent
    # execution (see AsyncInterviewRunner.run, which assigns idx).
    result: Result  # the Result built from the interview's extracted answers
    interview: Interview  # the interview object that produced the result
    order: int  # submission index of this interview within the run
20
|
+
|
21
|
+
|
22
|
+
from edsl.jobs.data_structures import RunConfig
|
23
|
+
|
24
|
+
|
25
|
+
class AsyncInterviewRunner:
    """Runs a job's interviews asynchronously in bounded-size chunks.

    Interviews are first expanded (each one repeated ``n`` times per the run
    configuration), then executed ``MAX_CONCURRENT`` at a time; completed
    ``(Result, Interview)`` pairs are yielded as each chunk finishes.
    """

    # Upper bound on interviews executed simultaneously within one chunk.
    MAX_CONCURRENT = 5

    def __init__(self, jobs: "Jobs", run_config: RunConfig):
        self.jobs = jobs
        self.run_config = run_config
        # Set once the interview list has been expanded in run().
        self._initialized = asyncio.Event()

    def _expand_interviews(self) -> Generator["Interview", None, None]:
        """Yield each interview ``run_config.parameters.n`` times.

        The first iteration reuses the original interview object (with the
        run's cache attached); later iterations are duplicates carrying their
        iteration index and the same cache.
        """
        for interview in self.jobs.generate_interviews():
            for iteration in range(self.run_config.parameters.n):
                if iteration > 0:
                    yield interview.duplicate(
                        iteration=iteration, cache=self.run_config.environment.cache
                    )
                else:
                    interview.cache = self.run_config.environment.cache
                    yield interview

    async def _conduct_interview(
        self, interview: "Interview"
    ) -> Tuple["Result", "Interview"]:
        """Conduct one interview and return ``(result, interview)``.

        The interview object is returned alongside the result so the caller
        can inspect any exceptions recorded on it — NOTE(review): the
        original comment said "not populated with exceptions"; confirm which
        is intended against Interview.async_conduct_interview.

        :param interview: the interview to conduct
        :return: the result of the interview, plus the interview itself

        'extracted_answers' is a dictionary of the answers to the questions in
        the interview. This is not the same as the generated_tokens---it can
        include substantial cleaning and processing / validation.
        """
        extracted_answers: dict[str, str]
        model_response_objects: List[EDSLResultObjectInput]

        extracted_answers, model_response_objects = (
            await interview.async_conduct_interview(self.run_config)
        )
        result = Result.from_interview(
            interview=interview,
            extracted_answers=extracted_answers,
            model_response_objects=model_response_objects,
        )
        return result, interview

    async def run(
        self,
    ) -> AsyncGenerator[tuple[Result, Interview], None]:
        """Creates and processes tasks asynchronously, yielding results as they complete.

        Interviews run MAX_CONCURRENT at a time; each completed interview
        yields a (Result, Interview) pair while maintaining controlled
        concurrency.
        """
        interviews = list(self._expand_interviews())
        self._initialized.set()

        async def _process_single_interview(
            interview: Interview, idx: int
        ) -> InterviewResult:
            try:
                result, interview = await self._conduct_interview(interview)
                self.run_config.environment.jobs_runner_status.add_completed_interview(
                    result
                )
                result.order = idx
                return InterviewResult(result, interview, idx)
            except Exception:
                if self.run_config.parameters.stop_on_exception:
                    raise
                # Best-effort mode: drop this interview and keep going.
                return None

        # Process interviews in fixed-size chunks to bound concurrency.
        for i in range(0, len(interviews), self.MAX_CONCURRENT):
            chunk = interviews[i : i + self.MAX_CONCURRENT]
            tasks = [
                asyncio.create_task(_process_single_interview(interview, idx))
                for idx, interview in enumerate(chunk, start=i)
            ]

            try:
                # Wait for all tasks in the chunk to complete.
                results = await asyncio.gather(
                    *tasks,
                    return_exceptions=not self.run_config.parameters.stop_on_exception,
                )

                # BUG FIX: with return_exceptions=True, gather() returns
                # Exception instances in place of results. The original
                # filtered only on None, so an Exception slipped through and
                # `result.result` raised AttributeError. Filter on the
                # actual success type instead.
                for result in results:
                    if isinstance(result, InterviewResult):
                        yield result.result, result.interview

            except Exception:
                if self.run_config.parameters.stop_on_exception:
                    raise
                # Skip the failed chunk and move on to the next one.
                continue

            finally:
                # Clean up any tasks still pending (e.g. after an exception).
                for task in tasks:
                    if not task.done():
                        task.cancel()
@@ -0,0 +1,104 @@
|
|
1
|
+
from typing import Optional
|
2
|
+
from collections import UserDict
|
3
|
+
from edsl.jobs.buckets.TokenBucket import TokenBucket
|
4
|
+
from edsl.jobs.buckets.ModelBuckets import ModelBuckets
|
5
|
+
|
6
|
+
# from functools import wraps
|
7
|
+
from threading import RLock
|
8
|
+
|
9
|
+
from edsl.jobs.decorators import synchronized_class
|
10
|
+
|
11
|
+
|
12
|
+
@synchronized_class
class BucketCollection(UserDict):
    """A Jobs object will have a whole collection of model buckets, as multiple models could be used.

    The keys here are the models, and the values are the ModelBuckets objects.
    Models themselves are hashable, so this works.

    Models that share an inference service share one ModelBuckets instance,
    so they draw from a single rate-limit budget.
    """

    def __init__(self, infinity_buckets: bool = False):
        """Create a new BucketCollection.

        :param infinity_buckets: when True, buckets never run out of tokens
            or requests (infinite capacity and refill rate).
        """
        super().__init__()
        self.infinity_buckets = infinity_buckets
        # model name -> service name, and service name -> shared buckets.
        self.models_to_services = {}
        self.services_to_buckets = {}
        self._lock = RLock()

        import os

        # If set (and not the literal string "None"), token accounting is
        # delegated to a remote REST token bucket at this URL.
        # NOTE: removed an unused `from edsl.config import CONFIG` here.
        url = os.environ.get("EDSL_REMOTE_TOKEN_BUCKET_URL", None)

        if url == "None" or url is None:
            self.remote_url = None
        else:
            self.remote_url = url

    @classmethod
    def from_models(
        cls, models_list: list, infinity_buckets: bool = False
    ) -> "BucketCollection":
        """Create a BucketCollection from a list of models."""
        bucket_collection = cls(infinity_buckets=infinity_buckets)
        for model in models_list:
            bucket_collection.add_model(model)
        return bucket_collection

    def get_tokens(
        self, model: "LanguageModel", bucket_type: str, num_tokens: int
    ) -> int:
        """Get the number of tokens remaining in the bucket.

        :param bucket_type: attribute name on ModelBuckets, e.g.
            "requests_bucket" or "tokens_bucket" — TODO confirm exact names.
        """
        relevant_bucket = getattr(self[model], bucket_type)
        return relevant_bucket.get_tokens(num_tokens)

    def __repr__(self):
        return f"BucketCollection({self.data})"

    def add_model(self, model: "LanguageModel") -> None:
        """Adds a model to the bucket collection.

        This will create the token and request buckets for the model.
        Buckets are keyed by inference service, so a second model on the
        same service reuses the existing buckets.
        """
        # Convert the model's per-minute limits (tpm/rpm) to per-second
        # rates; infinity buckets are never throttled.
        if not self.infinity_buckets:
            TPS = model.tpm / 60.0
            RPS = model.rpm / 60.0
        else:
            TPS = float("inf")
            RPS = float("inf")

        if model.model not in self.models_to_services:
            service = model._inference_service_
            if service not in self.services_to_buckets:
                requests_bucket = TokenBucket(
                    bucket_name=service,
                    bucket_type="requests",
                    capacity=RPS,
                    refill_rate=RPS,
                    remote_url=self.remote_url,
                )
                tokens_bucket = TokenBucket(
                    bucket_name=service,
                    bucket_type="tokens",
                    capacity=TPS,
                    refill_rate=TPS,
                    remote_url=self.remote_url,
                )
                self.services_to_buckets[service] = ModelBuckets(
                    requests_bucket, tokens_bucket
                )
            self.models_to_services[model.model] = service
            self[model] = self.services_to_buckets[service]
        else:
            self[model] = self.services_to_buckets[self.models_to_services[model.model]]

    def visualize(self) -> dict:
        """Visualize the token and request buckets for each model."""
        plots = {}
        for model in self:
            plots[model] = self[model].visualize()
        return plots
@@ -0,0 +1,65 @@
|
|
1
|
+
# from edsl.jobs.buckets.TokenBucket import TokenBucket
|
2
|
+
|
3
|
+
|
4
|
+
class ModelBuckets:
    """Pairs the request-rate and token-rate buckets for a single model.

    Most LLM services enforce both requests-per-minute (RPM) and
    tokens-per-minute (TPM) limits; one bucket meters each. A request is one
    call to the service, and its token cost depends on the call's parameters.
    """

    def __init__(self, requests_bucket: "TokenBucket", tokens_bucket: "TokenBucket"):
        """Store the two buckets.

        The requests bucket captures requests per unit of time; the tokens
        bucket captures language-model tokens consumed.
        """
        self.requests_bucket = requests_bucket
        self.tokens_bucket = tokens_bucket

    def __add__(self, other: "ModelBuckets"):
        """Combine two model buckets bucket-wise."""
        combined_requests = self.requests_bucket + other.requests_bucket
        combined_tokens = self.tokens_bucket + other.tokens_bucket
        return ModelBuckets(
            requests_bucket=combined_requests,
            tokens_bucket=combined_tokens,
        )

    def turbo_mode_on(self):
        """Set the refill rate to infinity for both buckets."""
        for bucket in (self.requests_bucket, self.tokens_bucket):
            bucket.turbo_mode_on()

    def turbo_mode_off(self):
        """Restore the refill rate to its original value for both buckets."""
        for bucket in (self.requests_bucket, self.tokens_bucket):
            bucket.turbo_mode_off()

    @classmethod
    def infinity_bucket(cls, model_name: str = "not_specified") -> "ModelBuckets":
        """Create a bucket pair with infinite capacity and refill rate."""
        from edsl.jobs.buckets.TokenBucket import TokenBucket

        unlimited = float("inf")
        return cls(
            requests_bucket=TokenBucket(
                bucket_name=model_name,
                bucket_type="requests",
                capacity=unlimited,
                refill_rate=unlimited,
            ),
            tokens_bucket=TokenBucket(
                bucket_name=model_name,
                bucket_type="tokens",
                capacity=unlimited,
                refill_rate=unlimited,
            ),
        )

    def visualize(self):
        """Visualize the request and token buckets, returned as a pair."""
        return self.requests_bucket.visualize(), self.tokens_bucket.visualize()

    def __repr__(self):
        return f"ModelBuckets(requests_bucket={self.requests_bucket}, tokens_bucket={self.tokens_bucket})"
|
@@ -0,0 +1,283 @@
|
|
1
|
+
from typing import Union, List, Any, Optional
|
2
|
+
import asyncio
|
3
|
+
import time
|
4
|
+
from threading import RLock
|
5
|
+
from edsl.jobs.decorators import synchronized_class
|
6
|
+
|
7
|
+
from typing import Union, List, Any, Optional
|
8
|
+
import asyncio
|
9
|
+
import time
|
10
|
+
from threading import RLock
|
11
|
+
from edsl.jobs.decorators import synchronized_class
|
12
|
+
|
13
|
+
|
14
|
+
@synchronized_class
class TokenBucket:
    """This is a token bucket used to respect rate limits to services.

    It can operate either locally or remotely via a REST API based on
    initialization parameters: passing ``remote_url`` makes the constructor
    return a remote client instead of a local bucket.
    """

    def __new__(
        cls,
        *,
        bucket_name: str,
        bucket_type: str,
        capacity: Union[int, float],
        refill_rate: Union[int, float],
        remote_url: Optional[str] = None,
    ):
        """Factory method to create either a local or remote token bucket.

        Args:
            bucket_name: Name of the bucket
            bucket_type: Type of the bucket
            capacity: Maximum number of tokens
            refill_rate: Rate at which tokens are refilled
            remote_url: If provided, creates a remote token bucket client
        """
        if remote_url is not None:
            # Import here to avoid circular imports
            from edsl.jobs.buckets.TokenBucketClient import TokenBucketClient

            return TokenBucketClient(
                bucket_name=bucket_name,
                bucket_type=bucket_type,
                capacity=capacity,
                refill_rate=refill_rate,
                api_base_url=remote_url,
            )

        # Create a local token bucket; __init__ runs next with the same kwargs.
        return super(TokenBucket, cls).__new__(cls)

    def __init__(
        self,
        *,
        bucket_name: str,
        bucket_type: str,
        capacity: Union[int, float],
        refill_rate: Union[int, float],
        remote_url: Optional[str] = None,
    ):
        # Skip initialization if this is a remote bucket: __new__ already
        # produced a fully-constructed remote client in that case.
        if remote_url is not None:
            return

        self.bucket_name = bucket_name
        self.bucket_type = bucket_type
        self.capacity = capacity
        self.added_tokens = 0
        self._lock = RLock()

        # Tokens-per-minute target; set here because it can change with turbo mode.
        self.target_rate = (
            capacity * 60
        )  # set this here because it can change with turbo mode

        # Originals saved so turbo_mode_off() can restore them.
        self._old_capacity = capacity
        self.tokens = capacity  # Current number of available tokens
        self.refill_rate = refill_rate  # Rate at which tokens are refilled
        self._old_refill_rate = refill_rate
        self.last_refill = time.monotonic()  # Last refill time
        self.log: List[Any] = []  # (timestamp, tokens) samples for visualize()
        self.turbo_mode = False

        self.creation_time = time.monotonic()

        # Usage statistics.
        self.num_requests = 0
        self.num_released = 0
        self.tokens_returned = 0

    def turbo_mode_on(self):
        """Set the capacity and refill rate to infinity (idempotent)."""
        # Guard so repeated calls don't re-apply (original used if/pass/else).
        if not self.turbo_mode:
            self.turbo_mode = True
            self.capacity = float("inf")
            self.refill_rate = float("inf")

    def turbo_mode_off(self):
        """Restore the capacity and refill rate to their original values."""
        self.turbo_mode = False
        self.capacity = self._old_capacity
        self.refill_rate = self._old_refill_rate

    def __add__(self, other) -> "TokenBucket":
        """Combine two token buckets.

        The resulting bucket has the minimum capacity and refill rate of the two buckets.
        This is useful, for example, if we have two calls to the same model on the same service but have different temperatures.
        """
        return TokenBucket(
            bucket_name=self.bucket_name,
            bucket_type=self.bucket_type,
            capacity=min(self.capacity, other.capacity),
            refill_rate=min(self.refill_rate, other.refill_rate),
        )

    def __repr__(self):
        return f"TokenBucket(bucket_name={self.bucket_name}, bucket_type='{self.bucket_type}', capacity={self.capacity}, refill_rate={self.refill_rate})"

    def add_tokens(self, tokens: Union[int, float]) -> None:
        """Add tokens to the bucket, up to the maximum capacity.

        :param tokens: The number of tokens to add to the bucket.

        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
        >>> bucket.tokens
        10
        >>> bucket.add_tokens(5)
        >>> bucket.tokens
        10
        """
        self.tokens_returned += tokens
        # The bucket never overflows: clamp at capacity.
        self.tokens = min(self.capacity, self.tokens + tokens)
        self.log.append((time.monotonic(), self.tokens))

    def refill(self) -> None:
        """Refill the bucket with new tokens based on elapsed time.

        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
        >>> bucket.tokens = 0
        >>> bucket.refill()
        >>> bucket.tokens > 0
        True
        """
        now = time.monotonic()
        elapsed = now - self.last_refill
        refill_amount = elapsed * self.refill_rate
        self.tokens = min(self.capacity, self.tokens + refill_amount)
        self.last_refill = now
        self.log.append((now, self.tokens))

    def wait_time(self, requested_tokens: Union[float, int]) -> float:
        """Calculate the time to wait for the requested number of tokens.

        Does not refill first; callers wanting a current estimate should call
        ``refill()`` beforehand. Assumes ``refill_rate`` is nonzero.
        """
        if self.tokens >= requested_tokens:
            return 0
        return (requested_tokens - self.tokens) / self.refill_rate

    async def get_tokens(
        self, amount: Union[int, float] = 1, cheat_bucket_capacity=True
    ) -> None:
        """Wait for the specified number of tokens to become available.

        :param amount: The number of tokens requested.
        :param cheat_bucket_capacity: If True, grow the bucket when the request
            meets or exceeds capacity instead of raising ValueError.

        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
        >>> asyncio.run(bucket.get_tokens(5))
        >>> bucket.tokens
        5
        >>> asyncio.run(bucket.get_tokens(9))
        >>> bucket.tokens < 1
        True

        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=10, refill_rate=1)
        >>> asyncio.run(bucket.get_tokens(11, cheat_bucket_capacity=False))
        Traceback (most recent call last):
        ...
        ValueError: Requested amount exceeds bucket capacity. Bucket capacity: 10, requested amount: 11. As the bucket never overflows, the requested amount will never be available.
        >>> asyncio.run(bucket.get_tokens(11, cheat_bucket_capacity=True))
        >>> bucket.capacity
        12.100000000000001
        """
        self.num_requests += amount
        if amount >= self.capacity:
            if not cheat_bucket_capacity:
                msg = f"Requested amount exceeds bucket capacity. Bucket capacity: {self.capacity}, requested amount: {amount}. As the bucket never overflows, the requested amount will never be available."
                raise ValueError(msg)
            else:
                # Grow the bucket (with 10% headroom) so the request can succeed.
                self.capacity = amount * 1.10
                self._old_capacity = self.capacity

        while True:
            self.refill()  # Refill based on elapsed time
            if self.tokens >= amount:
                self.tokens -= amount
                break

            wait_time = self.wait_time(amount)
            if wait_time > 0:
                await asyncio.sleep(wait_time)

        self.num_released += amount
        self.log.append((time.monotonic(), self.tokens))
        return None

    def get_log(self) -> list[tuple]:
        """Return the recorded (timestamp, tokens) history."""
        return self.log

    def visualize(self):
        """Visualize the token bucket over time."""
        times, tokens = zip(*self.get_log())
        start_time = times[0]
        times = [t - start_time for t in times]  # Normalize time to start from 0
        from matplotlib import pyplot as plt

        plt.figure(figsize=(10, 6))
        plt.plot(times, tokens, label="Tokens Available")
        plt.xlabel("Time (seconds)", fontsize=12)
        plt.ylabel("Number of Tokens", fontsize=12)
        details = f"{self.bucket_name} ({self.bucket_type}) Bucket Usage Over Time\nCapacity: {self.capacity:.1f}, Refill Rate: {self.refill_rate:.1f}/second"
        plt.title(details, fontsize=14)

        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        plt.show()

    def get_throughput(self, time_window: Optional[float] = None) -> float:
        """
        Calculate the empirical bucket throughput in tokens per minute for the specified time window.

        :param time_window: The time window in seconds to calculate the throughput for.
        :return: The throughput in tokens per minute.

        >>> bucket = TokenBucket(bucket_name="test", bucket_type="test", capacity=100, refill_rate=10)
        >>> asyncio.run(bucket.get_tokens(50))
        >>> time.sleep(1)  # Wait for 1 second
        >>> asyncio.run(bucket.get_tokens(30))
        >>> throughput = bucket.get_throughput(1)
        >>> 4750 < throughput < 4850
        True
        """
        now = time.monotonic()

        # Default window: since bucket creation; never look before creation.
        if time_window is None:
            start_time = self.creation_time
        else:
            start_time = now - time_window

        if start_time < self.creation_time:
            start_time = self.creation_time

        elapsed_time = now - start_time

        # Guard against division by zero when called immediately after creation.
        if elapsed_time == 0:
            return self.num_released / 0.001

        return (self.num_released / elapsed_time) * 60
|
278
|
+
|
279
|
+
|
280
|
+
if __name__ == "__main__":
    # Run this module's doctests; ELLIPSIS allows "..." in expected output.
    from doctest import ELLIPSIS, testmod

    testmod(optionflags=ELLIPSIS)
|