edsl 0.1.46__py3-none-any.whl → 0.1.48__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/__init__.py +44 -39
- edsl/__version__.py +1 -1
- edsl/agents/__init__.py +4 -2
- edsl/agents/{Agent.py → agent.py} +442 -152
- edsl/agents/{AgentList.py → agent_list.py} +220 -162
- edsl/agents/descriptors.py +46 -7
- edsl/{exceptions/agents.py → agents/exceptions.py} +3 -12
- edsl/base/__init__.py +75 -0
- edsl/base/base_class.py +1303 -0
- edsl/base/data_transfer_models.py +114 -0
- edsl/base/enums.py +215 -0
- edsl/base.py +8 -0
- edsl/buckets/__init__.py +25 -0
- edsl/buckets/bucket_collection.py +324 -0
- edsl/buckets/model_buckets.py +206 -0
- edsl/buckets/token_bucket.py +502 -0
- edsl/{jobs/buckets/TokenBucketAPI.py → buckets/token_bucket_api.py} +1 -1
- edsl/buckets/token_bucket_client.py +509 -0
- edsl/caching/__init__.py +20 -0
- edsl/caching/cache.py +814 -0
- edsl/caching/cache_entry.py +427 -0
- edsl/{data/CacheHandler.py → caching/cache_handler.py} +14 -15
- edsl/caching/exceptions.py +24 -0
- edsl/caching/orm.py +30 -0
- edsl/{data/RemoteCacheSync.py → caching/remote_cache_sync.py} +3 -3
- edsl/caching/sql_dict.py +441 -0
- edsl/config/__init__.py +8 -0
- edsl/config/config_class.py +177 -0
- edsl/config.py +4 -176
- edsl/conversation/Conversation.py +7 -7
- edsl/conversation/car_buying.py +4 -4
- edsl/conversation/chips.py +6 -6
- edsl/coop/__init__.py +25 -2
- edsl/coop/coop.py +430 -113
- edsl/coop/{ExpectedParrotKeyHandler.py → ep_key_handling.py} +86 -10
- edsl/coop/exceptions.py +62 -0
- edsl/coop/price_fetcher.py +126 -0
- edsl/coop/utils.py +89 -24
- edsl/data_transfer_models.py +5 -72
- edsl/dataset/__init__.py +10 -0
- edsl/{results/Dataset.py → dataset/dataset.py} +116 -36
- edsl/dataset/dataset_operations_mixin.py +1492 -0
- edsl/{results/DatasetTree.py → dataset/dataset_tree.py} +156 -75
- edsl/{results/TableDisplay.py → dataset/display/table_display.py} +18 -7
- edsl/{results → dataset/display}/table_renderers.py +58 -2
- edsl/{results → dataset}/file_exports.py +4 -5
- edsl/{results → dataset}/smart_objects.py +2 -2
- edsl/enums.py +5 -205
- edsl/inference_services/__init__.py +5 -0
- edsl/inference_services/{AvailableModelCacheHandler.py → available_model_cache_handler.py} +2 -3
- edsl/inference_services/{AvailableModelFetcher.py → available_model_fetcher.py} +8 -14
- edsl/inference_services/data_structures.py +3 -2
- edsl/{exceptions/inference_services.py → inference_services/exceptions.py} +1 -1
- edsl/inference_services/{InferenceServiceABC.py → inference_service_abc.py} +1 -1
- edsl/inference_services/{InferenceServicesCollection.py → inference_services_collection.py} +8 -7
- edsl/inference_services/registry.py +4 -41
- edsl/inference_services/{ServiceAvailability.py → service_availability.py} +5 -25
- edsl/inference_services/services/__init__.py +31 -0
- edsl/inference_services/{AnthropicService.py → services/anthropic_service.py} +3 -3
- edsl/inference_services/{AwsBedrock.py → services/aws_bedrock.py} +2 -2
- edsl/inference_services/{AzureAI.py → services/azure_ai.py} +2 -2
- edsl/inference_services/{DeepInfraService.py → services/deep_infra_service.py} +1 -3
- edsl/inference_services/{DeepSeekService.py → services/deep_seek_service.py} +2 -4
- edsl/inference_services/{GoogleService.py → services/google_service.py} +5 -4
- edsl/inference_services/{GroqService.py → services/groq_service.py} +1 -1
- edsl/inference_services/{MistralAIService.py → services/mistral_ai_service.py} +3 -3
- edsl/inference_services/{OllamaService.py → services/ollama_service.py} +1 -7
- edsl/inference_services/{OpenAIService.py → services/open_ai_service.py} +5 -6
- edsl/inference_services/{PerplexityService.py → services/perplexity_service.py} +12 -12
- edsl/inference_services/{TestService.py → services/test_service.py} +7 -6
- edsl/inference_services/{TogetherAIService.py → services/together_ai_service.py} +2 -6
- edsl/inference_services/{XAIService.py → services/xai_service.py} +1 -1
- edsl/inference_services/write_available.py +1 -2
- edsl/instructions/__init__.py +6 -0
- edsl/{surveys/instructions/Instruction.py → instructions/instruction.py} +11 -6
- edsl/{surveys/instructions/InstructionCollection.py → instructions/instruction_collection.py} +10 -5
- edsl/{surveys/InstructionHandler.py → instructions/instruction_handler.py} +3 -3
- edsl/{jobs/interviews → interviews}/ReportErrors.py +2 -2
- edsl/interviews/__init__.py +4 -0
- edsl/{jobs/AnswerQuestionFunctionConstructor.py → interviews/answering_function.py} +45 -18
- edsl/{jobs/interviews/InterviewExceptionEntry.py → interviews/exception_tracking.py} +107 -22
- edsl/interviews/interview.py +638 -0
- edsl/{jobs/interviews/InterviewStatusDictionary.py → interviews/interview_status_dictionary.py} +21 -12
- edsl/{jobs/interviews/InterviewStatusLog.py → interviews/interview_status_log.py} +16 -7
- edsl/{jobs/InterviewTaskManager.py → interviews/interview_task_manager.py} +12 -7
- edsl/{jobs/RequestTokenEstimator.py → interviews/request_token_estimator.py} +8 -3
- edsl/{jobs/interviews/InterviewStatistic.py → interviews/statistics.py} +36 -10
- edsl/invigilators/__init__.py +38 -0
- edsl/invigilators/invigilator_base.py +477 -0
- edsl/{agents/Invigilator.py → invigilators/invigilators.py} +263 -10
- edsl/invigilators/prompt_constructor.py +476 -0
- edsl/{agents → invigilators}/prompt_helpers.py +2 -1
- edsl/{agents/QuestionInstructionPromptBuilder.py → invigilators/question_instructions_prompt_builder.py} +18 -13
- edsl/{agents → invigilators}/question_option_processor.py +96 -21
- edsl/{agents/QuestionTemplateReplacementsBuilder.py → invigilators/question_template_replacements_builder.py} +64 -12
- edsl/jobs/__init__.py +7 -1
- edsl/jobs/async_interview_runner.py +99 -35
- edsl/jobs/check_survey_scenario_compatibility.py +7 -5
- edsl/jobs/data_structures.py +153 -22
- edsl/{exceptions/jobs.py → jobs/exceptions.py} +2 -1
- edsl/jobs/{FetchInvigilator.py → fetch_invigilator.py} +4 -4
- edsl/jobs/{loggers/HTMLTableJobLogger.py → html_table_job_logger.py} +6 -2
- edsl/jobs/{Jobs.py → jobs.py} +321 -155
- edsl/jobs/{JobsChecks.py → jobs_checks.py} +15 -7
- edsl/jobs/{JobsComponentConstructor.py → jobs_component_constructor.py} +20 -17
- edsl/jobs/{InterviewsConstructor.py → jobs_interview_constructor.py} +10 -5
- edsl/jobs/jobs_pricing_estimation.py +347 -0
- edsl/jobs/{JobsRemoteInferenceLogger.py → jobs_remote_inference_logger.py} +4 -3
- edsl/jobs/jobs_runner_asyncio.py +282 -0
- edsl/jobs/{JobsRemoteInferenceHandler.py → remote_inference.py} +19 -22
- edsl/jobs/results_exceptions_handler.py +2 -2
- edsl/key_management/__init__.py +28 -0
- edsl/key_management/key_lookup.py +161 -0
- edsl/{language_models/key_management/KeyLookupBuilder.py → key_management/key_lookup_builder.py} +118 -47
- edsl/key_management/key_lookup_collection.py +82 -0
- edsl/key_management/models.py +218 -0
- edsl/language_models/__init__.py +7 -2
- edsl/language_models/{ComputeCost.py → compute_cost.py} +18 -3
- edsl/{exceptions/language_models.py → language_models/exceptions.py} +2 -1
- edsl/language_models/language_model.py +1080 -0
- edsl/language_models/model.py +10 -25
- edsl/language_models/{ModelList.py → model_list.py} +9 -14
- edsl/language_models/{RawResponseHandler.py → raw_response_handler.py} +1 -1
- edsl/language_models/{RegisterLanguageModelsMeta.py → registry.py} +1 -1
- edsl/language_models/repair.py +4 -4
- edsl/language_models/utilities.py +4 -4
- edsl/notebooks/__init__.py +3 -1
- edsl/notebooks/{Notebook.py → notebook.py} +7 -8
- edsl/prompts/__init__.py +1 -1
- edsl/{exceptions/prompts.py → prompts/exceptions.py} +3 -1
- edsl/prompts/{Prompt.py → prompt.py} +101 -95
- edsl/questions/HTMLQuestion.py +1 -1
- edsl/questions/__init__.py +154 -25
- edsl/questions/answer_validator_mixin.py +1 -1
- edsl/questions/compose_questions.py +4 -3
- edsl/questions/derived/question_likert_five.py +166 -0
- edsl/questions/derived/{QuestionLinearScale.py → question_linear_scale.py} +4 -4
- edsl/questions/derived/{QuestionTopK.py → question_top_k.py} +4 -4
- edsl/questions/derived/{QuestionYesNo.py → question_yes_no.py} +4 -5
- edsl/questions/descriptors.py +24 -30
- edsl/questions/loop_processor.py +65 -19
- edsl/questions/question_base.py +881 -0
- edsl/questions/question_base_gen_mixin.py +15 -16
- edsl/questions/{QuestionBasePromptsMixin.py → question_base_prompts_mixin.py} +2 -2
- edsl/questions/{QuestionBudget.py → question_budget.py} +3 -4
- edsl/questions/{QuestionCheckBox.py → question_check_box.py} +16 -16
- edsl/questions/{QuestionDict.py → question_dict.py} +39 -5
- edsl/questions/{QuestionExtract.py → question_extract.py} +9 -9
- edsl/questions/question_free_text.py +282 -0
- edsl/questions/{QuestionFunctional.py → question_functional.py} +6 -5
- edsl/questions/{QuestionList.py → question_list.py} +6 -7
- edsl/questions/{QuestionMatrix.py → question_matrix.py} +6 -5
- edsl/questions/{QuestionMultipleChoice.py → question_multiple_choice.py} +126 -21
- edsl/questions/{QuestionNumerical.py → question_numerical.py} +5 -5
- edsl/questions/{QuestionRank.py → question_rank.py} +6 -6
- edsl/questions/question_registry.py +10 -16
- edsl/questions/register_questions_meta.py +8 -4
- edsl/questions/response_validator_abc.py +17 -16
- edsl/results/__init__.py +4 -1
- edsl/{exceptions/results.py → results/exceptions.py} +1 -1
- edsl/results/report.py +197 -0
- edsl/results/{Result.py → result.py} +131 -45
- edsl/results/{Results.py → results.py} +420 -216
- edsl/results/results_selector.py +344 -25
- edsl/scenarios/__init__.py +30 -3
- edsl/scenarios/{ConstructDownloadLink.py → construct_download_link.py} +7 -0
- edsl/scenarios/directory_scanner.py +156 -13
- edsl/scenarios/document_chunker.py +186 -0
- edsl/scenarios/exceptions.py +101 -0
- edsl/scenarios/file_methods.py +2 -3
- edsl/scenarios/file_store.py +755 -0
- edsl/scenarios/handlers/__init__.py +14 -14
- edsl/scenarios/handlers/{csv.py → csv_file_store.py} +1 -2
- edsl/scenarios/handlers/{docx.py → docx_file_store.py} +8 -7
- edsl/scenarios/handlers/{html.py → html_file_store.py} +1 -2
- edsl/scenarios/handlers/{jpeg.py → jpeg_file_store.py} +1 -1
- edsl/scenarios/handlers/{json.py → json_file_store.py} +1 -1
- edsl/scenarios/handlers/latex_file_store.py +5 -0
- edsl/scenarios/handlers/{md.py → md_file_store.py} +1 -1
- edsl/scenarios/handlers/{pdf.py → pdf_file_store.py} +2 -2
- edsl/scenarios/handlers/{png.py → png_file_store.py} +1 -1
- edsl/scenarios/handlers/{pptx.py → pptx_file_store.py} +8 -7
- edsl/scenarios/handlers/{py.py → py_file_store.py} +1 -3
- edsl/scenarios/handlers/{sql.py → sql_file_store.py} +2 -1
- edsl/scenarios/handlers/{sqlite.py → sqlite_file_store.py} +2 -3
- edsl/scenarios/handlers/{txt.py → txt_file_store.py} +1 -1
- edsl/scenarios/scenario.py +928 -0
- edsl/scenarios/scenario_join.py +18 -5
- edsl/scenarios/{ScenarioList.py → scenario_list.py} +424 -106
- edsl/scenarios/{ScenarioListPdfMixin.py → scenario_list_pdf_tools.py} +16 -15
- edsl/scenarios/scenario_selector.py +5 -1
- edsl/study/ObjectEntry.py +2 -2
- edsl/study/SnapShot.py +5 -5
- edsl/study/Study.py +20 -21
- edsl/study/__init__.py +6 -4
- edsl/surveys/__init__.py +7 -4
- edsl/surveys/dag/__init__.py +2 -0
- edsl/surveys/{ConstructDAG.py → dag/construct_dag.py} +3 -3
- edsl/surveys/{DAG.py → dag/dag.py} +13 -10
- edsl/surveys/descriptors.py +1 -1
- edsl/surveys/{EditSurvey.py → edit_survey.py} +9 -9
- edsl/{exceptions/surveys.py → surveys/exceptions.py} +1 -2
- edsl/surveys/memory/__init__.py +3 -0
- edsl/surveys/{MemoryPlan.py → memory/memory_plan.py} +10 -9
- edsl/surveys/rules/__init__.py +3 -0
- edsl/surveys/{Rule.py → rules/rule.py} +103 -43
- edsl/surveys/{RuleCollection.py → rules/rule_collection.py} +21 -30
- edsl/surveys/{RuleManager.py → rules/rule_manager.py} +19 -13
- edsl/surveys/survey.py +1743 -0
- edsl/surveys/{SurveyExportMixin.py → survey_export.py} +22 -27
- edsl/surveys/{SurveyFlowVisualization.py → survey_flow_visualization.py} +11 -2
- edsl/surveys/{Simulator.py → survey_simulator.py} +10 -3
- edsl/tasks/__init__.py +32 -0
- edsl/{jobs/tasks/QuestionTaskCreator.py → tasks/question_task_creator.py} +115 -57
- edsl/tasks/task_creators.py +135 -0
- edsl/{jobs/tasks/TaskHistory.py → tasks/task_history.py} +86 -47
- edsl/{jobs/tasks → tasks}/task_status_enum.py +91 -7
- edsl/tasks/task_status_log.py +85 -0
- edsl/tokens/__init__.py +2 -0
- edsl/tokens/interview_token_usage.py +53 -0
- edsl/utilities/PrettyList.py +1 -1
- edsl/utilities/SystemInfo.py +25 -22
- edsl/utilities/__init__.py +29 -21
- edsl/utilities/gcp_bucket/__init__.py +2 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +99 -96
- edsl/utilities/interface.py +44 -536
- edsl/{results/MarkdownToPDF.py → utilities/markdown_to_pdf.py} +13 -5
- edsl/utilities/repair_functions.py +1 -1
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/METADATA +3 -2
- edsl-0.1.48.dist-info/RECORD +347 -0
- edsl/Base.py +0 -426
- edsl/BaseDiff.py +0 -260
- edsl/agents/InvigilatorBase.py +0 -260
- edsl/agents/PromptConstructor.py +0 -318
- edsl/auto/AutoStudy.py +0 -130
- edsl/auto/StageBase.py +0 -243
- edsl/auto/StageGenerateSurvey.py +0 -178
- edsl/auto/StageLabelQuestions.py +0 -125
- edsl/auto/StagePersona.py +0 -61
- edsl/auto/StagePersonaDimensionValueRanges.py +0 -88
- edsl/auto/StagePersonaDimensionValues.py +0 -74
- edsl/auto/StagePersonaDimensions.py +0 -69
- edsl/auto/StageQuestions.py +0 -74
- edsl/auto/SurveyCreatorPipeline.py +0 -21
- edsl/auto/utilities.py +0 -218
- edsl/base/Base.py +0 -279
- edsl/coop/PriceFetcher.py +0 -54
- edsl/data/Cache.py +0 -580
- edsl/data/CacheEntry.py +0 -230
- edsl/data/SQLiteDict.py +0 -292
- edsl/data/__init__.py +0 -5
- edsl/data/orm.py +0 -10
- edsl/exceptions/cache.py +0 -5
- edsl/exceptions/coop.py +0 -14
- edsl/exceptions/data.py +0 -14
- edsl/exceptions/scenarios.py +0 -29
- edsl/jobs/Answers.py +0 -43
- edsl/jobs/JobsPrompts.py +0 -354
- edsl/jobs/buckets/BucketCollection.py +0 -134
- edsl/jobs/buckets/ModelBuckets.py +0 -65
- edsl/jobs/buckets/TokenBucket.py +0 -283
- edsl/jobs/buckets/TokenBucketClient.py +0 -191
- edsl/jobs/interviews/Interview.py +0 -395
- edsl/jobs/interviews/InterviewExceptionCollection.py +0 -99
- edsl/jobs/interviews/InterviewStatisticsCollection.py +0 -25
- edsl/jobs/runners/JobsRunnerAsyncio.py +0 -163
- edsl/jobs/runners/JobsRunnerStatusData.py +0 -0
- edsl/jobs/tasks/TaskCreators.py +0 -64
- edsl/jobs/tasks/TaskStatusLog.py +0 -23
- edsl/jobs/tokens/InterviewTokenUsage.py +0 -27
- edsl/language_models/LanguageModel.py +0 -635
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/key_management/KeyLookup.py +0 -63
- edsl/language_models/key_management/KeyLookupCollection.py +0 -38
- edsl/language_models/key_management/models.py +0 -137
- edsl/questions/QuestionBase.py +0 -539
- edsl/questions/QuestionFreeText.py +0 -130
- edsl/questions/derived/QuestionLikertFive.py +0 -76
- edsl/results/DatasetExportMixin.py +0 -911
- edsl/results/ResultsExportMixin.py +0 -45
- edsl/results/TextEditor.py +0 -50
- edsl/results/results_fetch_mixin.py +0 -33
- edsl/results/results_tools_mixin.py +0 -98
- edsl/scenarios/DocumentChunker.py +0 -104
- edsl/scenarios/FileStore.py +0 -564
- edsl/scenarios/Scenario.py +0 -548
- edsl/scenarios/ScenarioHtmlMixin.py +0 -65
- edsl/scenarios/ScenarioListExportMixin.py +0 -45
- edsl/scenarios/handlers/latex.py +0 -5
- edsl/shared.py +0 -1
- edsl/surveys/Survey.py +0 -1306
- edsl/surveys/SurveyQualtricsImport.py +0 -284
- edsl/surveys/SurveyToApp.py +0 -141
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/tools/__init__.py +0 -1
- edsl/tools/clusters.py +0 -192
- edsl/tools/embeddings.py +0 -27
- edsl/tools/embeddings_plotting.py +0 -118
- edsl/tools/plotting.py +0 -112
- edsl/tools/summarize.py +0 -18
- edsl/utilities/data/Registry.py +0 -6
- edsl/utilities/data/__init__.py +0 -1
- edsl/utilities/data/scooter_results.json +0 -1
- edsl-0.1.46.dist-info/RECORD +0 -366
- /edsl/coop/{CoopFunctionsMixin.py → coop_functions.py} +0 -0
- /edsl/{results → dataset/display}/CSSParameterizer.py +0 -0
- /edsl/{language_models/key_management → dataset/display}/__init__.py +0 -0
- /edsl/{results → dataset/display}/table_data_class.py +0 -0
- /edsl/{results → dataset/display}/table_display.css +0 -0
- /edsl/{results/ResultsGGMixin.py → dataset/r/ggplot.py} +0 -0
- /edsl/{results → dataset}/tree_explore.py +0 -0
- /edsl/{surveys/instructions/ChangeInstruction.py → instructions/change_instruction.py} +0 -0
- /edsl/{jobs/interviews → interviews}/interview_status_enum.py +0 -0
- /edsl/jobs/{runners/JobsRunnerStatus.py → jobs_runner_status.py} +0 -0
- /edsl/language_models/{PriceManager.py → price_manager.py} +0 -0
- /edsl/language_models/{fake_openai_call.py → unused/fake_openai_call.py} +0 -0
- /edsl/language_models/{fake_openai_service.py → unused/fake_openai_service.py} +0 -0
- /edsl/notebooks/{NotebookToLaTeX.py → notebook_to_latex.py} +0 -0
- /edsl/{exceptions/questions.py → questions/exceptions.py} +0 -0
- /edsl/questions/{SimpleAskMixin.py → simple_ask_mixin.py} +0 -0
- /edsl/surveys/{Memory.py → memory/memory.py} +0 -0
- /edsl/surveys/{MemoryManagement.py → memory/memory_management.py} +0 -0
- /edsl/surveys/{SurveyCSS.py → survey_css.py} +0 -0
- /edsl/{jobs/tokens/TokenUsage.py → tokens/token_usage.py} +0 -0
- /edsl/{results/MarkdownToDocx.py → utilities/markdown_to_docx.py} +0 -0
- /edsl/{TemplateLoader.py → utilities/template_loader.py} +0 -0
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/LICENSE +0 -0
- {edsl-0.1.46.dist-info → edsl-0.1.48.dist-info}/WHEEL +0 -0
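Note: the listing above reflects a broad reorganization from CamelCase module files into snake_case subpackages (for example, edsl/data/Cache.py becomes edsl/caching/cache.py). A minimal sketch of how imports that targeted 0.1.46 module paths might map onto the 0.1.48 layout, assuming the class names inside the moved modules are unchanged and that no compatibility re-exports are kept at the old paths:

```python
# Hypothetical migration of imports that targeted edsl 0.1.46 internal modules.
# New paths are taken from the rename listing above; the class names and the
# absence of re-exports at the old paths are assumptions.

# 0.1.46 layout:
# from edsl.data.Cache import Cache
# from edsl.surveys.Survey import Survey
# from edsl.language_models.LanguageModel import LanguageModel

# 0.1.48 layout:
from edsl.caching.cache import Cache
from edsl.surveys.survey import Survey
from edsl.language_models.language_model import LanguageModel
```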
edsl/jobs/jobs_runner_asyncio.py (new file)
@@ -0,0 +1,282 @@
+"""
+Asynchronous execution engine for EDSL jobs.
+
+This module provides the core functionality for running interviews asynchronously,
+which is essential for efficient execution of large jobs. It handles the complex
+process of coordinating multiple concurrent interviews, managing progress tracking,
+and gracefully handling cancellations and errors.
+
+Key components:
+- JobsRunnerAsyncio: The main class that orchestrates async execution
+- Progress bar integration with remote status tracking
+- Error handling and graceful cancellation
+- Result collection and organization
+
+This module is primarily used internally by the Jobs class and is typically not
+accessed directly by end users, though advanced users may need to understand its
+behavior when customizing job execution.
+"""
+from __future__ import annotations
+import time
+import asyncio
+import threading
+import warnings
+from typing import TYPE_CHECKING, List, Generator, Tuple, Optional, Any
+
+if TYPE_CHECKING:
+    from ..results import Results
+
+from ..results import Results
+from ..tasks import TaskHistory
+from ..utilities.decorators import jupyter_nb_handler
+
+from .jobs_runner_status import JobsRunnerStatus
+from .async_interview_runner import AsyncInterviewRunner
+from .data_structures import RunEnvironment, RunParameters, RunConfig
+
+if TYPE_CHECKING:
+    from ..jobs import Jobs
+    from ..interviews import Interview
+
+
+class JobsRunnerAsyncio:
+    """
+    Executes a collection of interviews asynchronously with progress tracking.
+
+    This class is the main execution engine for EDSL jobs. It manages the asynchronous
+    running of interviews, handles progress tracking, and organizes results. It is
+    instantiated by a Jobs object and handles the complex execution logic that makes
+    parallel interview processing efficient.
+
+    Key responsibilities:
+    1. Coordinating asynchronous execution of interviews
+    2. Tracking and reporting progress
+    3. Handling errors and cancellations
+    4. Collecting and organizing results
+
+    This class supports two main execution modes:
+    - run(): For synchronous contexts (returns after completion)
+    - run_async(): For asynchronous contexts (can be awaited)
+    """
+
+    def __init__(self, jobs: "Jobs", environment: RunEnvironment):
+        """
+        Initialize a JobsRunnerAsyncio instance.
+
+        Parameters:
+            jobs (Jobs): The Jobs instance containing the interviews to run
+            environment (RunEnvironment): The environment configuration containing
+                resources like cache, key_lookup, and bucket_collection
+
+        Notes:
+            - The Jobs instance provides the interviews to be executed
+            - The environment contains resources like caches and API keys
+            - Additional runtime state like completion status is initialized when run() is called
+        """
+        self.jobs = jobs
+        self.environment = environment
+        # These will be set when run() is called
+        self.start_time = None
+        self.completed = None
+
+    def __len__(self):
+        return len(self.jobs)
+
+    async def run_async(self, parameters: RunParameters) -> 'Results':
+        """
+        Execute interviews asynchronously without progress tracking.
+
+        This method provides a simplified version of the run method, primarily used
+        by other modules that need direct access to asynchronous execution without
+        the full feature set of the main run() method. This is a lower-level interface
+        that doesn't include progress bars or advanced error handling.
+
+        Parameters:
+            parameters (RunParameters): Configuration parameters for the run
+
+        Returns:
+            Results: A Results object containing all responses and metadata
+
+        Notes:
+            - This method doesn't support progress bars or interactive cancellation
+            - It doesn't handle keyboard interrupts specially
+            - It's primarily meant for internal use by other EDSL components
+            - For most use cases, the main run() method is preferred
+        """
+        # Initialize a simple status tracker (no progress bar)
+        self.environment.jobs_runner_status = JobsRunnerStatus(self, n=parameters.n)
+        data = []
+        task_history = TaskHistory(include_traceback=False)
+
+        run_config = RunConfig(parameters=parameters, environment=self.environment)
+        result_generator = AsyncInterviewRunner(self.jobs, run_config)
+
+        # Process results as they come in
+        async for result, interview in result_generator.run():
+            data.append(result)
+            task_history.add_interview(interview)
+
+        # Create the results object
+        results = Results(survey=self.jobs.survey, task_history=task_history, data=data)
+
+        # Extract only the relevant cache entries
+        relevant_cache = results.relevant_cache(self.environment.cache)
+
+        return Results(
+            survey=self.jobs.survey,
+            task_history=task_history,
+            data=data,
+            cache=relevant_cache,
+        )
+
+    def simple_run(self, parameters: Optional[RunParameters] = None) -> Results:
+        """
+        Run interviews synchronously with minimal configuration.
+
+        This is a convenience method that provides a very simple synchronous interface
+        for running jobs. It's primarily used for quick tests or debugging, not for
+        production use.
+
+        Parameters:
+            parameters (RunParameters, optional): Configuration parameters for the run.
+                If not provided, default parameters will be used.
+
+        Returns:
+            Results: A Results object containing all responses and metadata
+
+        Notes:
+            - This method is synchronous (blocks until completion)
+            - It doesn't include progress tracking or advanced error handling
+            - For production use, use the main run() method instead
+        """
+        if parameters is None:
+            parameters = RunParameters()
+
+        data = asyncio.run(self.run_async(parameters))
+        return Results(survey=self.jobs.survey, data=data)
+
+    @jupyter_nb_handler
+    async def run(self, parameters: RunParameters) -> Results:
+        """
+        Execute interviews asynchronously with full feature support.
+
+        This is the main method for running jobs with full feature support, including
+        progress tracking, error handling, and graceful cancellation. It's decorated
+        with @jupyter_nb_handler to ensure proper handling in notebook environments.
+
+        Parameters:
+            parameters (RunParameters): Configuration parameters for the run
+
+        Returns:
+            Results: A Results object containing all responses and metadata
+
+        Raises:
+            Exception: Any unhandled exception from interviews if stop_on_exception=True
+            KeyboardInterrupt: If the user interrupts execution and it can't be handled gracefully
+
+        Notes:
+            - Supports progress bars with remote tracking via Coop
+            - Handles keyboard interrupts gracefully
+            - Manages concurrent execution of multiple interviews
+            - Collects and consolidates results from all completed interviews
+            - Can be used in both async and sync contexts due to the @jupyter_nb_handler decorator
+        """
+
+        run_config = RunConfig(parameters=parameters, environment=self.environment)
+
+        self.start_time = time.monotonic()
+        self.completed = False
+
+        from edsl.coop import Coop
+
+        coop = Coop()
+        endpoint_url = coop.get_progress_bar_url()
+
+        def set_up_jobs_runner_status(jobs_runner_status):
+            if jobs_runner_status is not None:
+                return jobs_runner_status(
+                    self,
+                    n=parameters.n,
+                    endpoint_url=endpoint_url,
+                    job_uuid=parameters.job_uuid,
+                )
+            else:
+                return JobsRunnerStatus(
+                    self,
+                    n=parameters.n,
+                    endpoint_url=endpoint_url,
+                    job_uuid=parameters.job_uuid,
+                )
+
+        run_config.environment.jobs_runner_status = set_up_jobs_runner_status(
+            self.environment.jobs_runner_status
+        )
+
+        async def get_results(results) -> None:
+            """Conducted the interviews and append to the results list."""
+            result_generator = AsyncInterviewRunner(self.jobs, run_config)
+            async for result, interview in result_generator.run():
+                results.append(result)
+                results.task_history.add_interview(interview)
+
+            self.completed = True
+
+        def run_progress_bar(stop_event, jobs_runner_status) -> None:
+            """Runs the progress bar in a separate thread."""
+            jobs_runner_status.update_progress(stop_event)
+
+        def set_up_progress_bar(progress_bar: bool, jobs_runner_status):
+            progress_thread = None
+            if progress_bar and jobs_runner_status.has_ep_api_key():
+                jobs_runner_status.setup()
+                progress_thread = threading.Thread(
+                    target=run_progress_bar, args=(stop_event, jobs_runner_status)
+                )
+                progress_thread.start()
+            elif progress_bar:
+                warnings.warn(
+                    "You need an Expected Parrot API key to view job progress bars."
+                )
+            return progress_thread
+
+        results = Results(
+            survey=self.jobs.survey,
+            data=[],
+            task_history=TaskHistory(),
+            # cache=self.environment.cache.new_entries_cache(),
+        )
+
+        stop_event = threading.Event()
+        progress_thread = set_up_progress_bar(
+            parameters.progress_bar, run_config.environment.jobs_runner_status
+        )
+
+        exception_to_raise = None
+        try:
+            await get_results(results)
+        except KeyboardInterrupt:
+            print("Keyboard interrupt received. Stopping gracefully...")
+            stop_event.set()
+        except Exception as e:
+            if parameters.stop_on_exception:
+                exception_to_raise = e
+            stop_event.set()
+        finally:
+            stop_event.set()
+            if progress_thread is not None:
+                progress_thread.join()
+
+        if exception_to_raise:
+            raise exception_to_raise
+
+        relevant_cache = results.relevant_cache(self.environment.cache)
+        results.cache = relevant_cache
+        # breakpoint()
+        results.bucket_collection = self.environment.bucket_collection
+
+        from edsl.jobs.results_exceptions_handler import ResultsExceptionsHandler
+
+        results_exceptions_handler = ResultsExceptionsHandler(results, parameters)
+
+        results_exceptions_handler.handle_exceptions()
+        return results
edsl/jobs/{JobsRemoteInferenceHandler.py → remote_inference.py}
@@ -6,20 +6,17 @@ from dataclasses import dataclass
 Seconds = NewType("Seconds", float)
 JobUUID = NewType("JobUUID", str)

-from edsl.exceptions.coop import CoopServerResponseError
-
 if TYPE_CHECKING:
-    from
-    from
-    from
-    from edsl.jobs.JobsRemoteInferenceLogger import JobLogger
-
-    from edsl.coop.coop import RemoteInferenceResponse, RemoteInferenceCreationInfo
+    from ..results import Results
+    from .jobs import Jobs
+    from .jobs_remote_inference_logger import JobLogger

-    from
-    from
-    from
+    from ..coop import CoopServerResponseError
+    from ..coop.utils import VisibilityType
+    from ..coop.coop import RemoteInferenceResponse, RemoteInferenceCreationInfo

+from .jobs_status_enums import JobsStatus
+from .jobs_remote_inference_logger import JobLogger

 class RemoteJobConstants:
     """Constants for remote job handling."""
@@ -54,12 +51,12 @@ class JobsRemoteInferenceHandler:
         self.remote_inference_url = f"{self.expected_parrot_url}/home/remote-inference"

     def _create_logger(self) -> JobLogger:
-        from
-        from
+        from ..utilities import is_notebook
+        from .jobs_remote_inference_logger import (
             JupyterJobLogger,
             StdOutJobLogger,
         )
-        from
+        from .html_table_job_logger import HTMLTableJobLogger

         if is_notebook():
             return HTMLTableJobLogger(verbose=self.verbose)
@@ -72,7 +69,7 @@ class JobsRemoteInferenceHandler:
             return False
         if not disable_remote_inference:
             try:
-                from
+                from ..coop import Coop

                 user_edsl_settings = Coop().edsl_settings
                 return user_edsl_settings.get("remote_inference", False)
@@ -87,11 +84,11 @@ class JobsRemoteInferenceHandler:
         self,
         iterations: int = 1,
         remote_inference_description: Optional[str] = None,
-        remote_inference_results_visibility: Optional[VisibilityType] = "unlisted",
+        remote_inference_results_visibility: Optional['VisibilityType'] = "unlisted",
         fresh: Optional[bool] = False,
     ) -> RemoteJobInfo:
-        from
-        from
+        from ..config import CONFIG
+        from ..coop import Coop

         logger = self._create_logger()

@@ -140,8 +137,8 @@ class JobsRemoteInferenceHandler:
     @staticmethod
     def check_status(
         job_uuid: JobUUID,
-    ) -> RemoteInferenceResponse:
-        from
+    ) -> 'RemoteInferenceResponse':
+        from ..coop import Coop

         coop = Coop()
         return coop.remote_inference_get(job_uuid)
@@ -152,7 +149,7 @@ class JobsRemoteInferenceHandler:
         if testing_simulated_response is not None:
             return lambda job_uuid: testing_simulated_response
         else:
-            from
+            from ..coop import Coop

             coop = Coop()
             return coop.remote_inference_get
@@ -164,7 +161,7 @@ class JobsRemoteInferenceHandler:
         if testing_simulated_response is not None:
             return lambda results_uuid, expected_object_type: Results.example()
         else:
-            from
+            from ..coop import Coop

             coop = Coop()
             return coop.get
edsl/jobs/results_exceptions_handler.py
@@ -1,6 +1,6 @@
 from typing import Optional, TYPE_CHECKING, Protocol
 import sys
-from edsl.scenarios.FileStore import HTMLFileStore
+#from edsl.scenarios.FileStore import HTMLFileStore
 from edsl.config import CONFIG
 from edsl.coop.coop import Coop

@@ -88,6 +88,6 @@ class ResultsExceptionsHandler:

         # Handle remote logging if enabled
         if self.remote_logging:
-            filestore =
+            filestore = FileStore(filepath)
             coop_details = filestore.push(description="Exceptions Report")
             print(coop_details)
edsl/key_management/__init__.py (new file)
@@ -0,0 +1,28 @@
+"""Key management system for API tokens and rate limits.
+
+The key_management module provides a flexible system for managing API keys, credentials, and
+rate limits for various language model services. It handles discovery, storage, and retrieval
+of API keys from multiple sources including environment variables, configuration files, and
+remote services.
+
+Key components:
+- KeyLookup: Dictionary-like container for service credentials and rate limits
+- KeyLookupBuilder: Factory that builds KeyLookup objects by gathering credentials
+- KeyLookupCollection: Singleton collection to avoid rebuilding KeyLookup objects
+- Data models: Structured representations of API keys, rate limits, and credentials
+
+This module supports multiple credential sources with configurable priority, allowing
+EDSL to use different API keys in different environments while maintaining a consistent
+interface for the rest of the system.
+
+Typical usage:
+    ```python
+    from edsl.key_management import KeyLookupBuilder
+    keys = KeyLookupBuilder().build()
+    openai_key = keys['openai'].api_token
+    ```
+"""
+
+from .key_lookup import KeyLookup
+from .key_lookup_collection import KeyLookupCollection
+from .key_lookup_builder import KeyLookupBuilder
edsl/key_management/key_lookup.py (new file)
@@ -0,0 +1,161 @@
+from collections import UserDict
+from dataclasses import asdict
+
+from ..enums import service_to_api_keyname
+
+from .models import LanguageModelInput
+
+class KeyLookup(UserDict):
+    """Dictionary-like container for storing and accessing language model service credentials.
+
+    KeyLookup provides a centralized store for API keys, rate limits, and other configuration
+    needed to authenticate with various language model services. It inherits from UserDict,
+    using service names as keys (e.g., 'openai', 'anthropic') and LanguageModelInput objects
+    as values.
+
+    The class provides convenient methods for:
+    - Serializing to and from dictionaries for storage
+    - Generating .env files for environment configuration
+    - Creating example instances for testing
+
+    Typical usage:
+        >>> from edsl.key_management.models import LanguageModelInput
+        >>> lookup = KeyLookup()
+        >>> lookup['openai'] = LanguageModelInput(api_token='sk-key123', rpm=60, tpm=100000)
+        >>> openai_config = lookup['openai']
+        >>> openai_config.api_token
+        'sk-key123'
+
+    Serialization example:
+        >>> lookup = KeyLookup()
+        >>> lm_input = LanguageModelInput.example()
+        >>> lookup['test'] = lm_input
+        >>> lookup.to_dict()['test']['api_token']
+        'sk-abcd123'
+        >>> restored = KeyLookup.from_dict(lookup.to_dict())
+        >>> restored['test'].api_token
+        'sk-abcd123'
+
+    Technical Notes:
+        - Uses LanguageModelInput dataclass for structured storage
+        - Preserves source information for debugging and transparency
+        - Supports conversion to environment variable format
+    """
+
+    def to_dict(self):
+        """Convert the KeyLookup to a serializable dictionary.
+
+        Converts each LanguageModelInput value to a dictionary using dataclasses.asdict,
+        producing a nested dictionary structure suitable for JSON serialization.
+
+        Returns:
+            dict: A dictionary with service names as keys and serialized LanguageModelInput
+                objects as values
+
+        Examples:
+            >>> kl = KeyLookup.example()
+            >>> serialized = kl.to_dict()
+            >>> 'test' in serialized
+            True
+            >>> 'api_token' in serialized['test']
+            True
+
+            >>> kl2 = KeyLookup.from_dict(kl.to_dict())
+            >>> kl2 == kl # Equal content
+            True
+            >>> kl2 is kl # But different objects
+            False
+        """
+        return {k: asdict(v) for k, v in self.data.items()}
+
+    @classmethod
+    def from_dict(cls, d):
+        """Create a KeyLookup instance from a dictionary representation.
+
+        Converts a dictionary produced by to_dict() back into a KeyLookup instance,
+        reconstructing LanguageModelInput objects from their serialized form.
+
+        Args:
+            d (dict): Dictionary with service names as keys and serialized
+                LanguageModelInput objects as values
+
+        Returns:
+            KeyLookup: A new KeyLookup instance populated with the deserialized data
+
+        Examples:
+            >>> data = {
+            ...     'openai': {
+            ...         'api_token': 'sk-test',
+            ...         'rpm': 60,
+            ...         'tpm': 100000
+            ...     }
+            ... }
+            >>> lookup = KeyLookup.from_dict(data)
+            >>> lookup['openai'].api_token
+            'sk-test'
+        """
+        return cls({k: LanguageModelInput(**v) for k, v in d.items()})
+
+    @classmethod
+    def example(cls):
+        """Create an example KeyLookup instance for testing and documentation.
+
+        Returns:
+            KeyLookup: A new KeyLookup instance with example services and credentials
+
+        Examples:
+            >>> example = KeyLookup.example()
+            >>> 'test' in example
+            True
+            >>> 'openai' in example
+            True
+        """
+        return cls(
+            {
+                "test": LanguageModelInput.example(),
+                "openai": LanguageModelInput.example(),
+            }
+        )
+
+    def to_dot_env(self):
+        """Generate environment variable definitions for a .env file.
+
+        Creates a string with environment variable definitions suitable for a .env file,
+        containing service API keys and rate limits in the standard format expected
+        by the key_management system.
+
+        Returns:
+            str: A string with newline-separated environment variable definitions
+
+        Examples:
+            >>> lookup = KeyLookup({
+            ...     'test': LanguageModelInput(api_token='test', rpm=10, tpm=20000),
+            ...     'openai': LanguageModelInput(api_token='sk-1234', rpm=60, tpm=100000)
+            ... })
+            >>> env_str = lookup.to_dot_env()
+            >>> 'EDSL_SERVICE_RPM_OPENAI=60' in env_str
+            True
+            >>> 'OPENAI_API_KEY=sk-1234' in env_str
+            True
+
+        Technical Notes:
+            - Skips the 'test' service which is for internal testing
+            - Handles special cases for service names that don't match their API key names
+            - Includes API IDs for services that require them (e.g., AWS Bedrock)
+        """
+        lines = []
+        for service, lm_input in self.items():
+            if service != "test":
+                lines.append(f"EDSL_SERVICE_RPM_{service.upper()}={lm_input.rpm}")
+                lines.append(f"EDSL_SERVICE_TPM_{service.upper()}={lm_input.tpm}")
+                key_name = service_to_api_keyname.get(service, service)
+                lines.append(f"{key_name.upper()}={lm_input.api_token}")
+                if lm_input.api_id is not None:
+                    lines.append(f"{service.upper()}_API_ID={lm_input.api_id}")
+        return "\n".join([f"{line}" for line in lines])
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
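The doctests in key_lookup.py above already exercise the individual methods; below is a short end-to-end sketch of the workflow they imply. Only APIs shown in the diff (KeyLookup, LanguageModelInput, to_dict/from_dict, to_dot_env) are used; the output file path and the write step are illustrative.

```python
from edsl.key_management import KeyLookup
from edsl.key_management.models import LanguageModelInput

# Build a lookup by hand; per the package docstring above, KeyLookupBuilder().build()
# would instead gather credentials from the environment.
lookup = KeyLookup()
lookup["openai"] = LanguageModelInput(api_token="sk-1234", rpm=60, tpm=100000)

# Round-trip through a plain dict (e.g. for JSON storage).
restored = KeyLookup.from_dict(lookup.to_dict())
assert restored["openai"].api_token == "sk-1234"

# Emit .env-style lines such as OPENAI_API_KEY=sk-1234 and EDSL_SERVICE_RPM_OPENAI=60.
with open(".env.example", "w") as f:  # hypothetical output path
    f.write(lookup.to_dot_env())
```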