edsl 0.1.15__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +348 -38
- edsl/BaseDiff.py +260 -0
- edsl/TemplateLoader.py +24 -0
- edsl/__init__.py +45 -10
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +842 -144
- edsl/agents/AgentList.py +521 -25
- edsl/agents/Invigilator.py +250 -374
- edsl/agents/InvigilatorBase.py +257 -0
- edsl/agents/PromptConstructor.py +272 -0
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/descriptors.py +43 -13
- edsl/agents/prompt_helpers.py +129 -0
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -0
- edsl/auto/StageBase.py +243 -0
- edsl/auto/StageGenerateSurvey.py +178 -0
- edsl/auto/StageLabelQuestions.py +125 -0
- edsl/auto/StagePersona.py +61 -0
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
- edsl/auto/StagePersonaDimensionValues.py +74 -0
- edsl/auto/StagePersonaDimensions.py +69 -0
- edsl/auto/StageQuestions.py +74 -0
- edsl/auto/SurveyCreatorPipeline.py +21 -0
- edsl/auto/utilities.py +218 -0
- edsl/base/Base.py +279 -0
- edsl/config.py +115 -113
- edsl/conversation/Conversation.py +290 -0
- edsl/conversation/car_buying.py +59 -0
- edsl/conversation/chips.py +95 -0
- edsl/conversation/mug_negotiation.py +81 -0
- edsl/conversation/next_speaker_utilities.py +93 -0
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -0
- edsl/coop/__init__.py +1 -0
- edsl/coop/coop.py +1029 -134
- edsl/coop/utils.py +131 -0
- edsl/data/Cache.py +560 -89
- edsl/data/CacheEntry.py +230 -0
- edsl/data/CacheHandler.py +168 -0
- edsl/data/RemoteCacheSync.py +186 -0
- edsl/data/SQLiteDict.py +292 -0
- edsl/data/__init__.py +5 -3
- edsl/data/orm.py +6 -33
- edsl/data_transfer_models.py +74 -27
- edsl/enums.py +165 -8
- edsl/exceptions/BaseException.py +21 -0
- edsl/exceptions/__init__.py +52 -46
- edsl/exceptions/agents.py +33 -15
- edsl/exceptions/cache.py +5 -0
- edsl/exceptions/coop.py +8 -0
- edsl/exceptions/general.py +34 -0
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +15 -0
- edsl/exceptions/language_models.py +46 -1
- edsl/exceptions/questions.py +80 -5
- edsl/exceptions/results.py +16 -5
- edsl/exceptions/scenarios.py +29 -0
- edsl/exceptions/surveys.py +13 -10
- edsl/inference_services/AnthropicService.py +106 -0
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -0
- edsl/inference_services/AzureAI.py +215 -0
- edsl/inference_services/DeepInfraService.py +18 -0
- edsl/inference_services/GoogleService.py +143 -0
- edsl/inference_services/GroqService.py +20 -0
- edsl/inference_services/InferenceServiceABC.py +80 -0
- edsl/inference_services/InferenceServicesCollection.py +138 -0
- edsl/inference_services/MistralAIService.py +120 -0
- edsl/inference_services/OllamaService.py +18 -0
- edsl/inference_services/OpenAIService.py +236 -0
- edsl/inference_services/PerplexityService.py +160 -0
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -0
- edsl/inference_services/TogetherAIService.py +172 -0
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -0
- edsl/inference_services/rate_limits_cache.py +25 -0
- edsl/inference_services/registry.py +41 -0
- edsl/inference_services/write_available.py +10 -0
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +21 -20
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +684 -206
- edsl/jobs/JobsChecks.py +172 -0
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -0
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -0
- edsl/jobs/buckets/ModelBuckets.py +65 -0
- edsl/jobs/buckets/TokenBucket.py +283 -0
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +392 -0
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -0
- edsl/jobs/interviews/InterviewStatistic.py +63 -0
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -0
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -0
- edsl/jobs/interviews/InterviewStatusLog.py +92 -0
- edsl/jobs/interviews/ReportErrors.py +66 -0
- edsl/jobs/interviews/interview_status_enum.py +9 -0
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -110
- edsl/jobs/runners/JobsRunnerStatus.py +298 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -0
- edsl/jobs/tasks/TaskCreators.py +64 -0
- edsl/jobs/tasks/TaskHistory.py +470 -0
- edsl/jobs/tasks/TaskStatusLog.py +23 -0
- edsl/jobs/tasks/task_status_enum.py +161 -0
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -0
- edsl/jobs/tokens/TokenUsage.py +34 -0
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +507 -386
- edsl/language_models/ModelList.py +164 -0
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -0
- edsl/language_models/__init__.py +1 -8
- edsl/language_models/fake_openai_call.py +15 -0
- edsl/language_models/fake_openai_service.py +61 -0
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +109 -41
- edsl/language_models/utilities.py +65 -0
- edsl/notebooks/Notebook.py +263 -0
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -0
- edsl/prompts/Prompt.py +222 -93
- edsl/prompts/__init__.py +1 -1
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -0
- edsl/questions/QuestionBasePromptsMixin.py +221 -0
- edsl/questions/QuestionBudget.py +164 -67
- edsl/questions/QuestionCheckBox.py +281 -62
- edsl/questions/QuestionDict.py +343 -0
- edsl/questions/QuestionExtract.py +136 -50
- edsl/questions/QuestionFreeText.py +79 -55
- edsl/questions/QuestionFunctional.py +138 -41
- edsl/questions/QuestionList.py +184 -57
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +293 -69
- edsl/questions/QuestionNumerical.py +109 -56
- edsl/questions/QuestionRank.py +244 -49
- edsl/questions/Quick.py +41 -0
- edsl/questions/SimpleAskMixin.py +74 -0
- edsl/questions/__init__.py +9 -6
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +153 -38
- edsl/questions/compose_questions.py +13 -7
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -0
- edsl/questions/derived/QuestionLikertFive.py +28 -26
- edsl/questions/derived/QuestionLinearScale.py +41 -28
- edsl/questions/derived/QuestionTopK.py +34 -26
- edsl/questions/derived/QuestionYesNo.py +40 -27
- edsl/questions/descriptors.py +228 -74
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -0
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
- edsl/questions/prompt_templates/question_extract.jinja +11 -0
- edsl/questions/prompt_templates/question_free_text.jinja +3 -0
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
- edsl/questions/prompt_templates/question_list.jinja +17 -0
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
- edsl/questions/prompt_templates/question_numerical.jinja +37 -0
- edsl/questions/question_base_gen_mixin.py +168 -0
- edsl/questions/question_registry.py +130 -46
- edsl/questions/register_questions_meta.py +71 -0
- edsl/questions/response_validator_abc.py +188 -0
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +5 -2
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +7 -0
- edsl/questions/templates/budget/question_presentation.jinja +7 -0
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
- edsl/questions/templates/dict/__init__.py +0 -0
- edsl/questions/templates/dict/answering_instructions.jinja +21 -0
- edsl/questions/templates/dict/question_presentation.jinja +1 -0
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +7 -0
- edsl/questions/templates/extract/question_presentation.jinja +1 -0
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +1 -0
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
- edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +4 -0
- edsl/questions/templates/list/question_presentation.jinja +5 -0
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +7 -0
- edsl/questions/templates/numerical/question_presentation.jinja +7 -0
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +11 -0
- edsl/questions/templates/rank/question_presentation.jinja +15 -0
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
- edsl/questions/templates/top_k/question_presentation.jinja +22 -0
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
- edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
- edsl/results/CSSParameterizer.py +108 -0
- edsl/results/Dataset.py +550 -19
- edsl/results/DatasetExportMixin.py +594 -0
- edsl/results/DatasetTree.py +295 -0
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +477 -173
- edsl/results/Results.py +987 -269
- edsl/results/ResultsExportMixin.py +28 -125
- edsl/results/ResultsGGMixin.py +83 -15
- edsl/results/TableDisplay.py +125 -0
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +1 -1
- edsl/results/file_exports.py +252 -0
- edsl/results/results_fetch_mixin.py +33 -0
- edsl/results/results_selector.py +145 -0
- edsl/results/results_tools_mixin.py +98 -0
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +78 -0
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -0
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +543 -0
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +431 -62
- edsl/scenarios/ScenarioHtmlMixin.py +65 -0
- edsl/scenarios/ScenarioList.py +1415 -45
- edsl/scenarios/ScenarioListExportMixin.py +45 -0
- edsl/scenarios/ScenarioListPdfMixin.py +239 -0
- edsl/scenarios/__init__.py +2 -0
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +49 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/scenario_join.py +131 -0
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -0
- edsl/study/ObjectEntry.py +173 -0
- edsl/study/ProofOfWork.py +113 -0
- edsl/study/SnapShot.py +80 -0
- edsl/study/Study.py +521 -0
- edsl/study/__init__.py +4 -0
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +92 -11
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +9 -4
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +156 -35
- edsl/surveys/Rule.py +221 -74
- edsl/surveys/RuleCollection.py +241 -61
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1079 -339
- edsl/surveys/SurveyCSS.py +273 -0
- edsl/surveys/SurveyExportMixin.py +235 -40
- edsl/surveys/SurveyFlowVisualization.py +181 -0
- edsl/surveys/SurveyQualtricsImport.py +284 -0
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +4 -2
- edsl/surveys/base.py +19 -3
- edsl/surveys/descriptors.py +17 -6
- edsl/surveys/instructions/ChangeInstruction.py +48 -0
- edsl/surveys/instructions/Instruction.py +56 -0
- edsl/surveys/instructions/InstructionCollection.py +82 -0
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +24 -0
- edsl/templates/error_reporting/exceptions_by_model.html +35 -0
- edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
- edsl/templates/error_reporting/exceptions_by_type.html +17 -0
- edsl/templates/error_reporting/interview_details.html +116 -0
- edsl/templates/error_reporting/interviews.html +19 -0
- edsl/templates/error_reporting/overview.html +5 -0
- edsl/templates/error_reporting/performance_plot.html +2 -0
- edsl/templates/error_reporting/report.css +74 -0
- edsl/templates/error_reporting/report.html +118 -0
- edsl/templates/error_reporting/report.js +25 -0
- edsl/tools/__init__.py +1 -0
- edsl/tools/clusters.py +192 -0
- edsl/tools/embeddings.py +27 -0
- edsl/tools/embeddings_plotting.py +118 -0
- edsl/tools/plotting.py +112 -0
- edsl/tools/summarize.py +18 -0
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +5 -0
- edsl/utilities/__init__.py +21 -20
- edsl/utilities/ast_utilities.py +3 -0
- edsl/utilities/data/Registry.py +2 -0
- edsl/utilities/decorators.py +41 -0
- edsl/utilities/gcp_bucket/__init__.py +0 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
- edsl/utilities/interface.py +310 -60
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -0
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -0
- edsl/utilities/restricted_python.py +70 -0
- edsl/utilities/utilities.py +203 -13
- edsl-0.1.40.dist-info/METADATA +111 -0
- edsl-0.1.40.dist-info/RECORD +362 -0
- {edsl-0.1.15.dist-info → edsl-0.1.40.dist-info}/WHEEL +1 -1
- edsl/agents/AgentListExportMixin.py +0 -24
- edsl/coop/old.py +0 -31
- edsl/data/Database.py +0 -141
- edsl/data/crud.py +0 -121
- edsl/jobs/Interview.py +0 -435
- edsl/jobs/JobsRunner.py +0 -63
- edsl/jobs/JobsRunnerStatusMixin.py +0 -115
- edsl/jobs/base.py +0 -47
- edsl/jobs/buckets.py +0 -178
- edsl/jobs/runners/JobsRunnerDryRun.py +0 -19
- edsl/jobs/runners/JobsRunnerStreaming.py +0 -54
- edsl/jobs/task_management.py +0 -215
- edsl/jobs/token_tracking.py +0 -78
- edsl/language_models/DeepInfra.py +0 -69
- edsl/language_models/OpenAI.py +0 -98
- edsl/language_models/model_interfaces/GeminiPro.py +0 -66
- edsl/language_models/model_interfaces/LanguageModelOpenAIFour.py +0 -8
- edsl/language_models/model_interfaces/LanguageModelOpenAIThreeFiveTurbo.py +0 -8
- edsl/language_models/model_interfaces/LlamaTwo13B.py +0 -21
- edsl/language_models/model_interfaces/LlamaTwo70B.py +0 -21
- edsl/language_models/model_interfaces/Mixtral8x7B.py +0 -24
- edsl/language_models/registry.py +0 -81
- edsl/language_models/schemas.py +0 -15
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/prompts/QuestionInstructionsBase.py +0 -6
- edsl/prompts/library/agent_instructions.py +0 -29
- edsl/prompts/library/agent_persona.py +0 -17
- edsl/prompts/library/question_budget.py +0 -26
- edsl/prompts/library/question_checkbox.py +0 -32
- edsl/prompts/library/question_extract.py +0 -19
- edsl/prompts/library/question_freetext.py +0 -14
- edsl/prompts/library/question_linear_scale.py +0 -20
- edsl/prompts/library/question_list.py +0 -22
- edsl/prompts/library/question_multiple_choice.py +0 -44
- edsl/prompts/library/question_numerical.py +0 -31
- edsl/prompts/library/question_rank.py +0 -21
- edsl/prompts/prompt_config.py +0 -33
- edsl/prompts/registry.py +0 -185
- edsl/questions/Question.py +0 -240
- edsl/report/InputOutputDataTypes.py +0 -134
- edsl/report/RegressionMixin.py +0 -28
- edsl/report/ReportOutputs.py +0 -1228
- edsl/report/ResultsFetchMixin.py +0 -106
- edsl/report/ResultsOutputMixin.py +0 -14
- edsl/report/demo.ipynb +0 -645
- edsl/results/ResultsDBMixin.py +0 -184
- edsl/surveys/SurveyFlowVisualizationMixin.py +0 -92
- edsl/trackers/Tracker.py +0 -91
- edsl/trackers/TrackerAPI.py +0 -196
- edsl/trackers/TrackerTasks.py +0 -70
- edsl/utilities/pastebin.py +0 -141
- edsl-0.1.15.dist-info/METADATA +0 -69
- edsl-0.1.15.dist-info/RECORD +0 -142
- /edsl/{language_models/model_interfaces → inference_services}/__init__.py +0 -0
- /edsl/{report/__init__.py → jobs/runners/JobsRunnerStatusData.py} +0 -0
- /edsl/{trackers/__init__.py → language_models/ServiceDataSources.py} +0 -0
- {edsl-0.1.15.dist-info → edsl-0.1.40.dist-info}/LICENSE +0 -0
@@ -0,0 +1,90 @@
|
|
1
|
+
from typing import Any, List, Optional
|
2
|
+
import os
|
3
|
+
import asyncio
|
4
|
+
from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
|
5
|
+
from edsl.language_models.LanguageModel import LanguageModel
|
6
|
+
from edsl.inference_services.rate_limits_cache import rate_limits
|
7
|
+
from edsl.utilities.utilities import fix_partial_correct_response
|
8
|
+
|
9
|
+
from edsl.enums import InferenceServiceType
|
10
|
+
import random
|
11
|
+
|
12
|
+
|
13
|
+
class TestService(InferenceServiceABC):
    """Canned-response inference service used for testing EDSL without real API calls."""

    _inference_service_ = "test"
    _env_key_name_ = None  # no API key is required for the test service
    _base_url_ = None

    _sync_client_ = None
    _async_client_ = None

    _sync_client_instance = None
    _async_client_instance = None

    key_sequence = None
    usage_sequence = None
    model_exclude_list = []
    input_token_name = "prompt_tokens"
    output_token_name = "completion_tokens"

    @classmethod
    def available(cls) -> list[str]:
        """Return the single model name served by this service."""
        return ["test"]

    @classmethod
    def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
        """Build and return a LanguageModel subclass that yields canned responses.

        Instances of the returned class may set these attributes to steer behavior:

        - ``canned_response``: the text to return (defaults to "Hello, world").
        - ``func``: a callable ``(user_prompt, system_prompt, files_list) -> text``
          that computes the response text.
        - ``throw_exception`` / ``exception_probability``: when set, raise a test
          error with the given probability (probability defaults to 1).
        """

        class TestServiceLanguageModel(LanguageModel):
            _model_ = "test"
            _parameters_ = {"temperature": 0.5}
            _inference_service_ = InferenceServiceType.TEST.value
            usage_sequence = ["usage"]
            key_sequence = ["message", 0, "text"]
            input_token_name = cls.input_token_name
            output_token_name = cls.output_token_name
            _rpm = 1000
            _tpm = 100000

            @property
            def _canned_response(self):
                # Fall back to a fixed greeting when no canned_response was set.
                if hasattr(self, "canned_response"):
                    return self.canned_response
                else:
                    return "Hello, world"

            async def async_execute_model_call(
                self,
                user_prompt: str,
                system_prompt: str,
                # func: Optional[callable] = None,
                files_list: Optional[List["File"]] = None,
            ) -> dict[str, Any]:
                """Simulate a model call; return a response dict shaped like a real service's."""
                # Small delay so async scheduling behaves like a real network call.
                await asyncio.sleep(0.1)

                # Optionally raise, with probability ``exception_probability`` (default 1).
                if hasattr(self, "throw_exception") and self.throw_exception:
                    if hasattr(self, "exception_probability"):
                        p = self.exception_probability
                    else:
                        p = 1

                    if random.random() < p:
                        raise Exception("This is a test error")

                # A user-supplied function, if present, computes the response text.
                if hasattr(self, "func"):
                    return {
                        "message": [
                            {"text": self.func(user_prompt, system_prompt, files_list)}
                        ],
                        "usage": {"prompt_tokens": 1, "completion_tokens": 1},
                    }

                return {
                    "message": [{"text": f"{self._canned_response}"}],
                    "usage": {"prompt_tokens": 1, "completion_tokens": 1},
                }

        return TestServiceLanguageModel
@@ -0,0 +1,172 @@
|
|
1
|
+
import aiohttp
|
2
|
+
import json
|
3
|
+
import requests
|
4
|
+
from typing import Any, List, Optional
|
5
|
+
|
6
|
+
# from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
|
7
|
+
from edsl.language_models import LanguageModel
|
8
|
+
|
9
|
+
from edsl.inference_services.OpenAIService import OpenAIService
|
10
|
+
import openai
|
11
|
+
|
12
|
+
|
13
|
+
class TogetherAIService(OpenAIService):
    """Together AI service class (OpenAI-compatible API at api.together.xyz)."""

    _inference_service_ = "together"
    _env_key_name_ = "TOGETHER_API_KEY"
    _base_url_ = "https://api.together.xyz/v1"
    _models_list_cache: List[str] = []

    # These are non-serverless models. There was no api param to filter them
    model_exclude_list = [
        "EleutherAI/llemma_7b",
        "HuggingFaceH4/zephyr-7b-beta",
        "Nexusflow/NexusRaven-V2-13B",
        "NousResearch/Hermes-2-Theta-Llama-3-70B",
        "NousResearch/Nous-Capybara-7B-V1p9",
        "NousResearch/Nous-Hermes-13b",
        "NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
        "NousResearch/Nous-Hermes-Llama2-13b",
        "NousResearch/Nous-Hermes-Llama2-70b",
        "NousResearch/Nous-Hermes-llama-2-7b",
        "NumbersStation/nsql-llama-2-7B",
        "Open-Orca/Mistral-7B-OpenOrca",
        "Phind/Phind-CodeLlama-34B-Python-v1",
        "Phind/Phind-CodeLlama-34B-v2",
        "Qwen/Qwen1.5-0.5B",
        "Qwen/Qwen1.5-0.5B-Chat",
        "Qwen/Qwen1.5-1.8B",
        "Qwen/Qwen1.5-1.8B-Chat",
        "Qwen/Qwen1.5-14B",
        "Qwen/Qwen1.5-14B-Chat",
        "Qwen/Qwen1.5-32B",
        "Qwen/Qwen1.5-32B-Chat",
        "Qwen/Qwen1.5-4B",
        "Qwen/Qwen1.5-4B-Chat",
        "Qwen/Qwen1.5-72B",
        "Qwen/Qwen1.5-7B",
        "Qwen/Qwen1.5-7B-Chat",
        "Qwen/Qwen2-1.5B",
        "Qwen/Qwen2-1.5B-Instruct",
        "Qwen/Qwen2-72B",
        "Qwen/Qwen2-7B",
        "Qwen/Qwen2-7B-Instruct",
        "SG161222/Realistic_Vision_V3.0_VAE",
        "Snowflake/snowflake-arctic-instruct",
        "Undi95/ReMM-SLERP-L2-13B",
        "Undi95/Toppy-M-7B",
        "WizardLM/WizardCoder-Python-34B-V1.0",
        "WizardLM/WizardLM-13B-V1.2",
        "WizardLM/WizardLM-70B-V1.0",
        "allenai/OLMo-7B",
        "allenai/OLMo-7B-Instruct",
        "bert-base-uncased",
        "codellama/CodeLlama-13b-Instruct-hf",
        "codellama/CodeLlama-13b-Python-hf",
        "codellama/CodeLlama-13b-hf",
        "codellama/CodeLlama-34b-Python-hf",
        "codellama/CodeLlama-34b-hf",
        "codellama/CodeLlama-70b-Instruct-hf",
        "codellama/CodeLlama-70b-Python-hf",
        "codellama/CodeLlama-70b-hf",
        "codellama/CodeLlama-7b-Instruct-hf",
        "codellama/CodeLlama-7b-Python-hf",
        "codellama/CodeLlama-7b-hf",
        "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
        "deepseek-ai/deepseek-coder-33b-instruct",
        "garage-bAInd/Platypus2-70B-instruct",
        "google/gemma-2b",
        "google/gemma-7b",
        "google/gemma-7b-it",
        "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
        "hazyresearch/M2-BERT-2k-Retrieval-Encoder-V1",
        "huggyllama/llama-13b",
        "huggyllama/llama-30b",
        "huggyllama/llama-65b",
        "huggyllama/llama-7b",
        "lmsys/vicuna-13b-v1.3",
        "lmsys/vicuna-13b-v1.5",
        "lmsys/vicuna-13b-v1.5-16k",
        "lmsys/vicuna-7b-v1.3",
        "lmsys/vicuna-7b-v1.5",
        "meta-llama/Llama-2-13b-hf",
        "meta-llama/Llama-2-70b-chat-hf",
        "meta-llama/Llama-2-7b-hf",
        "meta-llama/Llama-3-70b-hf",
        "meta-llama/Llama-3-8b-hf",
        "meta-llama/Meta-Llama-3-70B",
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
        "meta-llama/Meta-Llama-3.1-70B-Reference",
        "meta-llama/Meta-Llama-3.1-8B-Reference",
        "microsoft/phi-2",
        "mistralai/Mixtral-8x22B",
        "openchat/openchat-3.5-1210",
        "prompthero/openjourney",
        "runwayml/stable-diffusion-v1-5",
        "sentence-transformers/msmarco-bert-base-dot-v5",
        "snorkelai/Snorkel-Mistral-PairRM-DPO",
        "stabilityai/stable-diffusion-2-1",
        "teknium/OpenHermes-2-Mistral-7B",
        "teknium/OpenHermes-2p5-Mistral-7B",
        "togethercomputer/CodeLlama-13b-Instruct",
        "togethercomputer/CodeLlama-13b-Python",
        "togethercomputer/CodeLlama-34b",
        "togethercomputer/CodeLlama-34b-Python",
        "togethercomputer/CodeLlama-7b-Instruct",
        "togethercomputer/CodeLlama-7b-Python",
        "togethercomputer/Koala-13B",
        "togethercomputer/Koala-7B",
        "togethercomputer/LLaMA-2-7B-32K",
        "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
        "togethercomputer/StripedHyena-Hessian-7B",
        "togethercomputer/alpaca-7b",
        "togethercomputer/evo-1-131k-base",
        "togethercomputer/evo-1-8k-base",
        "togethercomputer/guanaco-13b",
        "togethercomputer/guanaco-33b",
        "togethercomputer/guanaco-65b",
        "togethercomputer/guanaco-7b",
        "togethercomputer/llama-2-13b",
        "togethercomputer/llama-2-70b-chat",
        "togethercomputer/llama-2-7b",
        "wavymulder/Analog-Diffusion",
        "zero-one-ai/Yi-34B",
        "zero-one-ai/Yi-34B-Chat",
        "zero-one-ai/Yi-6B",
    ]

    _sync_client_ = openai.OpenAI
    _async_client_ = openai.AsyncOpenAI

    @classmethod
    def get_model_list(cls, api_token=None):
        """Fetch the serverless model list from the Together API.

        Together.ai's model-list response differs from OpenAI's, and the
        OpenAI client errors when calling ``.models.list()`` against it,
        so we hit the REST endpoint directly.
        """
        import requests
        import os

        url = "https://api.together.xyz/v1/models?filter=serverless"
        if api_token is None:
            # Fall back to the environment variable when no token is passed.
            api_token = os.getenv(cls._env_key_name_)

        headers = {"accept": "application/json", "authorization": f"Bearer {api_token}"}

        response = requests.get(url, headers=headers)
        return response.json()

    @classmethod
    def available(cls) -> List[str]:
        """Return available model ids, excluding known non-serverless models.

        The result is cached on the class after the first successful fetch.
        """
        if not cls._models_list_cache:
            cls._models_list_cache = [
                m["id"]
                for m in cls.get_model_list()
                if m["id"] not in cls.model_exclude_list
            ]
        return cls._models_list_cache
@@ -0,0 +1,134 @@
|
|
1
|
+
from collections import UserDict, defaultdict, UserList
|
2
|
+
from typing import Union, Optional, List
|
3
|
+
from edsl.enums import InferenceServiceLiteral
|
4
|
+
from dataclasses import dataclass
|
5
|
+
|
6
|
+
|
7
|
+
@dataclass
class LanguageModelInfo:
    """Record pairing a model name with the inference service that hosts it.

    Supports tuple-style unpacking for backward compatibility:

    >>> LanguageModelInfo("gpt-4-1106-preview", "openai")
    LanguageModelInfo(model_name='gpt-4-1106-preview', service_name='openai')

    >>> model_name, service = LanguageModelInfo.example()
    >>> model_name
    'gpt-4-1106-preview'

    >>> LanguageModelInfo.example().service_name
    'openai'

    """

    model_name: str
    service_name: str

    def __iter__(self):
        # Iterate as (model_name, service_name) so callers can unpack like a tuple.
        return iter((self.model_name, self.service_name))

    def __getitem__(self, key: int) -> str:
        """Positional access kept for tuple-era callers; deprecated."""
        import warnings

        warnings.warn(
            "Accessing LanguageModelInfo via index is deprecated. "
            "Please use .model_name, .service_name, or .index attributes instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        # Only indices 0 and 1 are valid; anything else is an error.
        if key == 0:
            return self.model_name
        if key == 1:
            return self.service_name
        raise IndexError("Index out of range")

    @classmethod
    def example(cls) -> "LanguageModelInfo":
        """Return a representative instance for docs and tests."""
        return cls("gpt-4-1106-preview", "openai")
51
|
+
|
52
|
+
|
53
|
+
class ModelNamesList(UserList):
    """List subclass used as a distinct type for collections of model names.

    Adds no behavior over ``UserList``; it exists so type annotations and
    isinstance checks can distinguish model-name lists from plain lists.
    """

    pass
55
|
+
|
56
|
+
|
57
|
+
class AvailableModels(UserList):
    """A list of LanguageModelInfo entries with search and export helpers."""

    def __init__(self, data: List["LanguageModelInfo"]) -> None:
        super().__init__(data)

    def __contains__(self, model_name: str) -> bool:
        """Membership is tested against entry *model names*, not the entries themselves."""
        for model_entry in self:
            if model_entry.model_name == model_name:
                return True
        return False

    def print(self):
        """Render the collection as a printed dataset table."""
        return self.to_dataset().print()

    def to_dataset(self):
        """Convert to a two-column (model, service) Dataset."""
        from edsl.scenarios.ScenarioList import ScenarioList

        models, services = zip(
            *[(model.model_name, model.service_name) for model in self]
        )
        return (
            ScenarioList.from_list("model", models)
            .add_list("service", services)
            .to_dataset()
        )

    def to_model_list(self):
        """Convert to a ModelList of instantiated models."""
        from edsl.language_models.ModelList import ModelList

        return ModelList.from_available_models(self)

    def search(
        self, pattern: str, service_name: Optional[str] = None, regex: bool = False
    ) -> "AvailableModels":
        """Filter entries by model name.

        :param pattern: glob-style pattern (``*`` wildcard) unless ``regex`` is True,
            in which case it is a raw regular expression.
        :param service_name: if given, also require an exact service-name match.
        :param regex: treat ``pattern`` as a regular expression instead of a glob.
        :raises ValueError: if the pattern is an invalid regex, or nothing matches.
        """
        import re

        if not regex:
            # Treat the pattern as a glob: escape regex metacharacters except *,
            # which becomes the regex wildcard ".*".
            pattern = re.escape(pattern).replace(r"\*", ".*")

        # Keep the try narrow: only compilation can raise re.error. The original
        # rebound the boolean `regex` parameter to the compiled pattern, which
        # this avoids.
        try:
            compiled = re.compile(pattern)
        except re.error as e:
            raise ValueError(f"Invalid regular expression pattern: {e}")

        matches = AvailableModels(
            [
                entry
                for entry in self
                if compiled.search(entry.model_name)
                and (service_name is None or entry.service_name == service_name)
            ]
        )
        if len(matches) == 0:
            raise ValueError(
                "No models found matching the search pattern: " + pattern
            )
        return matches
115
|
+
|
116
|
+
|
117
|
+
class ServiceToModelsMapping(UserDict):
|
118
|
+
def __init__(self, data: dict) -> None:
|
119
|
+
super().__init__(data)
|
120
|
+
|
121
|
+
@property
|
122
|
+
def service_names(self) -> list[str]:
|
123
|
+
return list(self.data.keys())
|
124
|
+
|
125
|
+
def _validate_service_names(self):
|
126
|
+
for service in self.service_names:
|
127
|
+
if service not in InferenceServiceLiteral:
|
128
|
+
raise ValueError(f"Invalid service name: {service}")
|
129
|
+
|
130
|
+
    def model_to_services(self) -> dict:
        """Build (and cache on self._model_to_service) the inverse mapping:
        model name -> list of services that offer it.

        NOTE(review): only the cache-population part is visible here; the
        method presumably returns self._model_to_service further down — confirm.
        """
        # Rebuilt from scratch on every call; stale entries cannot linger.
        self._model_to_service = defaultdict(list)
        for service, models in self.data.items():
            for model in models:
                self._model_to_service[model].append(service)
|
@@ -0,0 +1,118 @@
|
|
1
|
+
# Static cached snapshot of the model names each inference service reported as
# available at generation time. Regenerated by write_available(); do not edit
# by hand — entries go stale as providers add/retire models.
models_available = {
    "openai": [
        "gpt-3.5-turbo-1106",
        "gpt-4-0125-preview",
        "gpt-4-turbo-preview",
        "gpt-3.5-turbo-16k",
        "gpt-4-1106-preview",
        "gpt-4-turbo-2024-04-09",
        "gpt-3.5-turbo-16k-0613",
        "gpt-4o-2024-05-13",
        "gpt-4-turbo",
        "gpt-3.5-turbo-0613",
        "gpt-4",
        "gpt-4-0613",
        "gpt-3.5-turbo-0125",
        "gpt-3.5-turbo",
        "gpt-3.5-turbo-instruct",
        "gpt-3.5-turbo-instruct-0914",
        "gpt-3.5-turbo-0301",
        "gpt-4-vision-preview",
        "gpt-4-1106-vision-preview",
        "gpt-4o",
    ],
    "anthropic": [
        "claude-3-5-sonnet-20240620",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
    ],
    "deep_infra": [
        "meta-llama/Llama-2-13b-chat-hf",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "Gryphe/MythoMax-L2-13b-turbo",
        "mistralai/Mistral-7B-Instruct-v0.1",
        "Austism/chronos-hermes-13b-v2",
        "meta-llama/Llama-2-70b-chat-hf",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "meta-llama/Llama-2-7b-chat-hf",
        "Qwen/Qwen2-72B-Instruct",
        "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
        "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
        "bigcode/starcoder2-15b",
        "microsoft/WizardLM-2-8x22B",
        "codellama/CodeLlama-70b-Instruct-hf",
        "Gryphe/MythoMax-L2-13b",
        "microsoft/WizardLM-2-7B",
        "01-ai/Yi-34B-Chat",
        "bigcode/starcoder2-15b-instruct-v0.1",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "openchat/openchat-3.6-8b",
        "meta-llama/Meta-Llama-3-8B-Instruct",
        "microsoft/Phi-3-medium-4k-instruct",
        "Phind/Phind-CodeLlama-34B-v2",
        "google/codegemma-7b-it",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "deepinfra/airoboros-70b",
        "mistralai/Mixtral-8x22B-v0.1",
        "llava-hf/llava-1.5-7b-hf",
        "codellama/CodeLlama-34b-Instruct-hf",
        "google/gemma-1.1-7b-it",
        "lizpreciatior/lzlv_70b_fp16_hf",
        "databricks/dbrx-instruct",
        "nvidia/Nemotron-4-340B-Instruct",
        "Qwen/Qwen2-7B-Instruct",
        "meta-llama/Meta-Llama-3-70B-Instruct",
        "openchat/openchat_3.5",
    ],
    "google": [
        "gemini-1.0-pro",
        "gemini-1.0-pro-001",
        "gemini-1.0-pro-latest",
        "gemini-1.0-pro-vision-latest",
        "gemini-1.5-flash",
        "gemini-1.5-flash-001",
        "gemini-1.5-flash-001-tuning",
        "gemini-1.5-flash-002",
        "gemini-1.5-flash-8b",
        "gemini-1.5-flash-8b-001",
        "gemini-1.5-flash-8b-exp-0827",
        "gemini-1.5-flash-8b-exp-0924",
        "gemini-1.5-flash-8b-latest",
        "gemini-1.5-flash-exp-0827",
        "gemini-1.5-flash-latest",
        "gemini-1.5-pro",
        "gemini-1.5-pro-001",
        "gemini-1.5-pro-002",
        "gemini-1.5-pro-exp-0801",
        "gemini-1.5-pro-exp-0827",
        "gemini-1.5-pro-latest",
        "gemini-pro",
        "gemini-pro-vision",
    ],
    "bedrock": [
        "amazon.titan-tg1-large",
        "amazon.titan-text-lite-v1",
        "amazon.titan-text-express-v1",
        "anthropic.claude-instant-v1",
        "anthropic.claude-v2:1",
        "anthropic.claude-v2",
        "anthropic.claude-3-sonnet-20240229-v1:0",
        "anthropic.claude-3-haiku-20240307-v1:0",
        "anthropic.claude-3-opus-20240229-v1:0",
        "anthropic.claude-3-5-sonnet-20240620-v1:0",
        "cohere.command-text-v14",
        "cohere.command-r-v1:0",
        "cohere.command-r-plus-v1:0",
        "cohere.command-light-text-v14",
        "meta.llama3-8b-instruct-v1:0",
        "meta.llama3-70b-instruct-v1:0",
        "meta.llama3-1-8b-instruct-v1:0",
        "meta.llama3-1-70b-instruct-v1:0",
        "meta.llama3-1-405b-instruct-v1:0",
        "mistral.mistral-7b-instruct-v0:2",
        "mistral.mixtral-8x7b-instruct-v0:1",
        "mistral.mistral-large-2402-v1:0",
        "mistral.mistral-large-2407-v1:0",
    ],
}
|
@@ -0,0 +1,25 @@
|
|
1
|
+
# Example/captured HTTP response headers per service, presumably used as a
# fixture for rate-limit parsing — TODO confirm against callers.
# NOTE(review): this snapshot embeds a real-looking "openai-organization" id,
# a request id, and Cloudflare session cookies ("set-cookie"). These look like
# captured live-session values committed to source; confirm they are inert and
# consider scrubbing them.
rate_limits = {
    "openai": {
        "date": "Tue, 02 Jul 2024 15:25:28 GMT",
        "content-type": "application/json",
        "transfer-encoding": "chunked",
        "connection": "keep-alive",
        "openai-organization": "user-wmu32omw8ulzzutk6mjhtqgk",
        "openai-processing-ms": "760",
        "openai-version": "2020-10-01",
        "strict-transport-security": "max-age=31536000; includeSubDomains",
        "x-ratelimit-limit-requests": "5000",
        "x-ratelimit-limit-tokens": "600000",
        "x-ratelimit-remaining-requests": "4999",
        "x-ratelimit-remaining-tokens": "599978",
        "x-ratelimit-reset-requests": "12ms",
        "x-ratelimit-reset-tokens": "2ms",
        "x-request-id": "req_971608f3647f660a0cd6537fbe21f69c",
        "cf-cache-status": "DYNAMIC",
        "set-cookie": "__cf_bm=MJfUk.0TXdjtiNkUUqlUO2gaN3wzm0iHsRQRWExy52o-1719933928-1.0.1.1-0xk9gFxy_mD1KzAsKQ_HpL2pdQJ90D4B5frt65xU.c9k9QwD0oTBILqXB0rykXNh04Pm1UB1.H_W9sFJVOcSaw; path=/; expires=Tue, 02-Jul-24 15:55:28 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None, _cfuvid=GbheRct_iw9_I8iLWmt5ZRcLYZ_QVnroCrAt8QMVsUg-1719933928399-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None",
        "server": "cloudflare",
        "cf-ray": "89cfa6059bb9b68f-OTP",
        "content-encoding": "gzip",
        "alt-svc": 'h3=":443"; ma=86400',
    }
}
|
@@ -0,0 +1,41 @@
|
|
1
|
+
from edsl.inference_services.InferenceServicesCollection import (
|
2
|
+
InferenceServicesCollection,
|
3
|
+
)
|
4
|
+
|
5
|
+
from edsl.inference_services.OpenAIService import OpenAIService
|
6
|
+
from edsl.inference_services.AnthropicService import AnthropicService
|
7
|
+
from edsl.inference_services.DeepInfraService import DeepInfraService
|
8
|
+
from edsl.inference_services.GoogleService import GoogleService
|
9
|
+
from edsl.inference_services.GroqService import GroqService
|
10
|
+
from edsl.inference_services.AwsBedrock import AwsBedrockService
|
11
|
+
from edsl.inference_services.AzureAI import AzureAIService
|
12
|
+
from edsl.inference_services.OllamaService import OllamaService
|
13
|
+
from edsl.inference_services.TestService import TestService
|
14
|
+
from edsl.inference_services.TogetherAIService import TogetherAIService
|
15
|
+
from edsl.inference_services.PerplexityService import PerplexityService
|
16
|
+
|
17
|
+
# Optional dependency: mistralai may be absent or broken; any import failure
# simply disables the Mistral service rather than breaking the registry.
try:
    from edsl.inference_services.MistralAIService import MistralAIService

    mistral_available = True
except Exception:  # deliberately broad: best-effort optional import
    mistral_available = False

# Services that are always registered.
services = [
    OpenAIService,
    AnthropicService,
    DeepInfraService,
    GoogleService,
    GroqService,
    AwsBedrockService,
    AzureAIService,
    OllamaService,
    TestService,
    TogetherAIService,
    PerplexityService,
]

if mistral_available:
    services.append(MistralAIService)

# Default registry used across edsl to resolve models to inference services.
default = InferenceServicesCollection(services)
|
@@ -0,0 +1,10 @@
|
|
1
|
+
from edsl.inference_services.registry import default
|
2
|
+
|
3
|
+
|
4
|
+
def write_available():
    """Regenerate models_available_cache.py from the registered services.

    Queries every service in the default registry for its available models and
    writes the result as a Python literal assignment.
    """
    availability = {
        service._inference_service_: service.available()
        for service in default.services
    }

    with open("models_available_cache.py", "w") as f:
        f.write(f"models_available = {availability}")
|