edsl 0.1.15__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +348 -38
- edsl/BaseDiff.py +260 -0
- edsl/TemplateLoader.py +24 -0
- edsl/__init__.py +45 -10
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +842 -144
- edsl/agents/AgentList.py +521 -25
- edsl/agents/Invigilator.py +250 -374
- edsl/agents/InvigilatorBase.py +257 -0
- edsl/agents/PromptConstructor.py +272 -0
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/descriptors.py +43 -13
- edsl/agents/prompt_helpers.py +129 -0
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -0
- edsl/auto/StageBase.py +243 -0
- edsl/auto/StageGenerateSurvey.py +178 -0
- edsl/auto/StageLabelQuestions.py +125 -0
- edsl/auto/StagePersona.py +61 -0
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
- edsl/auto/StagePersonaDimensionValues.py +74 -0
- edsl/auto/StagePersonaDimensions.py +69 -0
- edsl/auto/StageQuestions.py +74 -0
- edsl/auto/SurveyCreatorPipeline.py +21 -0
- edsl/auto/utilities.py +218 -0
- edsl/base/Base.py +279 -0
- edsl/config.py +115 -113
- edsl/conversation/Conversation.py +290 -0
- edsl/conversation/car_buying.py +59 -0
- edsl/conversation/chips.py +95 -0
- edsl/conversation/mug_negotiation.py +81 -0
- edsl/conversation/next_speaker_utilities.py +93 -0
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -0
- edsl/coop/__init__.py +1 -0
- edsl/coop/coop.py +1029 -134
- edsl/coop/utils.py +131 -0
- edsl/data/Cache.py +560 -89
- edsl/data/CacheEntry.py +230 -0
- edsl/data/CacheHandler.py +168 -0
- edsl/data/RemoteCacheSync.py +186 -0
- edsl/data/SQLiteDict.py +292 -0
- edsl/data/__init__.py +5 -3
- edsl/data/orm.py +6 -33
- edsl/data_transfer_models.py +74 -27
- edsl/enums.py +165 -8
- edsl/exceptions/BaseException.py +21 -0
- edsl/exceptions/__init__.py +52 -46
- edsl/exceptions/agents.py +33 -15
- edsl/exceptions/cache.py +5 -0
- edsl/exceptions/coop.py +8 -0
- edsl/exceptions/general.py +34 -0
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +15 -0
- edsl/exceptions/language_models.py +46 -1
- edsl/exceptions/questions.py +80 -5
- edsl/exceptions/results.py +16 -5
- edsl/exceptions/scenarios.py +29 -0
- edsl/exceptions/surveys.py +13 -10
- edsl/inference_services/AnthropicService.py +106 -0
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -0
- edsl/inference_services/AzureAI.py +215 -0
- edsl/inference_services/DeepInfraService.py +18 -0
- edsl/inference_services/GoogleService.py +143 -0
- edsl/inference_services/GroqService.py +20 -0
- edsl/inference_services/InferenceServiceABC.py +80 -0
- edsl/inference_services/InferenceServicesCollection.py +138 -0
- edsl/inference_services/MistralAIService.py +120 -0
- edsl/inference_services/OllamaService.py +18 -0
- edsl/inference_services/OpenAIService.py +236 -0
- edsl/inference_services/PerplexityService.py +160 -0
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -0
- edsl/inference_services/TogetherAIService.py +172 -0
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -0
- edsl/inference_services/rate_limits_cache.py +25 -0
- edsl/inference_services/registry.py +41 -0
- edsl/inference_services/write_available.py +10 -0
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +21 -20
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +684 -206
- edsl/jobs/JobsChecks.py +172 -0
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -0
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -0
- edsl/jobs/buckets/ModelBuckets.py +65 -0
- edsl/jobs/buckets/TokenBucket.py +283 -0
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +392 -0
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -0
- edsl/jobs/interviews/InterviewStatistic.py +63 -0
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -0
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -0
- edsl/jobs/interviews/InterviewStatusLog.py +92 -0
- edsl/jobs/interviews/ReportErrors.py +66 -0
- edsl/jobs/interviews/interview_status_enum.py +9 -0
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -110
- edsl/jobs/runners/JobsRunnerStatus.py +298 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -0
- edsl/jobs/tasks/TaskCreators.py +64 -0
- edsl/jobs/tasks/TaskHistory.py +470 -0
- edsl/jobs/tasks/TaskStatusLog.py +23 -0
- edsl/jobs/tasks/task_status_enum.py +161 -0
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -0
- edsl/jobs/tokens/TokenUsage.py +34 -0
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +507 -386
- edsl/language_models/ModelList.py +164 -0
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -0
- edsl/language_models/__init__.py +1 -8
- edsl/language_models/fake_openai_call.py +15 -0
- edsl/language_models/fake_openai_service.py +61 -0
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +109 -41
- edsl/language_models/utilities.py +65 -0
- edsl/notebooks/Notebook.py +263 -0
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -0
- edsl/prompts/Prompt.py +222 -93
- edsl/prompts/__init__.py +1 -1
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -0
- edsl/questions/QuestionBasePromptsMixin.py +221 -0
- edsl/questions/QuestionBudget.py +164 -67
- edsl/questions/QuestionCheckBox.py +281 -62
- edsl/questions/QuestionDict.py +343 -0
- edsl/questions/QuestionExtract.py +136 -50
- edsl/questions/QuestionFreeText.py +79 -55
- edsl/questions/QuestionFunctional.py +138 -41
- edsl/questions/QuestionList.py +184 -57
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +293 -69
- edsl/questions/QuestionNumerical.py +109 -56
- edsl/questions/QuestionRank.py +244 -49
- edsl/questions/Quick.py +41 -0
- edsl/questions/SimpleAskMixin.py +74 -0
- edsl/questions/__init__.py +9 -6
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +153 -38
- edsl/questions/compose_questions.py +13 -7
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -0
- edsl/questions/derived/QuestionLikertFive.py +28 -26
- edsl/questions/derived/QuestionLinearScale.py +41 -28
- edsl/questions/derived/QuestionTopK.py +34 -26
- edsl/questions/derived/QuestionYesNo.py +40 -27
- edsl/questions/descriptors.py +228 -74
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -0
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
- edsl/questions/prompt_templates/question_extract.jinja +11 -0
- edsl/questions/prompt_templates/question_free_text.jinja +3 -0
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
- edsl/questions/prompt_templates/question_list.jinja +17 -0
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
- edsl/questions/prompt_templates/question_numerical.jinja +37 -0
- edsl/questions/question_base_gen_mixin.py +168 -0
- edsl/questions/question_registry.py +130 -46
- edsl/questions/register_questions_meta.py +71 -0
- edsl/questions/response_validator_abc.py +188 -0
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +5 -2
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +7 -0
- edsl/questions/templates/budget/question_presentation.jinja +7 -0
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
- edsl/questions/templates/dict/__init__.py +0 -0
- edsl/questions/templates/dict/answering_instructions.jinja +21 -0
- edsl/questions/templates/dict/question_presentation.jinja +1 -0
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +7 -0
- edsl/questions/templates/extract/question_presentation.jinja +1 -0
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +1 -0
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
- edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +4 -0
- edsl/questions/templates/list/question_presentation.jinja +5 -0
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +7 -0
- edsl/questions/templates/numerical/question_presentation.jinja +7 -0
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +11 -0
- edsl/questions/templates/rank/question_presentation.jinja +15 -0
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
- edsl/questions/templates/top_k/question_presentation.jinja +22 -0
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
- edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
- edsl/results/CSSParameterizer.py +108 -0
- edsl/results/Dataset.py +550 -19
- edsl/results/DatasetExportMixin.py +594 -0
- edsl/results/DatasetTree.py +295 -0
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +477 -173
- edsl/results/Results.py +987 -269
- edsl/results/ResultsExportMixin.py +28 -125
- edsl/results/ResultsGGMixin.py +83 -15
- edsl/results/TableDisplay.py +125 -0
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +1 -1
- edsl/results/file_exports.py +252 -0
- edsl/results/results_fetch_mixin.py +33 -0
- edsl/results/results_selector.py +145 -0
- edsl/results/results_tools_mixin.py +98 -0
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +78 -0
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -0
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +543 -0
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +431 -62
- edsl/scenarios/ScenarioHtmlMixin.py +65 -0
- edsl/scenarios/ScenarioList.py +1415 -45
- edsl/scenarios/ScenarioListExportMixin.py +45 -0
- edsl/scenarios/ScenarioListPdfMixin.py +239 -0
- edsl/scenarios/__init__.py +2 -0
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +49 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/scenario_join.py +131 -0
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -0
- edsl/study/ObjectEntry.py +173 -0
- edsl/study/ProofOfWork.py +113 -0
- edsl/study/SnapShot.py +80 -0
- edsl/study/Study.py +521 -0
- edsl/study/__init__.py +4 -0
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +92 -11
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +9 -4
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +156 -35
- edsl/surveys/Rule.py +221 -74
- edsl/surveys/RuleCollection.py +241 -61
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1079 -339
- edsl/surveys/SurveyCSS.py +273 -0
- edsl/surveys/SurveyExportMixin.py +235 -40
- edsl/surveys/SurveyFlowVisualization.py +181 -0
- edsl/surveys/SurveyQualtricsImport.py +284 -0
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +4 -2
- edsl/surveys/base.py +19 -3
- edsl/surveys/descriptors.py +17 -6
- edsl/surveys/instructions/ChangeInstruction.py +48 -0
- edsl/surveys/instructions/Instruction.py +56 -0
- edsl/surveys/instructions/InstructionCollection.py +82 -0
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +24 -0
- edsl/templates/error_reporting/exceptions_by_model.html +35 -0
- edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
- edsl/templates/error_reporting/exceptions_by_type.html +17 -0
- edsl/templates/error_reporting/interview_details.html +116 -0
- edsl/templates/error_reporting/interviews.html +19 -0
- edsl/templates/error_reporting/overview.html +5 -0
- edsl/templates/error_reporting/performance_plot.html +2 -0
- edsl/templates/error_reporting/report.css +74 -0
- edsl/templates/error_reporting/report.html +118 -0
- edsl/templates/error_reporting/report.js +25 -0
- edsl/tools/__init__.py +1 -0
- edsl/tools/clusters.py +192 -0
- edsl/tools/embeddings.py +27 -0
- edsl/tools/embeddings_plotting.py +118 -0
- edsl/tools/plotting.py +112 -0
- edsl/tools/summarize.py +18 -0
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +5 -0
- edsl/utilities/__init__.py +21 -20
- edsl/utilities/ast_utilities.py +3 -0
- edsl/utilities/data/Registry.py +2 -0
- edsl/utilities/decorators.py +41 -0
- edsl/utilities/gcp_bucket/__init__.py +0 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
- edsl/utilities/interface.py +310 -60
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -0
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -0
- edsl/utilities/restricted_python.py +70 -0
- edsl/utilities/utilities.py +203 -13
- edsl-0.1.40.dist-info/METADATA +111 -0
- edsl-0.1.40.dist-info/RECORD +362 -0
- {edsl-0.1.15.dist-info → edsl-0.1.40.dist-info}/WHEEL +1 -1
- edsl/agents/AgentListExportMixin.py +0 -24
- edsl/coop/old.py +0 -31
- edsl/data/Database.py +0 -141
- edsl/data/crud.py +0 -121
- edsl/jobs/Interview.py +0 -435
- edsl/jobs/JobsRunner.py +0 -63
- edsl/jobs/JobsRunnerStatusMixin.py +0 -115
- edsl/jobs/base.py +0 -47
- edsl/jobs/buckets.py +0 -178
- edsl/jobs/runners/JobsRunnerDryRun.py +0 -19
- edsl/jobs/runners/JobsRunnerStreaming.py +0 -54
- edsl/jobs/task_management.py +0 -215
- edsl/jobs/token_tracking.py +0 -78
- edsl/language_models/DeepInfra.py +0 -69
- edsl/language_models/OpenAI.py +0 -98
- edsl/language_models/model_interfaces/GeminiPro.py +0 -66
- edsl/language_models/model_interfaces/LanguageModelOpenAIFour.py +0 -8
- edsl/language_models/model_interfaces/LanguageModelOpenAIThreeFiveTurbo.py +0 -8
- edsl/language_models/model_interfaces/LlamaTwo13B.py +0 -21
- edsl/language_models/model_interfaces/LlamaTwo70B.py +0 -21
- edsl/language_models/model_interfaces/Mixtral8x7B.py +0 -24
- edsl/language_models/registry.py +0 -81
- edsl/language_models/schemas.py +0 -15
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/prompts/QuestionInstructionsBase.py +0 -6
- edsl/prompts/library/agent_instructions.py +0 -29
- edsl/prompts/library/agent_persona.py +0 -17
- edsl/prompts/library/question_budget.py +0 -26
- edsl/prompts/library/question_checkbox.py +0 -32
- edsl/prompts/library/question_extract.py +0 -19
- edsl/prompts/library/question_freetext.py +0 -14
- edsl/prompts/library/question_linear_scale.py +0 -20
- edsl/prompts/library/question_list.py +0 -22
- edsl/prompts/library/question_multiple_choice.py +0 -44
- edsl/prompts/library/question_numerical.py +0 -31
- edsl/prompts/library/question_rank.py +0 -21
- edsl/prompts/prompt_config.py +0 -33
- edsl/prompts/registry.py +0 -185
- edsl/questions/Question.py +0 -240
- edsl/report/InputOutputDataTypes.py +0 -134
- edsl/report/RegressionMixin.py +0 -28
- edsl/report/ReportOutputs.py +0 -1228
- edsl/report/ResultsFetchMixin.py +0 -106
- edsl/report/ResultsOutputMixin.py +0 -14
- edsl/report/demo.ipynb +0 -645
- edsl/results/ResultsDBMixin.py +0 -184
- edsl/surveys/SurveyFlowVisualizationMixin.py +0 -92
- edsl/trackers/Tracker.py +0 -91
- edsl/trackers/TrackerAPI.py +0 -196
- edsl/trackers/TrackerTasks.py +0 -70
- edsl/utilities/pastebin.py +0 -141
- edsl-0.1.15.dist-info/METADATA +0 -69
- edsl-0.1.15.dist-info/RECORD +0 -142
- /edsl/{language_models/model_interfaces → inference_services}/__init__.py +0 -0
- /edsl/{report/__init__.py → jobs/runners/JobsRunnerStatusData.py} +0 -0
- /edsl/{trackers/__init__.py → language_models/ServiceDataSources.py} +0 -0
- {edsl-0.1.15.dist-info → edsl-0.1.40.dist-info}/LICENSE +0 -0
edsl/language_models/ModelList.py
ADDED
@@ -0,0 +1,164 @@
+from typing import Optional, List, TYPE_CHECKING
+from collections import UserList
+
+from edsl.Base import Base
+from edsl.language_models.model import Model
+
+#
+from edsl.utilities.remove_edsl_version import remove_edsl_version
+from edsl.utilities.is_valid_variable_name import is_valid_variable_name
+
+if TYPE_CHECKING:
+    from edsl.inference_services.data_structures import AvailableModels
+    from edsl.language_models import LanguageModel
+
+
+class ModelList(Base, UserList):
+    __documentation__ = """https://docs.expectedparrot.com/en/latest/language_models.html#module-edsl.language_models.ModelList"""
+
+    def __init__(self, data: Optional["LanguageModel"] = None):
+        """Initialize the ScenarioList class.
+
+        >>> from edsl import Model
+        >>> m = ModelList(Model.available())
+
+        """
+        if data is not None:
+            super().__init__(data)
+        else:
+            super().__init__([])
+
+    @property
+    def names(self):
+        """
+
+        >>> ModelList.example().names
+        {'...'}
+        """
+        return set([model.model for model in self])
+
+    def __repr__(self):
+        return f"ModelList({super().__repr__()})"
+
+    def _summary(self):
+        return {"models": len(self)}
+
+    def __hash__(self):
+        """Return a hash of the ModelList. This is used for comparison of ModelLists.
+
+        >>> isinstance(hash(Model()), int)
+        True
+
+        """
+        from edsl.utilities.utilities import dict_hash
+
+        return dict_hash(self.to_dict(sort=True, add_edsl_version=False))
+
+    def to_scenario_list(self):
+        from edsl.scenarios.ScenarioList import ScenarioList
+        from edsl.scenarios.Scenario import Scenario
+
+        sl = ScenarioList()
+        for model in self:
+            d = {"model": model.model}
+            d.update(model.parameters)
+            sl.append(Scenario(d))
+        return sl
+
+    def tree(self, node_list: Optional[List[str]] = None):
+        return self.to_scenario_list().tree(node_list)
+
+    def table(
+        self,
+        *fields,
+        tablefmt: Optional[str] = None,
+        pretty_labels: Optional[dict] = None,
+    ):
+        """
+        >>> ModelList.example().table('model')
+        model
+        -------
+        gpt-4o
+        gpt-4o
+        gpt-4o
+        """
+        return (
+            self.to_scenario_list()
+            .to_dataset()
+            .table(*fields, tablefmt=tablefmt, pretty_labels=pretty_labels)
+        )
+
+    def to_list(self) -> list:
+        return self.to_scenario_list().to_list()
+
+    def to_dict(self, sort=False, add_edsl_version=True):
+        if sort:
+            model_list = sorted([model for model in self], key=lambda x: hash(x))
+            d = {
+                "models": [
+                    model.to_dict(add_edsl_version=add_edsl_version)
+                    for model in model_list
+                ]
+            }
+        else:
+            d = {
+                "models": [
+                    model.to_dict(add_edsl_version=add_edsl_version) for model in self
+                ]
+            }
+        if add_edsl_version:
+            from edsl import __version__
+
+            d["edsl_version"] = __version__
+            d["edsl_class_name"] = "ModelList"
+
+        return d
+
+    @classmethod
+    def from_names(self, *args, **kwargs):
+        """A a model list from a list of names"""
+        if len(args) == 1 and isinstance(args[0], list):
+            args = args[0]
+        return ModelList([Model(model_name, **kwargs) for model_name in args])
+
+    @classmethod
+    def from_available_models(self, available_models_list: "AvailableModels"):
+        """Create a ModelList from an AvailableModels object"""
+        return ModelList(
+            [
+                Model(model.model_name, service_name=model.service_name)
+                for model in available_models_list
+            ]
+        )
+
+    @classmethod
+    @remove_edsl_version
+    def from_dict(cls, data):
+        """
+        Create a ModelList from a dictionary.
+
+        >>> newm = ModelList.from_dict(ModelList.example().to_dict())
+        >>> assert ModelList.example() == newm
+        """
+        from edsl.language_models.LanguageModel import LanguageModel
+
+        return cls(data=[LanguageModel.from_dict(model) for model in data["models"]])
+
+    def code(self):
+        pass
+
+    @classmethod
+    def example(cls, randomize: bool = False) -> "ModelList":
+        """
+        Returns an example ModelList instance.
+
+        :param randomize: If True, uses Model's randomize method.
+        """
+
+        return cls([Model.example(randomize) for _ in range(3)])
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod(optionflags=doctest.ELLIPSIS)
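A minimal usage sketch for the new ModelList container, based on its own doctests; the model name printed by `names` is whatever example model this edsl version ships, so treat the value as illustrative:

from edsl.language_models.ModelList import ModelList

models = ModelList.example()               # three copies of the example model
print(models.names)                        # set of model names, e.g. {'gpt-4o'}

# Serialization round-trips through a plain dict (mirrors the from_dict doctest)
restored = ModelList.from_dict(models.to_dict())
assert restored == models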
edsl/language_models/PriceManager.py
ADDED
@@ -0,0 +1,127 @@
+from typing import Dict, Tuple, Optional, Union
+
+
+class PriceManager:
+    _instance = None
+    _price_lookup: Dict[Tuple[str, str], Dict] = {}
+    _is_initialized = False
+
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super(PriceManager, cls).__new__(cls)
+        return cls._instance
+
+    def __init__(self):
+        # Only initialize once, even if __init__ is called multiple times
+        if not self._is_initialized:
+            self._is_initialized = True
+            self.refresh_prices()
+
+    def refresh_prices(self) -> None:
+        """
+        Fetch fresh prices from the Coop service and update the internal price lookup.
+
+        """
+        from edsl.coop import Coop
+
+        c = Coop()
+        try:
+            self._price_lookup = c.fetch_prices()
+        except Exception as e:
+            print(f"Error fetching prices: {str(e)}")
+
+    def get_price(self, inference_service: str, model: str) -> Optional[Dict]:
+        """
+        Get the price information for a specific service and model combination.
+
+        Args:
+            inference_service (str): The name of the inference service
+            model (str): The model identifier
+
+        Returns:
+            Optional[Dict]: Price information if found, None otherwise
+        """
+        key = (inference_service, model)
+        return self._price_lookup.get(key)
+
+    def get_all_prices(self) -> Dict[Tuple[str, str], Dict]:
+        """
+        Get the complete price lookup dictionary.
+
+        Returns:
+            Dict[Tuple[str, str], Dict]: The complete price lookup dictionary
+        """
+        return self._price_lookup.copy()
+
+    def calculate_cost(
+        self,
+        inference_service: str,
+        model: str,
+        usage: Dict[str, Union[str, int]],
+        input_token_name: str,
+        output_token_name: str,
+    ) -> Union[float, str]:
+        """
+        Calculate the total cost for a model usage based on input and output tokens.
+
+        Args:
+            inference_service (str): The inference service identifier
+            model (str): The model identifier
+            usage (Dict[str, Union[str, int]]): Dictionary containing token usage information
+            input_token_name (str): Key name for input tokens in the usage dict
+            output_token_name (str): Key name for output tokens in the usage dict
+
+        Returns:
+            Union[float, str]: Total cost if calculation successful, error message string if not
+        """
+        relevant_prices = self.get_price(inference_service, model)
+        if relevant_prices is None:
+            return f"Could not find price for model {model} in the price lookup."
+
+        # Extract token counts
+        try:
+            input_tokens = int(usage[input_token_name])
+            output_tokens = int(usage[output_token_name])
+        except Exception as e:
+            return f"Could not fetch tokens from model response: {e}"
+
+        # Extract price information
+        try:
+            inverse_output_price = relevant_prices["output"]["one_usd_buys"]
+            inverse_input_price = relevant_prices["input"]["one_usd_buys"]
+        except Exception as e:
+            if "output" not in relevant_prices:
+                return f"Could not fetch prices from {relevant_prices} - {e}; Missing 'output' key."
+            if "input" not in relevant_prices:
+                return f"Could not fetch prices from {relevant_prices} - {e}; Missing 'input' key."
+            return f"Could not fetch prices from {relevant_prices} - {e}"
+
+        # Calculate input cost
+        if inverse_input_price == "infinity":
+            input_cost = 0
+        else:
+            try:
+                input_cost = input_tokens / float(inverse_input_price)
+            except Exception as e:
+                return f"Could not compute input price - {e}."
+
+        # Calculate output cost
+        if inverse_output_price == "infinity":
+            output_cost = 0
+        else:
+            try:
+                output_cost = output_tokens / float(inverse_output_price)
+            except Exception as e:
+                return f"Could not compute output price - {e}"
+
+        return input_cost + output_cost
+
+    @property
+    def is_initialized(self) -> bool:
+        """
+        Check if the PriceManager has been initialized.
+
+        Returns:
+            bool: True if initialized, False otherwise
+        """
+        return self._is_initialized
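PriceManager is a process-wide singleton that pulls prices from Coop on first construction. A hedged sketch of driving its cost calculation follows; the usage-dict shape and token key names are illustrative assumptions, not keys guaranteed by any particular service:

from edsl.language_models.PriceManager import PriceManager

pm = PriceManager()          # first construction fetches prices via Coop (network/key required)
assert pm is PriceManager()  # __new__ always hands back the same instance

usage = {"prompt_tokens": 120, "completion_tokens": 80}  # hypothetical usage payload
cost = pm.calculate_cost(
    inference_service="openai",
    model="gpt-4o",
    usage=usage,
    input_token_name="prompt_tokens",
    output_token_name="completion_tokens",
)
print(cost)  # a float on success, or an explanatory error string on any failure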
edsl/language_models/RawResponseHandler.py
ADDED
@@ -0,0 +1,106 @@
+import json
+from typing import Optional, Any, List
+from edsl.exceptions.language_models import LanguageModelBadResponseError
+
+from json_repair import repair_json
+
+
+def _extract_item_from_raw_response(data, sequence):
+    if isinstance(data, str):
+        try:
+            data = json.loads(data)
+        except json.JSONDecodeError as e:
+            return data
+    current_data = data
+    for i, key in enumerate(sequence):
+        try:
+            if isinstance(current_data, (list, tuple)):
+                if not isinstance(key, int):
+                    raise TypeError(
+                        f"Expected integer index for sequence at position {i}, got {type(key).__name__}"
+                    )
+                if key < 0 or key >= len(current_data):
+                    raise IndexError(
+                        f"Index {key} out of range for sequence of length {len(current_data)} at position {i}"
+                    )
+            elif isinstance(current_data, dict):
+                if key not in current_data:
+                    raise KeyError(
+                        f"Key '{key}' not found in dictionary at position {i}"
+                    )
+            else:
+                raise TypeError(
+                    f"Cannot index into {type(current_data).__name__} at position {i}. Full response is: {data} of type {type(data)}. Key sequence is: {sequence}"
+                )
+
+            current_data = current_data[key]
+        except Exception as e:
+            path = " -> ".join(map(str, sequence[: i + 1]))
+            if "error" in data:
+                msg = data["error"]
+            else:
+                msg = f"Error accessing path: {path}. {str(e)}. Full response is: '{data}'"
+            raise LanguageModelBadResponseError(message=msg, response_json=data)
+    if isinstance(current_data, str):
+        return current_data.strip()
+    else:
+        return current_data
+
+
+class RawResponseHandler:
+    """Class to handle raw responses from language models."""
+
+    def __init__(self, key_sequence: list, usage_sequence: Optional[list] = None):
+        self.key_sequence = key_sequence
+        self.usage_sequence = usage_sequence
+
+    def get_generated_token_string(self, raw_response):
+        return _extract_item_from_raw_response(raw_response, self.key_sequence)
+
+    def get_usage_dict(self, raw_response):
+        if self.usage_sequence is None:
+            return {}
+        return _extract_item_from_raw_response(raw_response, self.usage_sequence)
+
+    def parse_response(self, raw_response: dict[str, Any]) -> "EDSLOutput":
+        """Parses the API response and returns the response text."""
+
+        from edsl.data_transfer_models import EDSLOutput
+
+        generated_token_string = self.get_generated_token_string(raw_response)
+        last_newline = generated_token_string.rfind("\n")
+
+        if last_newline == -1:
+            # There is no comment
+            edsl_dict = {
+                "answer": self.convert_answer(generated_token_string),
+                "generated_tokens": generated_token_string,
+                "comment": None,
+            }
+        else:
+            edsl_dict = {
+                "answer": self.convert_answer(generated_token_string[:last_newline]),
+                "comment": generated_token_string[last_newline + 1 :].strip(),
+                "generated_tokens": generated_token_string,
+            }
+        return EDSLOutput(**edsl_dict)
+
+    @staticmethod
+    def convert_answer(response_part):
+        import json
+
+        response_part = response_part.strip()
+
+        if response_part == "None":
+            return None
+
+        repaired = repair_json(response_part)
+        if repaired == '""':
+            # it was a literal string
+            return response_part
+
+        try:
+            return json.loads(repaired)
+        except json.JSONDecodeError as j:
+            # last resort
+            return response_part
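RawResponseHandler walks a raw API payload with a key sequence and splits the generated text into an answer and a trailing comment at the last newline. A sketch against an OpenAI-style payload; the key sequence here mirrors the chat-completions shape used by the fake service further below and is an assumption, not the sequence EDSL's real services register:

from edsl.language_models.RawResponseHandler import RawResponseHandler

raw = {
    "choices": [{"message": {"content": '{"answer": "Yes"}\nBecause it fits best.'}}],
    "usage": {"prompt_tokens": 9, "completion_tokens": 12},
}

handler = RawResponseHandler(
    key_sequence=["choices", 0, "message", "content"],  # assumed path to the generated text
    usage_sequence=["usage"],
)
out = handler.parse_response(raw)
print(out.answer)                  # {'answer': 'Yes'} -- JSON repaired and parsed
print(out.comment)                 # 'Because it fits best.' -- text after the last newline
print(handler.get_usage_dict(raw))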
edsl/language_models/RegisterLanguageModelsMeta.py
ADDED
@@ -0,0 +1,184 @@
+from abc import ABC, ABCMeta
+from typing import Any, List, Callable
+import inspect
+from typing import get_type_hints
+from edsl.exceptions.language_models import LanguageModelAttributeTypeError
+from edsl.enums import InferenceServiceType
+
+
+class RegisterLanguageModelsMeta(ABCMeta):
+    """Metaclass to register output elements in a registry i.e., those that have a parent."""
+
+    _registry = {}  # Initialize the registry as a dictionary
+    REQUIRED_CLASS_ATTRIBUTES = ["_model_", "_parameters_", "_inference_service_"]
+
+    def __init__(cls, name, bases, dct):
+        """Register the class in the registry if it has a _model_ attribute."""
+        super(RegisterLanguageModelsMeta, cls).__init__(name, bases, dct)
+        # if name != "LanguageModel":
+        if (model_name := getattr(cls, "_model_", None)) is not None:
+            RegisterLanguageModelsMeta.check_required_class_variables(
+                cls, RegisterLanguageModelsMeta.REQUIRED_CLASS_ATTRIBUTES
+            )
+
+            ## Check that model name is valid
+            # if not LanguageModelType.is_value_valid(model_name):
+            #     acceptable_values = [item.value for item in LanguageModelType]
+            #     raise LanguageModelAttributeTypeError(
+            #         f"""A LanguageModel's model must be one of {LanguageModelType} values, which are
+            #         {acceptable_values}. You passed {model_name}."""
+            #     )
+
+            if not InferenceServiceType.is_value_valid(
+                inference_service := getattr(cls, "_inference_service_", None)
+            ):
+                acceptable_values = [item.value for item in InferenceServiceType]
+                raise LanguageModelAttributeTypeError(
+                    f"""A LanguageModel's model must have an _inference_service_ value from
+                    {acceptable_values}. You passed {inference_service}."""
+                )
+
+            # LanguageModel children have to implement the async_execute_model_call method
+            RegisterLanguageModelsMeta.verify_method(
+                candidate_class=cls,
+                method_name="async_execute_model_call",
+                expected_return_type=dict[str, Any],
+                required_parameters=[("user_prompt", str), ("system_prompt", str)],
+                must_be_async=True,
+            )
+            # LanguageModel children have to implement the parse_response method
+            RegisterLanguageModelsMeta._registry[model_name] = cls
+
+    @classmethod
+    def get_registered_classes(cls):
+        """Return the registry."""
+        return cls._registry
+
+    @staticmethod
+    def check_required_class_variables(
+        candidate_class: "LanguageModel", required_attributes: List[str] = None
+    ):
+        """Check if a class has the required attributes.
+
+        >>> class M:
+        ...     _model_ = "m"
+        ...     _parameters_ = {}
+        >>> RegisterLanguageModelsMeta.check_required_class_variables(M, ["_model_", "_parameters_"])
+        >>> class M2:
+        ...     _model_ = "m"
+        >>> RegisterLanguageModelsMeta.check_required_class_variables(M2, ["_model_", "_parameters_"])
+        Traceback (most recent call last):
+        ...
+        Exception: Class M2 does not have required attribute _parameters_
+        """
+        required_attributes = required_attributes or []
+        for attribute in required_attributes:
+            if not hasattr(candidate_class, attribute):
+                raise Exception(
+                    f"Class {candidate_class.__name__} does not have required attribute {attribute}"
+                )
+
+    @staticmethod
+    def verify_method(
+        candidate_class: "LanguageModel",
+        method_name: str,
+        expected_return_type: Any,
+        required_parameters: List[tuple[str, Any]] = None,
+        must_be_async: bool = False,
+    ):
+        """Verify that a method is defined in a class, has the correct return type, and has the correct parameters."""
+        RegisterLanguageModelsMeta._check_method_defined(candidate_class, method_name)
+
+        required_parameters = required_parameters or []
+        method = getattr(candidate_class, method_name)
+        # signature = inspect.signature(method)
+
+        RegisterLanguageModelsMeta._check_return_type(method, expected_return_type)
+
+        if must_be_async:
+            RegisterLanguageModelsMeta._check_is_coroutine(method)
+
+        # Check the parameters
+        # params = signature.parameters
+        # for param_name, param_type in required_parameters:
+        #     RegisterLanguageModelsMeta._verify_parameter(
+        #         params, param_name, param_type, method_name
+        #     )
+
+    @staticmethod
+    def _check_method_defined(cls, method_name):
+        """Check if a method is defined in a class.
+
+        Example:
+        >>> class M:
+        ...     def f(self): pass
+        >>> RegisterLanguageModelsMeta._check_method_defined(M, "f")
+        >>> RegisterLanguageModelsMeta._check_method_defined(M, "g")
+        Traceback (most recent call last):
+        ...
+        NotImplementedError: g method must be implemented.
+        """
+        if not hasattr(cls, method_name):
+            raise NotImplementedError(f"{method_name} method must be implemented.")
+
+    @staticmethod
+    def _check_is_coroutine(func: Callable):
+        """Check to make sure it's a coroutine function.
+
+        Example:
+
+        >>> def f(): pass
+        >>> RegisterLanguageModelsMeta._check_is_coroutine(f)
+        Traceback (most recent call last):
+        ...
+        TypeError: A LangugeModel class with method f must be an asynchronous method.
+        """
+        if not inspect.iscoroutinefunction(func):
+            raise TypeError(
+                f"A LangugeModel class with method {func.__name__} must be an asynchronous method."
+            )
+
+    @staticmethod
+    def _verify_parameter(params, param_name, param_type, method_name):
+        """Verify that a parameter is defined in a method and has the correct type."""
+        pass
+        # if param_name not in params:
+        #     raise TypeError(
+        #         f"""Parameter "{param_name}" of method "{method_name}" must be defined.
+        #         """
+        #     )
+        # if params[param_name].annotation != param_type:
+        #     raise TypeError(
+        #         f"""Parameter "{param_name}" of method "{method_name}" must be of type {param_type.__name__}.
+        #         Got {params[param_name].annotation} instead.
+        #         """
+        #     )
+
+    @staticmethod
+    def _check_return_type(method, expected_return_type):
+        """
+        Check if the return type of a method is as expected.
+
+        Example:
+        """
+        pass
+        # if inspect.isroutine(method):
+        #     # return_type = inspect.signature(method).return_annotation
+        #     return_type = get_type_hints(method)["return"]
+        #     if return_type != expected_return_type:
+        #         raise TypeError(
+        #             f"Return type of {method.__name__} must be {expected_return_type}. Got {return_type}."
+        #         )
+
+    @classmethod
+    def model_names_to_classes(cls):
+        """Return a dictionary of model names to classes."""
+        d = {}
+        for classname, cls in cls._registry.items():
+            if hasattr(cls, "_model_"):
+                d[cls._model_] = cls
+            else:
+                raise Exception(
+                    f"Class {classname} does not have a _model_ class attribute."
+                )
+        return d
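Any class built with this metaclass that defines _model_ is validated and added to the registry. A sketch of a minimal conforming class; the class and model name are hypothetical, and using "test" as the _inference_service_ value assumes it is a valid InferenceServiceType member (the TestService listed above suggests it is):

from typing import Any
from edsl.language_models.RegisterLanguageModelsMeta import RegisterLanguageModelsMeta


class TinyTestModel(metaclass=RegisterLanguageModelsMeta):
    _model_ = "tiny-test-model"                 # hypothetical model name
    _parameters_ = {"temperature": 0.5}
    _inference_service_ = "test"                # assumption: a valid InferenceServiceType value

    async def async_execute_model_call(
        self, user_prompt: str, system_prompt: str
    ) -> dict[str, Any]:
        return {"message": [{"text": f"echo: {user_prompt}"}]}


# The metaclass has registered the class under its _model_ name
assert RegisterLanguageModelsMeta.get_registered_classes()["tiny-test-model"] is TinyTestModel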
edsl/language_models/__init__.py
CHANGED
@@ -1,9 +1,2 @@
-from edsl.language_models.schemas import model_prices
 from edsl.language_models.LanguageModel import LanguageModel
-from edsl.language_models.model_interfaces.LanguageModelOpenAIThreeFiveTurbo import (
-    LanguageModelOpenAIThreeFiveTurbo,
-)
-from edsl.language_models.model_interfaces.LanguageModelOpenAIFour import (
-    LanguageModelOpenAIFour,
-)
-from edsl.language_models.model_interfaces.GeminiPro import GeminiPro
+from edsl.language_models.model import Model
edsl/language_models/fake_openai_call.py
ADDED
@@ -0,0 +1,15 @@
+from openai import AsyncOpenAI
+import asyncio
+
+client = AsyncOpenAI(base_url="http://127.0.0.1:8000/v1", api_key="fake_key")
+
+
+async def main():
+    response = await client.chat.completions.create(
+        model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Question XX42"}]
+    )
+    print(response)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
edsl/language_models/fake_openai_service.py
ADDED
@@ -0,0 +1,61 @@
+import threading
+import asyncio
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse
+import uvicorn
+import json
+from typing import Any
+
+app = FastAPI()
+
+
+async def generate_response(question_number: int) -> dict:
+    # Simulate some asynchronous work
+    await asyncio.sleep(1)
+    return {
+        "id": "chatcmpl-123",
+        "object": "chat.completion",
+        "created": 1677652288,
+        "model": "gpt-3.5-turbo-0613",
+        "choices": [
+            {
+                "index": 0,
+                "message": {
+                    "role": "assistant",
+                    "content": json.dumps(
+                        {"answer": f"SPAM for question {question_number}!"}
+                    ),
+                },
+                "finish_reason": "stop",
+            }
+        ],
+        "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
+    }
+
+
+@app.post("/v1/chat/completions")
+async def chat_completions(request: Request):
+    body = await request.json()
+    user_prompt = body["messages"][-1]["content"]
+    question_number = int(user_prompt.split("XX")[1])
+
+    response = await generate_response(question_number)
+    return JSONResponse(content=response)
+
+
+def run_server():
+    uvicorn.run(app, host="127.0.0.1", port=8000)
+
+
+if __name__ == "__main__":
+    # Start the server in a separate thread
+    server_thread = threading.Thread(target=run_server)
+    server_thread.start()
+
+    # Your main code here
+    # ...
+
+    # To use this with the OpenAI SDK:
+    # from openai import AsyncOpenAI
+    # client = AsyncOpenAI(base_url="http://127.0.0.1:8000/v1", api_key="fake_key")
+    # response = await client.chat.completions.create(model="gpt-3.5-turbo", messages=[...])
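fake_openai_service.py and fake_openai_call.py pair up as a loop-back test: the FastAPI stub answers any chat completion whose prompt contains "XXnn" with a canned response for question nn, and the client script points AsyncOpenAI at http://127.0.0.1:8000/v1. A sketch of running both in one process; the short sleep to let uvicorn bind is an addition of mine, and the module paths assume the files are importable from the installed wheel:

import threading, time, asyncio
from edsl.language_models import fake_openai_service, fake_openai_call

# Run the stub server in the background, then exercise it with the fake client
threading.Thread(target=fake_openai_service.run_server, daemon=True).start()
time.sleep(1)  # give uvicorn a moment to bind 127.0.0.1:8000
asyncio.run(fake_openai_call.main())  # prints the canned "SPAM for question 42!" completion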