edsl 0.1.15__py3-none-any.whl → 0.1.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edsl/Base.py +348 -38
- edsl/BaseDiff.py +260 -0
- edsl/TemplateLoader.py +24 -0
- edsl/__init__.py +45 -10
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +842 -144
- edsl/agents/AgentList.py +521 -25
- edsl/agents/Invigilator.py +250 -374
- edsl/agents/InvigilatorBase.py +257 -0
- edsl/agents/PromptConstructor.py +272 -0
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/descriptors.py +43 -13
- edsl/agents/prompt_helpers.py +129 -0
- edsl/agents/question_option_processor.py +172 -0
- edsl/auto/AutoStudy.py +130 -0
- edsl/auto/StageBase.py +243 -0
- edsl/auto/StageGenerateSurvey.py +178 -0
- edsl/auto/StageLabelQuestions.py +125 -0
- edsl/auto/StagePersona.py +61 -0
- edsl/auto/StagePersonaDimensionValueRanges.py +88 -0
- edsl/auto/StagePersonaDimensionValues.py +74 -0
- edsl/auto/StagePersonaDimensions.py +69 -0
- edsl/auto/StageQuestions.py +74 -0
- edsl/auto/SurveyCreatorPipeline.py +21 -0
- edsl/auto/utilities.py +218 -0
- edsl/base/Base.py +279 -0
- edsl/config.py +115 -113
- edsl/conversation/Conversation.py +290 -0
- edsl/conversation/car_buying.py +59 -0
- edsl/conversation/chips.py +95 -0
- edsl/conversation/mug_negotiation.py +81 -0
- edsl/conversation/next_speaker_utilities.py +93 -0
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +54 -0
- edsl/coop/__init__.py +1 -0
- edsl/coop/coop.py +1029 -134
- edsl/coop/utils.py +131 -0
- edsl/data/Cache.py +560 -89
- edsl/data/CacheEntry.py +230 -0
- edsl/data/CacheHandler.py +168 -0
- edsl/data/RemoteCacheSync.py +186 -0
- edsl/data/SQLiteDict.py +292 -0
- edsl/data/__init__.py +5 -3
- edsl/data/orm.py +6 -33
- edsl/data_transfer_models.py +74 -27
- edsl/enums.py +165 -8
- edsl/exceptions/BaseException.py +21 -0
- edsl/exceptions/__init__.py +52 -46
- edsl/exceptions/agents.py +33 -15
- edsl/exceptions/cache.py +5 -0
- edsl/exceptions/coop.py +8 -0
- edsl/exceptions/general.py +34 -0
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/jobs.py +15 -0
- edsl/exceptions/language_models.py +46 -1
- edsl/exceptions/questions.py +80 -5
- edsl/exceptions/results.py +16 -5
- edsl/exceptions/scenarios.py +29 -0
- edsl/exceptions/surveys.py +13 -10
- edsl/inference_services/AnthropicService.py +106 -0
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +215 -0
- edsl/inference_services/AwsBedrock.py +118 -0
- edsl/inference_services/AzureAI.py +215 -0
- edsl/inference_services/DeepInfraService.py +18 -0
- edsl/inference_services/GoogleService.py +143 -0
- edsl/inference_services/GroqService.py +20 -0
- edsl/inference_services/InferenceServiceABC.py +80 -0
- edsl/inference_services/InferenceServicesCollection.py +138 -0
- edsl/inference_services/MistralAIService.py +120 -0
- edsl/inference_services/OllamaService.py +18 -0
- edsl/inference_services/OpenAIService.py +236 -0
- edsl/inference_services/PerplexityService.py +160 -0
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +90 -0
- edsl/inference_services/TogetherAIService.py +172 -0
- edsl/inference_services/data_structures.py +134 -0
- edsl/inference_services/models_available_cache.py +118 -0
- edsl/inference_services/rate_limits_cache.py +25 -0
- edsl/inference_services/registry.py +41 -0
- edsl/inference_services/write_available.py +10 -0
- edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
- edsl/jobs/Answers.py +21 -20
- edsl/jobs/FetchInvigilator.py +47 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +50 -0
- edsl/jobs/Jobs.py +684 -206
- edsl/jobs/JobsChecks.py +172 -0
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +270 -0
- edsl/jobs/JobsRemoteInferenceHandler.py +311 -0
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/async_interview_runner.py +138 -0
- edsl/jobs/buckets/BucketCollection.py +104 -0
- edsl/jobs/buckets/ModelBuckets.py +65 -0
- edsl/jobs/buckets/TokenBucket.py +283 -0
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/check_survey_scenario_compatibility.py +85 -0
- edsl/jobs/data_structures.py +120 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +392 -0
- edsl/jobs/interviews/InterviewExceptionCollection.py +99 -0
- edsl/jobs/interviews/InterviewExceptionEntry.py +186 -0
- edsl/jobs/interviews/InterviewStatistic.py +63 -0
- edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -0
- edsl/jobs/interviews/InterviewStatusDictionary.py +78 -0
- edsl/jobs/interviews/InterviewStatusLog.py +92 -0
- edsl/jobs/interviews/ReportErrors.py +66 -0
- edsl/jobs/interviews/interview_status_enum.py +9 -0
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/results_exceptions_handler.py +98 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +151 -110
- edsl/jobs/runners/JobsRunnerStatus.py +298 -0
- edsl/jobs/tasks/QuestionTaskCreator.py +244 -0
- edsl/jobs/tasks/TaskCreators.py +64 -0
- edsl/jobs/tasks/TaskHistory.py +470 -0
- edsl/jobs/tasks/TaskStatusLog.py +23 -0
- edsl/jobs/tasks/task_status_enum.py +161 -0
- edsl/jobs/tokens/InterviewTokenUsage.py +27 -0
- edsl/jobs/tokens/TokenUsage.py +34 -0
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +507 -386
- edsl/language_models/ModelList.py +164 -0
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/RegisterLanguageModelsMeta.py +184 -0
- edsl/language_models/__init__.py +1 -8
- edsl/language_models/fake_openai_call.py +15 -0
- edsl/language_models/fake_openai_service.py +61 -0
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/model.py +256 -0
- edsl/language_models/repair.py +109 -41
- edsl/language_models/utilities.py +65 -0
- edsl/notebooks/Notebook.py +263 -0
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/notebooks/__init__.py +1 -0
- edsl/prompts/Prompt.py +222 -93
- edsl/prompts/__init__.py +1 -1
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/QuestionBase.py +518 -0
- edsl/questions/QuestionBasePromptsMixin.py +221 -0
- edsl/questions/QuestionBudget.py +164 -67
- edsl/questions/QuestionCheckBox.py +281 -62
- edsl/questions/QuestionDict.py +343 -0
- edsl/questions/QuestionExtract.py +136 -50
- edsl/questions/QuestionFreeText.py +79 -55
- edsl/questions/QuestionFunctional.py +138 -41
- edsl/questions/QuestionList.py +184 -57
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +293 -69
- edsl/questions/QuestionNumerical.py +109 -56
- edsl/questions/QuestionRank.py +244 -49
- edsl/questions/Quick.py +41 -0
- edsl/questions/SimpleAskMixin.py +74 -0
- edsl/questions/__init__.py +9 -6
- edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +153 -38
- edsl/questions/compose_questions.py +13 -7
- edsl/questions/data_structures.py +20 -0
- edsl/questions/decorators.py +21 -0
- edsl/questions/derived/QuestionLikertFive.py +28 -26
- edsl/questions/derived/QuestionLinearScale.py +41 -28
- edsl/questions/derived/QuestionTopK.py +34 -26
- edsl/questions/derived/QuestionYesNo.py +40 -27
- edsl/questions/descriptors.py +228 -74
- edsl/questions/loop_processor.py +149 -0
- edsl/questions/prompt_templates/question_budget.jinja +13 -0
- edsl/questions/prompt_templates/question_checkbox.jinja +32 -0
- edsl/questions/prompt_templates/question_extract.jinja +11 -0
- edsl/questions/prompt_templates/question_free_text.jinja +3 -0
- edsl/questions/prompt_templates/question_linear_scale.jinja +11 -0
- edsl/questions/prompt_templates/question_list.jinja +17 -0
- edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -0
- edsl/questions/prompt_templates/question_numerical.jinja +37 -0
- edsl/questions/question_base_gen_mixin.py +168 -0
- edsl/questions/question_registry.py +130 -46
- edsl/questions/register_questions_meta.py +71 -0
- edsl/questions/response_validator_abc.py +188 -0
- edsl/questions/response_validator_factory.py +34 -0
- edsl/questions/settings.py +5 -2
- edsl/questions/templates/__init__.py +0 -0
- edsl/questions/templates/budget/__init__.py +0 -0
- edsl/questions/templates/budget/answering_instructions.jinja +7 -0
- edsl/questions/templates/budget/question_presentation.jinja +7 -0
- edsl/questions/templates/checkbox/__init__.py +0 -0
- edsl/questions/templates/checkbox/answering_instructions.jinja +10 -0
- edsl/questions/templates/checkbox/question_presentation.jinja +22 -0
- edsl/questions/templates/dict/__init__.py +0 -0
- edsl/questions/templates/dict/answering_instructions.jinja +21 -0
- edsl/questions/templates/dict/question_presentation.jinja +1 -0
- edsl/questions/templates/extract/__init__.py +0 -0
- edsl/questions/templates/extract/answering_instructions.jinja +7 -0
- edsl/questions/templates/extract/question_presentation.jinja +1 -0
- edsl/questions/templates/free_text/__init__.py +0 -0
- edsl/questions/templates/free_text/answering_instructions.jinja +0 -0
- edsl/questions/templates/free_text/question_presentation.jinja +1 -0
- edsl/questions/templates/likert_five/__init__.py +0 -0
- edsl/questions/templates/likert_five/answering_instructions.jinja +10 -0
- edsl/questions/templates/likert_five/question_presentation.jinja +12 -0
- edsl/questions/templates/linear_scale/__init__.py +0 -0
- edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -0
- edsl/questions/templates/linear_scale/question_presentation.jinja +5 -0
- edsl/questions/templates/list/__init__.py +0 -0
- edsl/questions/templates/list/answering_instructions.jinja +4 -0
- edsl/questions/templates/list/question_presentation.jinja +5 -0
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/questions/templates/multiple_choice/__init__.py +0 -0
- edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -0
- edsl/questions/templates/multiple_choice/html.jinja +0 -0
- edsl/questions/templates/multiple_choice/question_presentation.jinja +12 -0
- edsl/questions/templates/numerical/__init__.py +0 -0
- edsl/questions/templates/numerical/answering_instructions.jinja +7 -0
- edsl/questions/templates/numerical/question_presentation.jinja +7 -0
- edsl/questions/templates/rank/__init__.py +0 -0
- edsl/questions/templates/rank/answering_instructions.jinja +11 -0
- edsl/questions/templates/rank/question_presentation.jinja +15 -0
- edsl/questions/templates/top_k/__init__.py +0 -0
- edsl/questions/templates/top_k/answering_instructions.jinja +8 -0
- edsl/questions/templates/top_k/question_presentation.jinja +22 -0
- edsl/questions/templates/yes_no/__init__.py +0 -0
- edsl/questions/templates/yes_no/answering_instructions.jinja +6 -0
- edsl/questions/templates/yes_no/question_presentation.jinja +12 -0
- edsl/results/CSSParameterizer.py +108 -0
- edsl/results/Dataset.py +550 -19
- edsl/results/DatasetExportMixin.py +594 -0
- edsl/results/DatasetTree.py +295 -0
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +477 -173
- edsl/results/Results.py +987 -269
- edsl/results/ResultsExportMixin.py +28 -125
- edsl/results/ResultsGGMixin.py +83 -15
- edsl/results/TableDisplay.py +125 -0
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +1 -1
- edsl/results/file_exports.py +252 -0
- edsl/results/results_fetch_mixin.py +33 -0
- edsl/results/results_selector.py +145 -0
- edsl/results/results_tools_mixin.py +98 -0
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_display.css +78 -0
- edsl/results/table_renderers.py +118 -0
- edsl/results/tree_explore.py +115 -0
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +543 -0
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +431 -62
- edsl/scenarios/ScenarioHtmlMixin.py +65 -0
- edsl/scenarios/ScenarioList.py +1415 -45
- edsl/scenarios/ScenarioListExportMixin.py +45 -0
- edsl/scenarios/ScenarioListPdfMixin.py +239 -0
- edsl/scenarios/__init__.py +2 -0
- edsl/scenarios/directory_scanner.py +96 -0
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +49 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/scenarios/scenario_join.py +131 -0
- edsl/scenarios/scenario_selector.py +156 -0
- edsl/shared.py +1 -0
- edsl/study/ObjectEntry.py +173 -0
- edsl/study/ProofOfWork.py +113 -0
- edsl/study/SnapShot.py +80 -0
- edsl/study/Study.py +521 -0
- edsl/study/__init__.py +4 -0
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/DAG.py +92 -11
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/Memory.py +9 -4
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/MemoryPlan.py +156 -35
- edsl/surveys/Rule.py +221 -74
- edsl/surveys/RuleCollection.py +241 -61
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +1079 -339
- edsl/surveys/SurveyCSS.py +273 -0
- edsl/surveys/SurveyExportMixin.py +235 -40
- edsl/surveys/SurveyFlowVisualization.py +181 -0
- edsl/surveys/SurveyQualtricsImport.py +284 -0
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +4 -2
- edsl/surveys/base.py +19 -3
- edsl/surveys/descriptors.py +17 -6
- edsl/surveys/instructions/ChangeInstruction.py +48 -0
- edsl/surveys/instructions/Instruction.py +56 -0
- edsl/surveys/instructions/InstructionCollection.py +82 -0
- edsl/surveys/instructions/__init__.py +0 -0
- edsl/templates/error_reporting/base.html +24 -0
- edsl/templates/error_reporting/exceptions_by_model.html +35 -0
- edsl/templates/error_reporting/exceptions_by_question_name.html +17 -0
- edsl/templates/error_reporting/exceptions_by_type.html +17 -0
- edsl/templates/error_reporting/interview_details.html +116 -0
- edsl/templates/error_reporting/interviews.html +19 -0
- edsl/templates/error_reporting/overview.html +5 -0
- edsl/templates/error_reporting/performance_plot.html +2 -0
- edsl/templates/error_reporting/report.css +74 -0
- edsl/templates/error_reporting/report.html +118 -0
- edsl/templates/error_reporting/report.js +25 -0
- edsl/tools/__init__.py +1 -0
- edsl/tools/clusters.py +192 -0
- edsl/tools/embeddings.py +27 -0
- edsl/tools/embeddings_plotting.py +118 -0
- edsl/tools/plotting.py +112 -0
- edsl/tools/summarize.py +18 -0
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/SystemInfo.py +5 -0
- edsl/utilities/__init__.py +21 -20
- edsl/utilities/ast_utilities.py +3 -0
- edsl/utilities/data/Registry.py +2 -0
- edsl/utilities/decorators.py +41 -0
- edsl/utilities/gcp_bucket/__init__.py +0 -0
- edsl/utilities/gcp_bucket/cloud_storage.py +96 -0
- edsl/utilities/interface.py +310 -60
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/naming_utilities.py +263 -0
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/repair_functions.py +28 -0
- edsl/utilities/restricted_python.py +70 -0
- edsl/utilities/utilities.py +203 -13
- edsl-0.1.40.dist-info/METADATA +111 -0
- edsl-0.1.40.dist-info/RECORD +362 -0
- {edsl-0.1.15.dist-info → edsl-0.1.40.dist-info}/WHEEL +1 -1
- edsl/agents/AgentListExportMixin.py +0 -24
- edsl/coop/old.py +0 -31
- edsl/data/Database.py +0 -141
- edsl/data/crud.py +0 -121
- edsl/jobs/Interview.py +0 -435
- edsl/jobs/JobsRunner.py +0 -63
- edsl/jobs/JobsRunnerStatusMixin.py +0 -115
- edsl/jobs/base.py +0 -47
- edsl/jobs/buckets.py +0 -178
- edsl/jobs/runners/JobsRunnerDryRun.py +0 -19
- edsl/jobs/runners/JobsRunnerStreaming.py +0 -54
- edsl/jobs/task_management.py +0 -215
- edsl/jobs/token_tracking.py +0 -78
- edsl/language_models/DeepInfra.py +0 -69
- edsl/language_models/OpenAI.py +0 -98
- edsl/language_models/model_interfaces/GeminiPro.py +0 -66
- edsl/language_models/model_interfaces/LanguageModelOpenAIFour.py +0 -8
- edsl/language_models/model_interfaces/LanguageModelOpenAIThreeFiveTurbo.py +0 -8
- edsl/language_models/model_interfaces/LlamaTwo13B.py +0 -21
- edsl/language_models/model_interfaces/LlamaTwo70B.py +0 -21
- edsl/language_models/model_interfaces/Mixtral8x7B.py +0 -24
- edsl/language_models/registry.py +0 -81
- edsl/language_models/schemas.py +0 -15
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/prompts/QuestionInstructionsBase.py +0 -6
- edsl/prompts/library/agent_instructions.py +0 -29
- edsl/prompts/library/agent_persona.py +0 -17
- edsl/prompts/library/question_budget.py +0 -26
- edsl/prompts/library/question_checkbox.py +0 -32
- edsl/prompts/library/question_extract.py +0 -19
- edsl/prompts/library/question_freetext.py +0 -14
- edsl/prompts/library/question_linear_scale.py +0 -20
- edsl/prompts/library/question_list.py +0 -22
- edsl/prompts/library/question_multiple_choice.py +0 -44
- edsl/prompts/library/question_numerical.py +0 -31
- edsl/prompts/library/question_rank.py +0 -21
- edsl/prompts/prompt_config.py +0 -33
- edsl/prompts/registry.py +0 -185
- edsl/questions/Question.py +0 -240
- edsl/report/InputOutputDataTypes.py +0 -134
- edsl/report/RegressionMixin.py +0 -28
- edsl/report/ReportOutputs.py +0 -1228
- edsl/report/ResultsFetchMixin.py +0 -106
- edsl/report/ResultsOutputMixin.py +0 -14
- edsl/report/demo.ipynb +0 -645
- edsl/results/ResultsDBMixin.py +0 -184
- edsl/surveys/SurveyFlowVisualizationMixin.py +0 -92
- edsl/trackers/Tracker.py +0 -91
- edsl/trackers/TrackerAPI.py +0 -196
- edsl/trackers/TrackerTasks.py +0 -70
- edsl/utilities/pastebin.py +0 -141
- edsl-0.1.15.dist-info/METADATA +0 -69
- edsl-0.1.15.dist-info/RECORD +0 -142
- /edsl/{language_models/model_interfaces → inference_services}/__init__.py +0 -0
- /edsl/{report/__init__.py → jobs/runners/JobsRunnerStatusData.py} +0 -0
- /edsl/{trackers/__init__.py → language_models/ServiceDataSources.py} +0 -0
- {edsl-0.1.15.dist-info → edsl-0.1.40.dist-info}/LICENSE +0 -0
@@ -0,0 +1,256 @@
|
|
1
|
+
import textwrap
|
2
|
+
from random import random
|
3
|
+
from typing import Optional, TYPE_CHECKING, List
|
4
|
+
|
5
|
+
from edsl.utilities.PrettyList import PrettyList
|
6
|
+
from edsl.config import CONFIG
|
7
|
+
|
8
|
+
from edsl.inference_services.InferenceServicesCollection import (
|
9
|
+
InferenceServicesCollection,
|
10
|
+
)
|
11
|
+
from edsl.inference_services.data_structures import AvailableModels
|
12
|
+
from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
|
13
|
+
from edsl.enums import InferenceServiceLiteral
|
14
|
+
|
15
|
+
if TYPE_CHECKING:
|
16
|
+
from edsl.results.Dataset import Dataset
|
17
|
+
|
18
|
+
|
19
|
+
def get_model_class(model_name, registry: Optional[InferenceServicesCollection] = None):
    """Return a model factory for *model_name*.

    :param model_name: Name of the model to resolve.
    :param registry: Registry to resolve against; falls back to the package
        default registry when not supplied (or falsy).
    :returns: The factory object produced by the registry for this model.
    """
    from edsl.inference_services.registry import default

    chosen_registry = registry or default
    return chosen_registry.create_model_factory(model_name)
|
25
|
+
|
26
|
+
|
27
|
+
class Meta(type):
    """Metaclass that gives the Model class a helpful, self-documenting repr."""

    def __repr__(cls):
        # Assemble the help text line by line; blank entries become blank lines.
        help_lines = [
            f"Available models: {cls.available()}",
            "",
            "To create an instance, you can do:",
            ">>> m = Model('gpt-4-1106-preview', temperature=0.5, ...)",
            "",
            "To get the default model, you can leave out the model name.",
            "To see the available models, you can do:",
            ">>> Model.available()",
            "",
        ]
        return "\n".join(help_lines)
|
41
|
+
|
42
|
+
|
43
|
+
class Model(metaclass=Meta):
    """Factory entry point for building language-model instances.

    Note: ``Model(...)`` does not return a ``Model`` object.  ``__new__``
    resolves the model name through the inference-services registry and
    returns an instance of the matching service's model class.
    """

    # Default model name, read from the EDSL config file.
    default_model = CONFIG.get("EDSL_DEFAULT_MODEL")

    # Class-level registry storage; lazily initialized by get_registry().
    _registry: Optional[InferenceServicesCollection] = None

    @classmethod
    def get_registry(cls) -> InferenceServicesCollection:
        """Get the current registry, initializing it with the default if unset."""
        if cls._registry is None:
            from edsl.inference_services.registry import default

            cls._registry = default
        return cls._registry

    @classmethod
    def set_registry(cls, registry: InferenceServicesCollection) -> None:
        """Install *registry* as the class-level registry."""
        cls._registry = registry

    def __new__(
        cls,
        model_name: Optional[str] = None,
        service_name: Optional[InferenceServiceLiteral] = None,
        registry: Optional[InferenceServicesCollection] = None,
        *args,
        **kwargs,
    ):
        """Instantiate a new language model.

        :param model_name: Model name, an integer index into
            ``Model.available(name_only=True)``, or None for the configured
            default model.
        :param service_name: Optional service through which to resolve the model.
        :param registry: Optional registry to install before resolving.
        :returns: An instance of the resolved service's model class.
        """
        if model_name is None:
            # When no name is given, use the default model from the config file.
            model_name = cls.default_model

        if registry is not None:
            cls.set_registry(registry)

        if isinstance(model_name, int):  # a model can be referred to by index
            model_name = cls.available(name_only=True)[model_name]

        factory = cls.get_registry().create_model_factory(
            model_name, service_name=service_name
        )
        return factory(*args, **kwargs)

    @classmethod
    def add_model(cls, service_name, model_name) -> None:
        """Register an additional model under *service_name*."""
        cls.get_registry().add_model(service_name, model_name)

    @classmethod
    def service_classes(cls) -> List["InferenceServiceABC"]:
        """Returns a list of service classes.

        >>> Model.service_classes()
        [...]
        """
        # Fixed: previously returned service *names* (cls.services(name_only=True)),
        # contradicting the method name, docstring, and return annotation.
        return list(cls.get_registry().services)

    @classmethod
    def services(cls, name_only: bool = False) -> List[str]:
        """Returns a list of services, annotated with whether the user has local keys for them.

        :param name_only: If True, return only the service names (skips the
            local-key lookup entirely).
        """
        registered = cls.get_registry().services
        if name_only:
            return PrettyList(
                [r._inference_service_ for r in registered],
                columns=["Service Name"],
            )

        # Only consult the (potentially slow) local key store when the
        # "Local key?" column is actually needed.
        services_with_local_keys = set(cls.key_info().select("service").to_list())

        def has_local_key(service_name: str) -> str:
            return "yes" if service_name in services_with_local_keys else " "

        return PrettyList(
            [
                (r._inference_service_, has_local_key(r._inference_service_))
                for r in registered
            ],
            columns=["Service Name", "Local key?"],
        )

    @classmethod
    def services_with_local_keys(cls) -> set:
        """Return the set of services for which the user has local keys."""
        return set(cls.key_info().select("service").to_list())

    @classmethod
    def key_info(cls, obscure_api_key: bool = True) -> "Dataset":
        """Return a dataset describing the locally-configured API keys.

        :param obscure_api_key: If True, only the first and last four
            characters of each token are shown.
        """
        from edsl.language_models.key_management.KeyLookupCollection import (
            KeyLookupCollection,
        )
        from edsl.scenarios import Scenario, ScenarioList

        klc = KeyLookupCollection()
        klc.add_key_lookup(fetch_order=None)
        sl = ScenarioList()
        for service, entry in list(klc.data.values())[0].items():
            sl.append(Scenario({"service": service} | entry.to_dict()))
        if obscure_api_key:
            for service in sl:
                service["api_token"] = (
                    service["api_token"][:4] + "..." + service["api_token"][-4:]
                )
        return sl.to_dataset()

    @classmethod
    def search_models(cls, search_term: str):
        """Shorthand for ``Model.available(search_term=...)``."""
        return cls.available(search_term=search_term)

    @classmethod
    def all_known_models(cls) -> "AvailableModels":
        """Return every model known to the registry, across all services."""
        return cls.get_registry().available()

    @classmethod
    def available_with_local_keys(cls):
        """Return the known models belonging to services with local keys."""
        services_with_local_keys = set(cls.key_info().select("service").to_list())
        return [
            m
            for m in cls.get_registry().available()
            if m.service_name in services_with_local_keys
        ]

    @classmethod
    def available(
        cls,
        search_term: Optional[str] = None,
        name_only: bool = False,
        service: Optional[str] = None,
    ):
        """List known models, optionally filtered.

        :param search_term: If given, keep only models whose model name or
            service name contains this substring.
        :param name_only: If True, return a one-column list of model names.
        :param service: If given, restrict the listing to this service.
        :raises ValueError: If *service* is not a known service name.
        """
        if service is not None:
            if service not in cls.services(name_only=True):
                raise ValueError(
                    f"Service {service} not found in available services.",
                    f"Available services are: {cls.services()}",
                )

        full_list = cls.get_registry().available(service=service)

        if search_term is None:
            matches = full_list
        else:
            matches = [
                m
                for m in full_list
                if search_term in m.model_name or search_term in m.service_name
            ]

        if name_only:
            return PrettyList(
                [m.model_name for m in matches],
                columns=["Model Name"],
            )
        # Fixed: the searched two-column branch previously listed the
        # unfiltered full_list, silently ignoring the search term.
        return PrettyList(
            [[m.model_name, m.service_name] for m in matches],
            columns=["Model Name", "Service Name"],
        )

    @classmethod
    def check_models(cls, verbose=False):
        """Smoke-test every available model by instantiating it and calling 'hello'.

        :param verbose: If True, print each model's response.
        """
        print("Checking all available models...\n")
        for model in cls.available(name_only=True):
            print(f"Now checking: {model}")
            try:
                m = cls(model)
            except Exception as e:
                print(f"Error creating instance of {model}: {e}")
                continue
            try:
                results = m.hello(verbose)
                if verbose:
                    print(f"Results from model call: {results}")
            except Exception as e:
                print(f"Error calling 'hello' on {model}: {e}")
                continue
            print("OK!")
            print("\n")

    @classmethod
    def example(cls, randomize: bool = False) -> "Model":
        """
        Returns an example Model instance.

        :param randomize: If True, the temperature is set to a random decimal between 0 and 1.
        """
        temperature = 0.5 if not randomize else round(random(), 2)
        model_name = cls.default_model
        return cls(model_name, temperature=temperature)
|
246
|
+
|
247
|
+
|
248
|
+
if __name__ == "__main__":
    # Run the module's doctests (ELLIPSIS lets examples elide long output),
    # then do a quick live smoke test against a real model.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)

    available = Model.available()
    m = Model("gpt-4-1106-preview")
    results = m.execute_model_call("Hello world")
    print(results)
|
edsl/language_models/repair.py
CHANGED
@@ -1,76 +1,144 @@
|
|
1
1
|
import json
|
2
2
|
import asyncio
|
3
|
+
import warnings
|
3
4
|
|
4
5
|
|
5
|
-
async def async_repair(
|
6
|
-
|
6
|
+
async def async_repair(
    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
):
    """Try to repair a malformed JSON string; return ``(parsed_dict, success)``.

    Repair strategies are tried in order:
      1. clean the raw text and parse it directly;
      2. extract a JSON sub-string from the cleaned text;
      3. ask a language model to re-emit the JSON via a QuestionExtract run.

    :param bad_json: The malformed JSON text produced by a model.
    :param error_message: Description of the original failure (not used by the
        repair logic itself; shadowed by a local message when all repairs fail).
    :param user_prompt: The user prompt the original model received; embedded
        in the repair prompt for context.
    :param system_prompt: The system prompt the original model received.
    :param cache: Optional cache passed through to the repair question run.
    :returns: Tuple ``(dict, bool)`` — the repaired dict and a success flag;
        ``({}, False)`` when every strategy fails.
    """
    from edsl.utilities.utilities import clean_json

    # Strategy 1: normalize the text (e.g. strip control characters) and parse.
    s = clean_json(bad_json)

    try:
        # this is the OpenAI version, but that's fine
        valid_dict = json.loads(s)
        success = True
    except json.JSONDecodeError:
        valid_dict = {}
        success = False
        # print("Replacing control characters didn't work. Trying extracting the sub-string.")
    else:
        return valid_dict, success

    # Strategy 2: look for a JSON object embedded inside the cleaned text.
    try:
        from edsl.utilities.repair_functions import extract_json_from_string

        valid_dict = extract_json_from_string(s)
        success = True
    except ValueError:
        valid_dict = {}
        success = False
    else:
        return valid_dict, success

    # Strategy 3: ask a language model to rewrite the broken JSON.
    from edsl.language_models.model import Model

    # NOTE(review): `m` appears unused — q.run_async presumably falls back to
    # the default model; confirm whether this instantiation is still needed.
    m = Model()

    from edsl.questions.QuestionExtract import QuestionExtract

    with warnings.catch_warnings():
        # Suppress QuestionExtract's UserWarning noise during the repair run.
        warnings.simplefilter("ignore", UserWarning)

        q = QuestionExtract(
            question_text="""
        A language model was supposed to respond to a question.
        The response should have been JSON object with an answer to a question and some commentary.

        It should have retured a string like this:

        '{'answer': 'The answer to the question.', 'comment': 'Some commentary.'}'

        or:

        '{'answer': 'The answer to the question.'}'

        The answer field is very like an integer number. The comment field is always string.

        You job is to return just the repaired JSON object that the model should have returned, properly formatted.

        - It might have included some preliminary comments.
        - It might have included some control characters.
        - It might have included some extraneous text.

        DO NOT include any extraneous text in your response. Just return the repaired JSON object.
        Do not preface the JSON object with any text. Just return the JSON object.

        Bad answer: """
            + str(bad_json)
            + "The model received a user prompt of: '"
            + str(user_prompt)
            + """'
        The model received a system prompt of: ' """
            + str(system_prompt)
            + """
        '
        Please return the repaired JSON object, following the instructions the original model should have followed, though
        using 'new_answer' a nd 'new_comment' as the keys.""",
            answer_template={
                "new_answer": "<number, string, list, etc.>",
                "new_comment": "Model's comments",
            },
            question_name="model_repair",
        )

    results = await q.run_async(cache=cache)

    try:
        # this is the OpenAI version, but that's fine
        valid_dict = json.loads(json.dumps(results))
        success = True
        # this is to deal with the fact that the model returns the answer and comment as new_answer and new_comment
        valid_dict["answer"] = valid_dict.pop("new_answer")
        valid_dict["comment"] = valid_dict.pop("new_comment")
    except json.JSONDecodeError:
        # All three strategies failed: report what the repair model returned.
        valid_dict = {}
        success = False
        from rich import print
        from rich.console import Console
        from rich.syntax import Syntax

        console = Console()
        error_message = (
            f"All repairs. failed. LLM Model given [red]{str(bad_json)}[/red]"
        )
        console.print(" " + error_message)
        model_returned = results["choices"][0]["message"]["content"]
        console.print(f"LLM Model returned: [blue]{model_returned}[/blue]")

    return valid_dict, success
|
34
111
|
|
35
112
|
|
36
|
-
def repair_wrapper(
    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
):
    """Run :func:`async_repair` from synchronous code, adapting to the event-loop state.

    Three situations are handled:
    - a loop is already running: schedule the repair as a task and return the Task
      (the caller is responsible for awaiting it);
    - a loop exists but is idle: drive the coroutine to completion and return its result;
    - no loop is available (RuntimeError): create and install a fresh loop, then run.

    Returns either an ``asyncio.Task`` or the ``(valid_dict, success)`` tuple
    produced by ``async_repair``.
    """
    # Single place to build the repair coroutine's argument tuple — the same
    # call is needed on all three paths.
    repair_args = (bad_json, error_message, user_prompt, system_prompt, cache)
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            # Inside a running loop we must not block; hand back a Task instead.
            return loop.create_task(async_repair(*repair_args))
        # Idle loop: safe to drive the coroutine synchronously.
        return loop.run_until_complete(async_repair(*repair_args))
    except RuntimeError:
        # No usable event loop in this thread — make one and install it.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop.run_until_complete(async_repair(*repair_args))
|
70
136
|
|
71
137
|
|
72
|
-
|
73
|
-
|
138
|
+
def repair(
    bad_json, error_message="", user_prompt=None, system_prompt=None, cache=None
):
    """Public synchronous entry point for JSON repair.

    Forwards every argument unchanged to :func:`repair_wrapper`, which decides
    how to execute the underlying async repair given the current event-loop state.
    """
    return repair_wrapper(
        bad_json,
        error_message=error_message,
        user_prompt=user_prompt,
        system_prompt=system_prompt,
        cache=cache,
    )
|
74
142
|
|
75
143
|
|
76
144
|
if __name__ == "__main__":
|
@@ -0,0 +1,65 @@
|
|
1
|
+
import asyncio
|
2
|
+
from typing import Any, Optional, List
|
3
|
+
from edsl.enums import InferenceServiceType
|
4
|
+
|
5
|
+
|
6
|
+
def create_survey(num_questions: int, chained: bool = True, take_scenario=False):
    """Build a test Survey of ``num_questions`` free-text questions.

    Each question's text embeds an ``XX<i>XX`` marker so a test language model
    can recover the question index from its prompt. With ``chained=True``,
    every question after the first gets targeted memory of its predecessor.
    With ``take_scenario=True`` the question text also contains a scenario
    placeholder.
    """
    from edsl.surveys.Survey import Survey
    from edsl.questions.QuestionFreeText import QuestionFreeText

    survey = Survey()
    for idx in range(num_questions):
        # NOTE(review): the rendered placeholder is single-braced
        # "{scenario_value }" (the f-string collapses the doubled braces) —
        # presumably what edsl's scenario templating expects; confirm upstream.
        text = (
            f"XX{idx}XX and {{scenario_value }}" if take_scenario else f"XX{idx}XX"
        )
        question = QuestionFreeText(
            question_text=text,
            question_name=f"question_{idx}",
        )
        survey.add_question(question)
        # Link each question (after the first) back to the one before it.
        if chained and idx > 0:
            survey.add_targeted_memory(f"question_{idx}", f"question_{idx - 1}")
    return survey
|
25
|
+
|
26
|
+
|
27
|
+
def create_language_model(
    exception: Exception, fail_at_number: int, never_ending=False
):
    """Factory for a fake LanguageModel class used in tests.

    The returned class answers every call with a canned response, except that
    it raises (or awaits) ``exception`` when the question number parsed from
    the prompt equals ``fail_at_number``. With ``never_ending=True`` every
    call sleeps forever, which lets tests exercise timeout handling.

    Returns the class itself (not an instance).
    """
    from edsl.language_models.LanguageModel import LanguageModel

    class LanguageModelFromUtilities(LanguageModel):
        _model_ = "test"
        _parameters_ = {"temperature": 0.5}
        _inference_service_ = InferenceServiceType.TEST.value
        # Paths into the canned response dict returned below.
        key_sequence = ["message", 0, "text"]
        usage_sequence = ["usage"]
        input_token_name = "prompt_tokens"
        output_token_name = "completion_tokens"
        # Effectively unlimited rate/token caps so throttling never interferes.
        _rpm = 1000000000000
        _tpm = 1000000000000

        async def async_execute_model_call(
            self,
            user_prompt: str,
            system_prompt: str,
            files_list: Optional[List[Any]] = None,
        ) -> dict[str, Any]:
            # The survey builder embeds the index as XX<i>XX; recover it here.
            current_question = int(user_prompt.split("XX")[1])
            await asyncio.sleep(0.1)
            if never_ending:
                # Sleep forever so callers can test their timeout machinery.
                await asyncio.sleep(float("inf"))
            if current_question == fail_at_number:
                # ``exception`` may be an async callable or an exception instance.
                if asyncio.iscoroutinefunction(exception):
                    await exception()
                else:
                    raise exception
            return {
                "message": [{"text": "SPAM!"}],
                "usage": {"prompt_tokens": 1, "completion_tokens": 1},
            }

    return LanguageModelFromUtilities
|