edsl-0.1.39.dev2-py3-none-any.whl → edsl-0.1.39.dev3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (334)
  1. edsl/Base.py +332 -385
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -57
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -1079
  7. edsl/agents/AgentList.py +413 -551
  8. edsl/agents/Invigilator.py +233 -285
  9. edsl/agents/InvigilatorBase.py +270 -254
  10. edsl/agents/PromptConstructor.py +354 -252
  11. edsl/agents/__init__.py +3 -2
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +157 -177
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -59
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -1090
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -562
  37. edsl/data/CacheEntry.py +233 -230
  38. edsl/data/CacheHandler.py +149 -170
  39. edsl/data/RemoteCacheSync.py +78 -78
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -5
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -74
  44. edsl/enums.py +175 -195
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -54
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -109
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -29
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -84
  61. edsl/inference_services/AwsBedrock.py +120 -118
  62. edsl/inference_services/AzureAI.py +217 -215
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -139
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -80
  67. edsl/inference_services/InferenceServicesCollection.py +97 -122
  68. edsl/inference_services/MistralAIService.py +123 -120
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -221
  71. edsl/inference_services/PerplexityService.py +163 -160
  72. edsl/inference_services/TestService.py +89 -92
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -41
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -43
  79. edsl/jobs/Jobs.py +898 -757
  80. edsl/jobs/JobsChecks.py +147 -172
  81. edsl/jobs/JobsPrompts.py +268 -270
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -287
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -104
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -283
  87. edsl/jobs/interviews/Interview.py +661 -358
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -421
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -330
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -244
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -449
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -161
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -0
  106. edsl/language_models/LanguageModel.py +668 -571
  107. edsl/language_models/ModelList.py +155 -153
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -2
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -180
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -0
  115. edsl/language_models/utilities.py +64 -65
  116. edsl/notebooks/Notebook.py +258 -263
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -352
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -334
  121. edsl/questions/QuestionBase.py +664 -509
  122. edsl/questions/QuestionBaseGenMixin.py +161 -165
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -221
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -182
  127. edsl/questions/QuestionFreeText.py +114 -113
  128. edsl/questions/QuestionFunctional.py +166 -166
  129. edsl/questions/QuestionList.py +231 -229
  130. edsl/questions/QuestionMultipleChoice.py +286 -330
  131. edsl/questions/QuestionNumerical.py +153 -151
  132. edsl/questions/QuestionRank.py +324 -314
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -200
  136. edsl/questions/SimpleAskMixin.py +73 -74
  137. edsl/questions/__init__.py +26 -27
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -90
  142. edsl/questions/derived/QuestionTopK.py +93 -93
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -427
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -177
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -108
  177. edsl/results/Dataset.py +424 -587
  178. edsl/results/DatasetExportMixin.py +731 -653
  179. edsl/results/DatasetTree.py +275 -295
  180. edsl/results/Result.py +465 -451
  181. edsl/results/Results.py +1165 -1172
  182. edsl/results/ResultsDBMixin.py +238 -0
  183. edsl/results/ResultsExportMixin.py +43 -45
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -145
  188. edsl/results/TableDisplay.py +198 -125
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +77 -77
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -511
  193. edsl/scenarios/Scenario.py +601 -498
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -65
  195. edsl/scenarios/ScenarioJoin.py +127 -131
  196. edsl/scenarios/ScenarioList.py +1287 -1430
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -45
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -239
  199. edsl/scenarios/__init__.py +4 -3
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -521
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -327
  210. edsl/surveys/RuleCollection.py +387 -385
  211. edsl/surveys/Survey.py +1801 -1229
  212. edsl/surveys/SurveyCSS.py +261 -273
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/{SurveyFlowVisualization.py → SurveyFlowVisualizationMixin.py} +179 -181
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -5
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -60
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -48
  220. edsl/surveys/instructions/Instruction.py +65 -56
  221. edsl/surveys/instructions/InstructionCollection.py +77 -82
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -19
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/utilities/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -436
  252. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev3.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev3.dist-info}/METADATA +10 -12
  254. edsl-0.1.39.dev3.dist-info/RECORD +277 -0
  255. edsl/agents/QuestionInstructionPromptBuilder.py +0 -128
  256. edsl/agents/QuestionOptionProcessor.py +0 -172
  257. edsl/agents/QuestionTemplateReplacementsBuilder.py +0 -137
  258. edsl/coop/CoopFunctionsMixin.py +0 -15
  259. edsl/coop/ExpectedParrotKeyHandler.py +0 -125
  260. edsl/exceptions/inference_services.py +0 -5
  261. edsl/inference_services/AvailableModelCacheHandler.py +0 -184
  262. edsl/inference_services/AvailableModelFetcher.py +0 -209
  263. edsl/inference_services/ServiceAvailability.py +0 -135
  264. edsl/inference_services/data_structures.py +0 -62
  265. edsl/jobs/AnswerQuestionFunctionConstructor.py +0 -188
  266. edsl/jobs/FetchInvigilator.py +0 -40
  267. edsl/jobs/InterviewTaskManager.py +0 -98
  268. edsl/jobs/InterviewsConstructor.py +0 -48
  269. edsl/jobs/JobsComponentConstructor.py +0 -189
  270. edsl/jobs/JobsRemoteInferenceLogger.py +0 -239
  271. edsl/jobs/RequestTokenEstimator.py +0 -30
  272. edsl/jobs/buckets/TokenBucketAPI.py +0 -211
  273. edsl/jobs/buckets/TokenBucketClient.py +0 -191
  274. edsl/jobs/decorators.py +0 -35
  275. edsl/jobs/jobs_status_enums.py +0 -9
  276. edsl/jobs/loggers/HTMLTableJobLogger.py +0 -304
  277. edsl/language_models/ComputeCost.py +0 -63
  278. edsl/language_models/PriceManager.py +0 -127
  279. edsl/language_models/RawResponseHandler.py +0 -106
  280. edsl/language_models/ServiceDataSources.py +0 -0
  281. edsl/language_models/key_management/KeyLookup.py +0 -63
  282. edsl/language_models/key_management/KeyLookupBuilder.py +0 -273
  283. edsl/language_models/key_management/KeyLookupCollection.py +0 -38
  284. edsl/language_models/key_management/__init__.py +0 -0
  285. edsl/language_models/key_management/models.py +0 -131
  286. edsl/notebooks/NotebookToLaTeX.py +0 -142
  287. edsl/questions/ExceptionExplainer.py +0 -77
  288. edsl/questions/HTMLQuestion.py +0 -103
  289. edsl/questions/LoopProcessor.py +0 -149
  290. edsl/questions/QuestionMatrix.py +0 -265
  291. edsl/questions/ResponseValidatorFactory.py +0 -28
  292. edsl/questions/templates/matrix/__init__.py +0 -1
  293. edsl/questions/templates/matrix/answering_instructions.jinja +0 -5
  294. edsl/questions/templates/matrix/question_presentation.jinja +0 -20
  295. edsl/results/MarkdownToDocx.py +0 -122
  296. edsl/results/MarkdownToPDF.py +0 -111
  297. edsl/results/TextEditor.py +0 -50
  298. edsl/results/smart_objects.py +0 -96
  299. edsl/results/table_data_class.py +0 -12
  300. edsl/results/table_renderers.py +0 -118
  301. edsl/scenarios/ConstructDownloadLink.py +0 -109
  302. edsl/scenarios/DirectoryScanner.py +0 -96
  303. edsl/scenarios/DocumentChunker.py +0 -102
  304. edsl/scenarios/DocxScenario.py +0 -16
  305. edsl/scenarios/PdfExtractor.py +0 -40
  306. edsl/scenarios/ScenarioSelector.py +0 -156
  307. edsl/scenarios/file_methods.py +0 -85
  308. edsl/scenarios/handlers/__init__.py +0 -13
  309. edsl/scenarios/handlers/csv.py +0 -38
  310. edsl/scenarios/handlers/docx.py +0 -76
  311. edsl/scenarios/handlers/html.py +0 -37
  312. edsl/scenarios/handlers/json.py +0 -111
  313. edsl/scenarios/handlers/latex.py +0 -5
  314. edsl/scenarios/handlers/md.py +0 -51
  315. edsl/scenarios/handlers/pdf.py +0 -68
  316. edsl/scenarios/handlers/png.py +0 -39
  317. edsl/scenarios/handlers/pptx.py +0 -105
  318. edsl/scenarios/handlers/py.py +0 -294
  319. edsl/scenarios/handlers/sql.py +0 -313
  320. edsl/scenarios/handlers/sqlite.py +0 -149
  321. edsl/scenarios/handlers/txt.py +0 -33
  322. edsl/surveys/ConstructDAG.py +0 -92
  323. edsl/surveys/EditSurvey.py +0 -221
  324. edsl/surveys/InstructionHandler.py +0 -100
  325. edsl/surveys/MemoryManagement.py +0 -72
  326. edsl/surveys/RuleManager.py +0 -172
  327. edsl/surveys/Simulator.py +0 -75
  328. edsl/surveys/SurveyToApp.py +0 -141
  329. edsl/utilities/PrettyList.py +0 -56
  330. edsl/utilities/is_notebook.py +0 -18
  331. edsl/utilities/is_valid_variable_name.py +0 -11
  332. edsl/utilities/remove_edsl_version.py +0 -24
  333. edsl-0.1.39.dev2.dist-info/RECORD +0 -352
  334. {edsl-0.1.39.dev2.dist-info → edsl-0.1.39.dev3.dist-info}/WHEEL +0 -0
--- edsl/inference_services/AvailableModelFetcher.py
+++ /dev/null
@@ -1,209 +0,0 @@
- from typing import Any, List, Tuple, Optional, Dict, TYPE_CHECKING, Union, Generator
- from concurrent.futures import ThreadPoolExecutor, as_completed
- from collections import UserList
-
- from edsl.inference_services.ServiceAvailability import ServiceAvailability
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.inference_services.data_structures import ModelNamesList
- from edsl.enums import InferenceServiceLiteral
-
- from edsl.inference_services.data_structures import LanguageModelInfo
- from edsl.inference_services.AvailableModelCacheHandler import (
-     AvailableModelCacheHandler,
- )
-
-
- from edsl.inference_services.data_structures import AvailableModels
-
-
- class AvailableModelFetcher:
-     """Fetches available models from the various services with JSON caching."""
-
-     service_availability = ServiceAvailability()
-     CACHE_VALIDITY_HOURS = 48  # Cache validity period in hours
-
-     def __init__(
-         self,
-         services: List["InferenceServiceABC"],
-         added_models: Dict[str, List[str]],
-         verbose: bool = False,
-         use_cache: bool = True,
-     ):
-         self.services = services
-         self.added_models = added_models
-         self._service_map = {
-             service._inference_service_: service for service in services
-         }
-         self.verbose = verbose
-         if use_cache:
-             self.cache_handler = AvailableModelCacheHandler()
-         else:
-             self.cache_handler = None
-
-     @property
-     def num_cache_entries(self):
-         return self.cache_handler.num_cache_entries
-
-     @property
-     def path_to_db(self):
-         return self.cache_handler.path_to_db
-
-     def reset_cache(self):
-         if self.cache_handler:
-             self.cache_handler.reset_cache()
-
-     def available(
-         self,
-         service: Optional[InferenceServiceABC] = None,
-         force_refresh: bool = False,
-     ) -> List[LanguageModelInfo]:
-         """
-         Get available models from all services, using cached data when available.
-
-         :param service: Optional[InferenceServiceABC] - If specified, only fetch models for this service.
-
-         >>> from edsl.inference_services.OpenAIService import OpenAIService
-         >>> af = AvailableModelFetcher([OpenAIService()], {})
-         >>> af.available(service="openai")
-         [LanguageModelInfo(model_name='...', service_name='openai'), ...]
-
-         Returns a list of [model, service_name, index] entries.
-         """
-
-         if service:  # they passed a specific service
-             matching_models, _ = self.get_available_models_by_service(
-                 service=service, force_refresh=force_refresh
-             )
-             return matching_models
-
-         # Nope, we need to fetch them all
-         all_models = self._get_all_models()
-
-         # if self.cache_handler:
-         #     self.cache_handler.add_models_to_cache(all_models)
-
-         return all_models
-
-     def get_available_models_by_service(
-         self,
-         service: Union["InferenceServiceABC", InferenceServiceLiteral],
-         force_refresh: bool = False,
-     ) -> Tuple[AvailableModels, InferenceServiceLiteral]:
-         """Get models for a single service.
-
-         :param service: InferenceServiceABC - e.g., OpenAIService or "openai"
-         :return: Tuple[List[LanguageModelInfo], InferenceServiceLiteral]
-         """
-         if isinstance(service, str):
-             service = self._fetch_service_by_service_name(service)
-
-         if not force_refresh:
-             models_from_cache = self.cache_handler.models(
-                 service=service._inference_service_
-             )
-             if self.verbose:
-                 print(
-                     "Searching cache for models with service name:",
-                     service._inference_service_,
-                 )
-                 print("Got models from cache:", models_from_cache)
-         else:
-             models_from_cache = None
-
-         if models_from_cache:
-             # print(f"Models from cache for {service}: {models_from_cache}")
-             # print(hasattr(models_from_cache[0], "service_name"))
-             return models_from_cache, service._inference_service_
-         else:
-             return self.get_available_models_by_service_fresh(service)
-
-     def get_available_models_by_service_fresh(
-         self, service: Union["InferenceServiceABC", InferenceServiceLiteral]
-     ) -> Tuple[AvailableModels, InferenceServiceLiteral]:
-         """Get models for a single service. This method always fetches fresh data.
-
-         :param service: InferenceServiceABC - e.g., OpenAIService or "openai"
-         :return: Tuple[List[LanguageModelInfo], InferenceServiceLiteral]
-         """
-         if isinstance(service, str):
-             service = self._fetch_service_by_service_name(service)
-
-         service_models: ModelNamesList = (
-             self.service_availability.get_service_available(service, warn=False)
-         )
-         service_name = service._inference_service_
-
-         models_list = AvailableModels(
-             [
-                 LanguageModelInfo(
-                     model_name=model_name,
-                     service_name=service_name,
-                 )
-                 for model_name in service_models
-             ]
-         )
-         self.cache_handler.add_models_to_cache(models_list)  # update the cache
-         return models_list, service_name
-
-     def _fetch_service_by_service_name(
-         self, service_name: InferenceServiceLiteral
-     ) -> "InferenceServiceABC":
-         """The service name is the _inference_service_ attribute of the service."""
-         if service_name in self._service_map:
-             return self._service_map[service_name]
-         raise ValueError(f"Service {service_name} not found")
-
-     def _get_all_models(self, force_refresh=False) -> List[LanguageModelInfo]:
-         all_models = []
-         with ThreadPoolExecutor(max_workers=min(len(self.services), 10)) as executor:
-             future_to_service = {
-                 executor.submit(
-                     self.get_available_models_by_service, service, force_refresh
-                 ): service
-                 for service in self.services
-             }
-
-             for future in as_completed(future_to_service):
-                 try:
-                     models, service_name = future.result()
-                     all_models.extend(models)
-
-                     # Add any additional models for this service
-                     for model in self.added_models.get(service_name, []):
-                         all_models.append(
-                             LanguageModelInfo(
-                                 model_name=model, service_name=service_name
-                             )
-                         )
-
-                 except Exception as exc:
-                     print(f"Service query failed: {exc}")
-                     continue
-
-         return AvailableModels(all_models)
-
-
- def main():
-     from edsl.inference_services.OpenAIService import OpenAIService
-
-     af = AvailableModelFetcher([OpenAIService()], {}, verbose=True)
-     # print(af.available(service="openai"))
-     all_models = AvailableModelFetcher([OpenAIService()], {})._get_all_models(
-         force_refresh=True
-     )
-     print(all_models)
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
-     # main()
-
-     # from edsl.inference_services.OpenAIService import OpenAIService
-
-     # af = AvailableModelFetcher([OpenAIService()], {}, verbose=True)
-     # # print(af.available(service="openai"))
-
-     # all_models = AvailableModelFetcher([OpenAIService()], {})._get_all_models()
-     # print(all_models)
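A minimal usage sketch, assembled from the deleted module's own doctest and main() above. It assumes edsl 0.1.39.dev2 (where AvailableModelFetcher still exists) plus a configured OpenAI API key, and is illustrative rather than part of the diff:

from edsl.inference_services.OpenAIService import OpenAIService
from edsl.inference_services.AvailableModelFetcher import AvailableModelFetcher

# Cache-first lookup for a single service, passed by name or by service object.
fetcher = AvailableModelFetcher([OpenAIService()], added_models={}, verbose=True)
openai_models = fetcher.available(service="openai")

# With no service argument, the fetcher fans out to every registered service
# through a ThreadPoolExecutor and merges the results.
all_models = fetcher.available()

print(openai_models)
print(all_models)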
--- edsl/inference_services/ServiceAvailability.py
+++ /dev/null
@@ -1,135 +0,0 @@
- from enum import Enum
- from typing import List, Optional, TYPE_CHECKING
- from functools import partial
- import warnings
-
- from edsl.inference_services.data_structures import AvailableModels, ModelNamesList
-
- if TYPE_CHECKING:
-     from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
-
-
- class ModelSource(Enum):
-     LOCAL = "local"
-     COOP = "coop"
-     CACHE = "cache"
-
-
- class ServiceAvailability:
-     """This class is responsible for fetching the available models from different sources."""
-
-     _coop_model_list = None
-
-     def __init__(self, source_order: Optional[List[ModelSource]] = None):
-         """
-         Initialize with custom source order.
-         Default order is LOCAL -> COOP -> CACHE
-         """
-         self.source_order = source_order or [
-             ModelSource.LOCAL,
-             ModelSource.COOP,
-             ModelSource.CACHE,
-         ]
-
-         # Map sources to their fetch functions
-         self._source_fetchers = {
-             ModelSource.LOCAL: self._fetch_from_local_service,
-             ModelSource.COOP: self._fetch_from_coop,
-             ModelSource.CACHE: self._fetch_from_cache,
-         }
-
-     @classmethod
-     def models_from_coop(cls) -> AvailableModels:
-         if not cls._coop_model_list:
-             from edsl.coop.coop import Coop
-
-             c = Coop()
-             coop_model_list = c.fetch_models()
-             cls._coop_model_list = coop_model_list
-         return cls._coop_model_list
-
-     def get_service_available(
-         self, service: "InferenceServiceABC", warn: bool = False
-     ) -> ModelNamesList:
-         """
-         Try to fetch available models from sources in specified order.
-         Returns first successful result.
-         """
-         last_error = None
-
-         for source in self.source_order:
-             try:
-                 fetch_func = partial(self._source_fetchers[source], service)
-                 result = fetch_func()
-
-                 # Cache successful result
-                 service._models_list_cache = result
-                 return result
-
-             except Exception as e:
-                 last_error = e
-                 if warn:
-                     self._warn_source_failed(service, source)
-                 continue
-
-         # If we get here, all sources failed
-         raise RuntimeError(
-             f"All sources failed to fetch models. Last error: {last_error}"
-         )
-
-     @staticmethod
-     def _fetch_from_local_service(service: "InferenceServiceABC") -> ModelNamesList:
-         """Attempt to fetch models directly from the service."""
-         return service.available()
-
-     @classmethod
-     def _fetch_from_coop(cls, service: "InferenceServiceABC") -> ModelNamesList:
-         """Fetch models from Coop."""
-         models_from_coop = cls.models_from_coop()
-         return models_from_coop.get(service._inference_service_, [])
-
-     @staticmethod
-     def _fetch_from_cache(service: "InferenceServiceABC") -> ModelNamesList:
-         """Fetch models from local cache."""
-         from edsl.inference_services.models_available_cache import models_available
-
-         return models_available.get(service._inference_service_, [])
-
-     def _warn_source_failed(self, service: "InferenceServiceABC", source: ModelSource):
-         """Display appropriate warning message based on failed source."""
-         messages = {
-             ModelSource.LOCAL: f"""Error getting models for {service._inference_service_}.
-             Check that you have properly stored your Expected Parrot API key and activated remote inference,
-             or stored your own API keys for the language models that you want to use.
-             See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
-             Trying next source.""",
-             ModelSource.COOP: f"Error getting models from Coop for {service._inference_service_}. Trying next source.",
-             ModelSource.CACHE: f"Error getting models from cache for {service._inference_service_}.",
-         }
-         warnings.warn(messages[source], UserWarning)
-
-
- if __name__ == "__main__":
-     # sa = ServiceAvailability()
-     # models_from_coop = sa.models_from_coop()
-     # print(models_from_coop)
-     from edsl.inference_services.OpenAIService import OpenAIService
-
-     openai_models = ServiceAvailability._fetch_from_local_service(OpenAIService())
-     print(openai_models)
-
-     # Example usage:
-     """
-     # Default order (LOCAL -> COOP -> CACHE)
-     availability = ServiceAvailability()
-
-     # Custom order (COOP -> LOCAL -> CACHE)
-     availability_coop_first = ServiceAvailability([
-         ModelSource.COOP,
-         ModelSource.LOCAL,
-         ModelSource.CACHE
-     ])
-
-     # Get available models using custom order
-     models = availability_coop_first.get_service_available(service, warn=True)
-     """
--- edsl/inference_services/data_structures.py
+++ /dev/null
@@ -1,62 +0,0 @@
- from collections import UserDict, defaultdict, UserList
- from typing import Union
- from edsl.enums import InferenceServiceLiteral
- from dataclasses import dataclass
-
-
- @dataclass
- class LanguageModelInfo:
-     model_name: str
-     service_name: str
-
-     def __getitem__(self, key: int) -> str:
-         import warnings
-
-         warnings.warn(
-             "Accessing LanguageModelInfo via index is deprecated. "
-             "Please use .model_name, .service_name, or .index attributes instead.",
-             DeprecationWarning,
-             stacklevel=2,
-         )
-
-         if key == 0:
-             return self.model_name
-         elif key == 1:
-             return self.service_name
-         else:
-             raise IndexError("Index out of range")
-
-
- class ModelNamesList(UserList):
-     pass
-
-
- class AvailableModels(UserList):
-     def __init__(self, data: list) -> None:
-         super().__init__(data)
-
-     def __contains__(self, model_name: str) -> bool:
-         for model_entry in self:
-             if model_entry.model_name == model_name:
-                 return True
-         return False
-
-
- class ServiceToModelsMapping(UserDict):
-     def __init__(self, data: dict) -> None:
-         super().__init__(data)
-
-     @property
-     def service_names(self) -> list[str]:
-         return list(self.data.keys())
-
-     def _validate_service_names(self):
-         for service in self.service_names:
-             if service not in InferenceServiceLiteral:
-                 raise ValueError(f"Invalid service name: {service}")
-
-     def model_to_services(self) -> dict:
-         self._model_to_service = defaultdict(list)
-         for service, models in self.data.items():
-             for model in models:
-                 self._model_to_service[model].append(service)
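A self-contained sketch of the membership semantics defined above. The class bodies are condensed from the deleted data_structures.py rather than imported, so it runs without edsl installed; the model names are illustrative:

from collections import UserList
from dataclasses import dataclass


@dataclass
class LanguageModelInfo:
    model_name: str
    service_name: str


class AvailableModels(UserList):
    def __contains__(self, model_name: str) -> bool:
        # As in the deleted class, membership is tested against model_name only.
        return any(entry.model_name == model_name for entry in self)


models = AvailableModels([LanguageModelInfo("gpt-4o", "openai")])
print("gpt-4o" in models)          # True
print("unknown-model" in models)   # False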
--- edsl/jobs/AnswerQuestionFunctionConstructor.py
+++ /dev/null
@@ -1,188 +0,0 @@
- import copy
- import asyncio
-
- from typing import Union, Type, Callable, TYPE_CHECKING
-
- if TYPE_CHECKING:
-     from edsl.questions.QuestionBase import QuestionBase
-
- from edsl.surveys.base import EndOfSurvey
- from edsl.jobs.tasks.task_status_enum import TaskStatus
-
- from edsl.jobs.FetchInvigilator import FetchInvigilator
- from edsl.exceptions.language_models import LanguageModelNoResponseError
- from edsl.exceptions.questions import QuestionAnswerValidationError
- from edsl.data_transfer_models import AgentResponseDict, EDSLResultObjectInput
-
- from edsl.jobs.Answers import Answers
-
-
- class AnswerQuestionFunctionConstructor:
-     def __init__(self, interview):
-         self.interview = interview
-         self.had_language_model_no_response_error = False
-         self.question_index = self.interview.to_index
-
-         self.skip_function: Callable = (
-             self.interview.survey.rule_collection.skip_question_before_running
-         )
-
-     def _combined_answers(self) -> Answers:
-         return self.answers | self.interview.scenario | self.interview.agent["traits"]
-
-     @property
-     def answers(self) -> Answers:
-         return self.interview.answers
-
-     def _skip_this_question(self, current_question: "QuestionBase") -> bool:
-         current_question_index = self.question_index[current_question.question_name]
-         combined_answers = self._combined_answers()
-         return self.skip_function(current_question_index, combined_answers)
-
-     def _handle_exception(
-         self, e: Exception, invigilator: "InvigilatorBase", task=None
-     ):
-         from edsl.jobs.interviews.InterviewExceptionEntry import InterviewExceptionEntry
-
-         answers = copy.copy(self.answers)  # copy to freeze the answers here for logging
-         exception_entry = InterviewExceptionEntry(
-             exception=e,
-             invigilator=invigilator,
-             answers=answers,
-         )
-         if task:
-             task.task_status = TaskStatus.FAILED
-         self.interview.exceptions.add(
-             invigilator.question.question_name, exception_entry
-         )
-
-         if self.interview.raise_validation_errors and isinstance(
-             e, QuestionAnswerValidationError
-         ):
-             raise e
-
-         stop_on_exception = getattr(self.interview, "stop_on_exception", False)
-         if stop_on_exception:
-             raise e
-
-     def _cancel_skipped_questions(self, current_question: "QuestionBase") -> None:
-         current_question_index: int = self.interview.to_index[
-             current_question.question_name
-         ]
-         answers = (
-             self.answers | self.interview.scenario | self.interview.agent["traits"]
-         )
-
-         # Get the index of the next question, which could also be the end of the survey
-         next_question: Union[
-             int, EndOfSurvey
-         ] = self.interview.survey.rule_collection.next_question(
-             q_now=current_question_index,
-             answers=answers,
-         )
-
-         def cancel_between(start, end):
-             for i in range(start, end):
-                 self.interview.tasks[i].cancel()
-
-         if (next_question_index := next_question.next_q) == EndOfSurvey:
-             cancel_between(
-                 current_question_index + 1, len(self.interview.survey.questions)
-             )
-             return
-
-         if next_question_index > (current_question_index + 1):
-             cancel_between(current_question_index + 1, next_question_index)
-
-     def __call__(self):
-         from edsl.config import CONFIG
-
-         EDSL_BACKOFF_START_SEC = float(CONFIG.get("EDSL_BACKOFF_START_SEC"))
-         EDSL_BACKOFF_MAX_SEC = float(CONFIG.get("EDSL_BACKOFF_MAX_SEC"))
-         EDSL_MAX_ATTEMPTS = int(CONFIG.get("EDSL_MAX_ATTEMPTS"))
-
-         from tenacity import (
-             retry,
-             stop_after_attempt,
-             wait_exponential,
-             retry_if_exception_type,
-             RetryError,
-         )
-
-         async def answer_question_and_record_task(
-             *,
-             question: "QuestionBase",
-             task=None,
-         ) -> "AgentResponseDict":
-             @retry(
-                 stop=stop_after_attempt(EDSL_MAX_ATTEMPTS),
-                 wait=wait_exponential(
-                     multiplier=EDSL_BACKOFF_START_SEC, max=EDSL_BACKOFF_MAX_SEC
-                 ),
-                 retry=retry_if_exception_type(LanguageModelNoResponseError),
-                 reraise=True,
-             )
-             async def attempt_answer():
-                 invigilator = FetchInvigilator(self.interview)(question)
-
-                 if self._skip_this_question(question):
-                     return invigilator.get_failed_task_result(
-                         failure_reason="Question skipped."
-                     )
-
-                 try:
-                     response: EDSLResultObjectInput = (
-                         await invigilator.async_answer_question()
-                     )
-                     if response.validated:
-                         self.answers.add_answer(response=response, question=question)
-                         self._cancel_skipped_questions(question)
-                     else:
-                         if (
-                             hasattr(response, "exception_occurred")
-                             and response.exception_occurred
-                         ):
-                             raise response.exception_occurred
-
-                 except QuestionAnswerValidationError as e:
-                     self._handle_exception(e, invigilator, task)
-                     return invigilator.get_failed_task_result(
-                         failure_reason="Question answer validation failed."
-                     )
-
-                 except asyncio.TimeoutError as e:
-                     self._handle_exception(e, invigilator, task)
-                     had_language_model_no_response_error = True
-                     raise LanguageModelNoResponseError(
-                         f"Language model timed out for question '{question.question_name}.'"
-                     )
-
-                 except Exception as e:
-                     self._handle_exception(e, invigilator, task)
-
-                 if "response" not in locals():
-                     had_language_model_no_response_error = True
-                     raise LanguageModelNoResponseError(
-                         f"Language model did not return a response for question '{question.question_name}.'"
-                     )
-
-                 if (
-                     question.question_name in self.interview.exceptions
-                     and had_language_model_no_response_error
-                 ):
-                     self.interview.exceptions.record_fixed_question(
-                         question.question_name
-                     )
-
-                 return response
-
-             try:
-                 return await attempt_answer()
-             except RetryError as retry_error:
-                 original_error = retry_error.last_attempt.exception()
-                 self._handle_exception(
-                     original_error, FetchInvigilator(self.interview)(question), task
-                 )
-                 raise original_error
-
-         return answer_question_and_record_task
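The core of the deleted __call__ above is a tenacity retry loop around each question. A stand-alone sketch of that backoff pattern follows; the NoResponseError stand-in and the constant values are illustrative substitutes for edsl's LanguageModelNoResponseError and the EDSL_* CONFIG entries:

import asyncio

from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


class NoResponseError(Exception):
    """Stand-in for edsl's LanguageModelNoResponseError."""


MAX_ATTEMPTS = 3         # plays the role of EDSL_MAX_ATTEMPTS (value is illustrative)
BACKOFF_START_SEC = 1.0  # plays the role of EDSL_BACKOFF_START_SEC
BACKOFF_MAX_SEC = 8.0    # plays the role of EDSL_BACKOFF_MAX_SEC


@retry(
    stop=stop_after_attempt(MAX_ATTEMPTS),
    wait=wait_exponential(multiplier=BACKOFF_START_SEC, max=BACKOFF_MAX_SEC),
    retry=retry_if_exception_type(NoResponseError),
    reraise=True,
)
async def attempt_answer() -> str:
    # Always fails here, so the decorator retries MAX_ATTEMPTS times,
    # waiting exponentially between attempts, then re-raises the last error.
    raise NoResponseError("model did not respond")


try:
    asyncio.run(attempt_answer())
except NoResponseError as exc:
    print(f"gave up after {MAX_ATTEMPTS} attempts: {exc}")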
--- edsl/jobs/FetchInvigilator.py
+++ /dev/null
@@ -1,40 +0,0 @@
- from typing import List, Dict, Any, Optional, TYPE_CHECKING
-
- if TYPE_CHECKING:
-     from edsl.questions.QuestionBase import QuestionBase
-     from edsl.agents.InvigilatorBase import InvigilatorBase
-
-
- class FetchInvigilator:
-     def __init__(self, interview, current_answers: Optional[Dict[str, Any]] = None):
-         self.interview = interview
-         if current_answers is None:
-             self.current_answers = self.interview.answers
-         else:
-             self.current_answers = current_answers
-
-     def get_invigilator(self, question: "QuestionBase") -> "InvigilatorBase":
-         """Return an invigilator for the given question.
-
-         :param question: the question to be answered
-         :param debug: whether to use debug mode, in which case `InvigilatorDebug` is used.
-         """
-
-         invigilator = self.interview.agent.create_invigilator(
-             question=question,
-             scenario=self.interview.scenario,
-             model=self.interview.model,
-             # debug=False,
-             survey=self.interview.survey,
-             memory_plan=self.interview.survey.memory_plan,
-             current_answers=self.current_answers,  # not yet known
-             iteration=self.interview.iteration,
-             cache=self.interview.cache,
-             # sidecar_model=self.interview.sidecar_model,
-             raise_validation_errors=self.interview.raise_validation_errors,
-         )
-         """Return an invigilator for the given question."""
-         return invigilator
-
-     def __call__(self, question):
-         return self.get_invigilator(question)