edsl 0.1.39.dev3__py3-none-any.whl → 0.1.39.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (344)
  1. edsl/Base.py +413 -332
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +57 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +1071 -867
  7. edsl/agents/AgentList.py +551 -413
  8. edsl/agents/Invigilator.py +284 -233
  9. edsl/agents/InvigilatorBase.py +257 -270
  10. edsl/agents/PromptConstructor.py +272 -354
  11. edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
  12. edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
  13. edsl/agents/__init__.py +2 -3
  14. edsl/agents/descriptors.py +99 -99
  15. edsl/agents/prompt_helpers.py +129 -129
  16. edsl/agents/question_option_processor.py +172 -0
  17. edsl/auto/AutoStudy.py +130 -117
  18. edsl/auto/StageBase.py +243 -230
  19. edsl/auto/StageGenerateSurvey.py +178 -178
  20. edsl/auto/StageLabelQuestions.py +125 -125
  21. edsl/auto/StagePersona.py +61 -61
  22. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  23. edsl/auto/StagePersonaDimensionValues.py +74 -74
  24. edsl/auto/StagePersonaDimensions.py +69 -69
  25. edsl/auto/StageQuestions.py +74 -73
  26. edsl/auto/SurveyCreatorPipeline.py +21 -21
  27. edsl/auto/utilities.py +218 -224
  28. edsl/base/Base.py +279 -279
  29. edsl/config.py +177 -157
  30. edsl/conversation/Conversation.py +290 -290
  31. edsl/conversation/car_buying.py +59 -58
  32. edsl/conversation/chips.py +95 -95
  33. edsl/conversation/mug_negotiation.py +81 -81
  34. edsl/conversation/next_speaker_utilities.py +93 -93
  35. edsl/coop/CoopFunctionsMixin.py +15 -0
  36. edsl/coop/ExpectedParrotKeyHandler.py +125 -0
  37. edsl/coop/PriceFetcher.py +54 -54
  38. edsl/coop/__init__.py +2 -2
  39. edsl/coop/coop.py +1106 -1028
  40. edsl/coop/utils.py +131 -131
  41. edsl/data/Cache.py +573 -555
  42. edsl/data/CacheEntry.py +230 -233
  43. edsl/data/CacheHandler.py +168 -149
  44. edsl/data/RemoteCacheSync.py +186 -78
  45. edsl/data/SQLiteDict.py +292 -292
  46. edsl/data/__init__.py +5 -4
  47. edsl/data/hack.py +10 -0
  48. edsl/data/orm.py +10 -10
  49. edsl/data_transfer_models.py +74 -73
  50. edsl/enums.py +202 -175
  51. edsl/exceptions/BaseException.py +21 -21
  52. edsl/exceptions/__init__.py +54 -54
  53. edsl/exceptions/agents.py +54 -42
  54. edsl/exceptions/cache.py +5 -5
  55. edsl/exceptions/configuration.py +16 -16
  56. edsl/exceptions/coop.py +10 -10
  57. edsl/exceptions/data.py +14 -14
  58. edsl/exceptions/general.py +34 -34
  59. edsl/exceptions/inference_services.py +5 -0
  60. edsl/exceptions/jobs.py +33 -33
  61. edsl/exceptions/language_models.py +63 -63
  62. edsl/exceptions/prompts.py +15 -15
  63. edsl/exceptions/questions.py +109 -91
  64. edsl/exceptions/results.py +29 -29
  65. edsl/exceptions/scenarios.py +29 -22
  66. edsl/exceptions/surveys.py +37 -37
  67. edsl/inference_services/AnthropicService.py +106 -87
  68. edsl/inference_services/AvailableModelCacheHandler.py +184 -0
  69. edsl/inference_services/AvailableModelFetcher.py +215 -0
  70. edsl/inference_services/AwsBedrock.py +118 -120
  71. edsl/inference_services/AzureAI.py +215 -217
  72. edsl/inference_services/DeepInfraService.py +18 -18
  73. edsl/inference_services/GoogleService.py +143 -148
  74. edsl/inference_services/GroqService.py +20 -20
  75. edsl/inference_services/InferenceServiceABC.py +80 -147
  76. edsl/inference_services/InferenceServicesCollection.py +138 -97
  77. edsl/inference_services/MistralAIService.py +120 -123
  78. edsl/inference_services/OllamaService.py +18 -18
  79. edsl/inference_services/OpenAIService.py +236 -224
  80. edsl/inference_services/PerplexityService.py +160 -163
  81. edsl/inference_services/ServiceAvailability.py +135 -0
  82. edsl/inference_services/TestService.py +90 -89
  83. edsl/inference_services/TogetherAIService.py +172 -170
  84. edsl/inference_services/data_structures.py +134 -0
  85. edsl/inference_services/models_available_cache.py +118 -118
  86. edsl/inference_services/rate_limits_cache.py +25 -25
  87. edsl/inference_services/registry.py +41 -41
  88. edsl/inference_services/write_available.py +10 -10
  89. edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
  90. edsl/jobs/Answers.py +43 -56
  91. edsl/jobs/FetchInvigilator.py +47 -0
  92. edsl/jobs/InterviewTaskManager.py +98 -0
  93. edsl/jobs/InterviewsConstructor.py +50 -0
  94. edsl/jobs/Jobs.py +823 -898
  95. edsl/jobs/JobsChecks.py +172 -147
  96. edsl/jobs/JobsComponentConstructor.py +189 -0
  97. edsl/jobs/JobsPrompts.py +270 -268
  98. edsl/jobs/JobsRemoteInferenceHandler.py +311 -239
  99. edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
  100. edsl/jobs/RequestTokenEstimator.py +30 -0
  101. edsl/jobs/__init__.py +1 -1
  102. edsl/jobs/async_interview_runner.py +138 -0
  103. edsl/jobs/buckets/BucketCollection.py +104 -63
  104. edsl/jobs/buckets/ModelBuckets.py +65 -65
  105. edsl/jobs/buckets/TokenBucket.py +283 -251
  106. edsl/jobs/buckets/TokenBucketAPI.py +211 -0
  107. edsl/jobs/buckets/TokenBucketClient.py +191 -0
  108. edsl/jobs/check_survey_scenario_compatibility.py +85 -0
  109. edsl/jobs/data_structures.py +120 -0
  110. edsl/jobs/decorators.py +35 -0
  111. edsl/jobs/interviews/Interview.py +396 -661
  112. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  113. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  114. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  115. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  116. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  117. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  118. edsl/jobs/interviews/ReportErrors.py +66 -66
  119. edsl/jobs/interviews/interview_status_enum.py +9 -9
  120. edsl/jobs/jobs_status_enums.py +9 -0
  121. edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
  122. edsl/jobs/results_exceptions_handler.py +98 -0
  123. edsl/jobs/runners/JobsRunnerAsyncio.py +151 -466
  124. edsl/jobs/runners/JobsRunnerStatus.py +297 -330
  125. edsl/jobs/tasks/QuestionTaskCreator.py +244 -242
  126. edsl/jobs/tasks/TaskCreators.py +64 -64
  127. edsl/jobs/tasks/TaskHistory.py +470 -450
  128. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  129. edsl/jobs/tasks/task_status_enum.py +161 -163
  130. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  131. edsl/jobs/tokens/TokenUsage.py +34 -34
  132. edsl/language_models/ComputeCost.py +63 -0
  133. edsl/language_models/LanguageModel.py +626 -668
  134. edsl/language_models/ModelList.py +164 -155
  135. edsl/language_models/PriceManager.py +127 -0
  136. edsl/language_models/RawResponseHandler.py +106 -0
  137. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  138. edsl/language_models/ServiceDataSources.py +0 -0
  139. edsl/language_models/__init__.py +2 -3
  140. edsl/language_models/fake_openai_call.py +15 -15
  141. edsl/language_models/fake_openai_service.py +61 -61
  142. edsl/language_models/key_management/KeyLookup.py +63 -0
  143. edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
  144. edsl/language_models/key_management/KeyLookupCollection.py +38 -0
  145. edsl/language_models/key_management/__init__.py +0 -0
  146. edsl/language_models/key_management/models.py +131 -0
  147. edsl/language_models/model.py +256 -0
  148. edsl/language_models/repair.py +156 -156
  149. edsl/language_models/utilities.py +65 -64
  150. edsl/notebooks/Notebook.py +263 -258
  151. edsl/notebooks/NotebookToLaTeX.py +142 -0
  152. edsl/notebooks/__init__.py +1 -1
  153. edsl/prompts/Prompt.py +352 -362
  154. edsl/prompts/__init__.py +2 -2
  155. edsl/questions/ExceptionExplainer.py +77 -0
  156. edsl/questions/HTMLQuestion.py +103 -0
  157. edsl/questions/QuestionBase.py +518 -664
  158. edsl/questions/QuestionBasePromptsMixin.py +221 -217
  159. edsl/questions/QuestionBudget.py +227 -227
  160. edsl/questions/QuestionCheckBox.py +359 -359
  161. edsl/questions/QuestionExtract.py +180 -182
  162. edsl/questions/QuestionFreeText.py +113 -114
  163. edsl/questions/QuestionFunctional.py +166 -166
  164. edsl/questions/QuestionList.py +223 -231
  165. edsl/questions/QuestionMatrix.py +265 -0
  166. edsl/questions/QuestionMultipleChoice.py +330 -286
  167. edsl/questions/QuestionNumerical.py +151 -153
  168. edsl/questions/QuestionRank.py +314 -324
  169. edsl/questions/Quick.py +41 -41
  170. edsl/questions/SimpleAskMixin.py +74 -73
  171. edsl/questions/__init__.py +27 -26
  172. edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +334 -289
  173. edsl/questions/compose_questions.py +98 -98
  174. edsl/questions/data_structures.py +20 -0
  175. edsl/questions/decorators.py +21 -21
  176. edsl/questions/derived/QuestionLikertFive.py +76 -76
  177. edsl/questions/derived/QuestionLinearScale.py +90 -87
  178. edsl/questions/derived/QuestionTopK.py +93 -93
  179. edsl/questions/derived/QuestionYesNo.py +82 -82
  180. edsl/questions/descriptors.py +427 -413
  181. edsl/questions/loop_processor.py +149 -0
  182. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  183. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  184. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  185. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  186. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  187. edsl/questions/prompt_templates/question_list.jinja +17 -17
  188. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  189. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  190. edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +168 -161
  191. edsl/questions/question_registry.py +177 -177
  192. edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +71 -71
  193. edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +188 -174
  194. edsl/questions/response_validator_factory.py +34 -0
  195. edsl/questions/settings.py +12 -12
  196. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  197. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  198. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  199. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  200. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  201. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  202. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  203. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  204. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  205. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  206. edsl/questions/templates/list/question_presentation.jinja +5 -5
  207. edsl/questions/templates/matrix/__init__.py +1 -0
  208. edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
  209. edsl/questions/templates/matrix/question_presentation.jinja +20 -0
  210. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  211. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  212. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  213. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  214. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  215. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  216. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  217. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  218. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  219. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  220. edsl/results/CSSParameterizer.py +108 -108
  221. edsl/results/Dataset.py +587 -424
  222. edsl/results/DatasetExportMixin.py +594 -731
  223. edsl/results/DatasetTree.py +295 -275
  224. edsl/results/MarkdownToDocx.py +122 -0
  225. edsl/results/MarkdownToPDF.py +111 -0
  226. edsl/results/Result.py +557 -465
  227. edsl/results/Results.py +1183 -1165
  228. edsl/results/ResultsExportMixin.py +45 -43
  229. edsl/results/ResultsGGMixin.py +121 -121
  230. edsl/results/TableDisplay.py +125 -198
  231. edsl/results/TextEditor.py +50 -0
  232. edsl/results/__init__.py +2 -2
  233. edsl/results/file_exports.py +252 -0
  234. edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +33 -33
  235. edsl/results/{Selector.py → results_selector.py} +145 -135
  236. edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +98 -98
  237. edsl/results/smart_objects.py +96 -0
  238. edsl/results/table_data_class.py +12 -0
  239. edsl/results/table_display.css +77 -77
  240. edsl/results/table_renderers.py +118 -0
  241. edsl/results/tree_explore.py +115 -115
  242. edsl/scenarios/ConstructDownloadLink.py +109 -0
  243. edsl/scenarios/DocumentChunker.py +102 -0
  244. edsl/scenarios/DocxScenario.py +16 -0
  245. edsl/scenarios/FileStore.py +511 -632
  246. edsl/scenarios/PdfExtractor.py +40 -0
  247. edsl/scenarios/Scenario.py +498 -601
  248. edsl/scenarios/ScenarioHtmlMixin.py +65 -64
  249. edsl/scenarios/ScenarioList.py +1458 -1287
  250. edsl/scenarios/ScenarioListExportMixin.py +45 -52
  251. edsl/scenarios/ScenarioListPdfMixin.py +239 -261
  252. edsl/scenarios/__init__.py +3 -4
  253. edsl/scenarios/directory_scanner.py +96 -0
  254. edsl/scenarios/file_methods.py +85 -0
  255. edsl/scenarios/handlers/__init__.py +13 -0
  256. edsl/scenarios/handlers/csv.py +38 -0
  257. edsl/scenarios/handlers/docx.py +76 -0
  258. edsl/scenarios/handlers/html.py +37 -0
  259. edsl/scenarios/handlers/json.py +111 -0
  260. edsl/scenarios/handlers/latex.py +5 -0
  261. edsl/scenarios/handlers/md.py +51 -0
  262. edsl/scenarios/handlers/pdf.py +68 -0
  263. edsl/scenarios/handlers/png.py +39 -0
  264. edsl/scenarios/handlers/pptx.py +105 -0
  265. edsl/scenarios/handlers/py.py +294 -0
  266. edsl/scenarios/handlers/sql.py +313 -0
  267. edsl/scenarios/handlers/sqlite.py +149 -0
  268. edsl/scenarios/handlers/txt.py +33 -0
  269. edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +131 -127
  270. edsl/scenarios/scenario_selector.py +156 -0
  271. edsl/shared.py +1 -1
  272. edsl/study/ObjectEntry.py +173 -173
  273. edsl/study/ProofOfWork.py +113 -113
  274. edsl/study/SnapShot.py +80 -80
  275. edsl/study/Study.py +521 -528
  276. edsl/study/__init__.py +4 -4
  277. edsl/surveys/ConstructDAG.py +92 -0
  278. edsl/surveys/DAG.py +148 -148
  279. edsl/surveys/EditSurvey.py +221 -0
  280. edsl/surveys/InstructionHandler.py +100 -0
  281. edsl/surveys/Memory.py +31 -31
  282. edsl/surveys/MemoryManagement.py +72 -0
  283. edsl/surveys/MemoryPlan.py +244 -244
  284. edsl/surveys/Rule.py +327 -326
  285. edsl/surveys/RuleCollection.py +385 -387
  286. edsl/surveys/RuleManager.py +172 -0
  287. edsl/surveys/Simulator.py +75 -0
  288. edsl/surveys/Survey.py +1280 -1801
  289. edsl/surveys/SurveyCSS.py +273 -261
  290. edsl/surveys/SurveyExportMixin.py +259 -259
  291. edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +181 -179
  292. edsl/surveys/SurveyQualtricsImport.py +284 -284
  293. edsl/surveys/SurveyToApp.py +141 -0
  294. edsl/surveys/__init__.py +5 -3
  295. edsl/surveys/base.py +53 -53
  296. edsl/surveys/descriptors.py +60 -56
  297. edsl/surveys/instructions/ChangeInstruction.py +48 -49
  298. edsl/surveys/instructions/Instruction.py +56 -65
  299. edsl/surveys/instructions/InstructionCollection.py +82 -77
  300. edsl/templates/error_reporting/base.html +23 -23
  301. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  302. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  303. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  304. edsl/templates/error_reporting/interview_details.html +115 -115
  305. edsl/templates/error_reporting/interviews.html +19 -19
  306. edsl/templates/error_reporting/overview.html +4 -4
  307. edsl/templates/error_reporting/performance_plot.html +1 -1
  308. edsl/templates/error_reporting/report.css +73 -73
  309. edsl/templates/error_reporting/report.html +117 -117
  310. edsl/templates/error_reporting/report.js +25 -25
  311. edsl/test_h +1 -0
  312. edsl/tools/__init__.py +1 -1
  313. edsl/tools/clusters.py +192 -192
  314. edsl/tools/embeddings.py +27 -27
  315. edsl/tools/embeddings_plotting.py +118 -118
  316. edsl/tools/plotting.py +112 -112
  317. edsl/tools/summarize.py +18 -18
  318. edsl/utilities/PrettyList.py +56 -0
  319. edsl/utilities/SystemInfo.py +28 -28
  320. edsl/utilities/__init__.py +22 -22
  321. edsl/utilities/ast_utilities.py +25 -25
  322. edsl/utilities/data/Registry.py +6 -6
  323. edsl/utilities/data/__init__.py +1 -1
  324. edsl/utilities/data/scooter_results.json +1 -1
  325. edsl/utilities/decorators.py +77 -77
  326. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  327. edsl/utilities/gcp_bucket/example.py +50 -0
  328. edsl/utilities/interface.py +627 -627
  329. edsl/utilities/is_notebook.py +18 -0
  330. edsl/utilities/is_valid_variable_name.py +11 -0
  331. edsl/utilities/naming_utilities.py +263 -263
  332. edsl/utilities/remove_edsl_version.py +24 -0
  333. edsl/utilities/repair_functions.py +28 -28
  334. edsl/utilities/restricted_python.py +70 -70
  335. edsl/utilities/utilities.py +436 -424
  336. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/LICENSE +21 -21
  337. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/METADATA +13 -11
  338. edsl-0.1.39.dev4.dist-info/RECORD +361 -0
  339. edsl/language_models/KeyLookup.py +0 -30
  340. edsl/language_models/registry.py +0 -190
  341. edsl/language_models/unused/ReplicateBase.py +0 -83
  342. edsl/results/ResultsDBMixin.py +0 -238
  343. edsl-0.1.39.dev3.dist-info/RECORD +0 -277
  344. {edsl-0.1.39.dev3.dist-info → edsl-0.1.39.dev4.dist-info}/WHEEL +0 -0

edsl/inference_services/PerplexityService.py
@@ -1,163 +1,160 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List, Optional
- from edsl.inference_services.rate_limits_cache import rate_limits
-
- # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
-
- from edsl.inference_services.OpenAIService import OpenAIService
-
-
- class PerplexityService(OpenAIService):
-     """Perplexity service class."""
-
-     _inference_service_ = "perplexity"
-     _env_key_name_ = "PERPLEXITY_API_KEY"
-     _base_url_ = "https://api.perplexity.ai"
-     _models_list_cache: List[str] = []
-     # default perplexity parameters
-     _parameters_ = {
-         "temperature": 0.5,
-         "max_tokens": 1000,
-         "top_p": 1,
-         "logprobs": False,
-         "top_logprobs": 3,
-     }
-
-     @classmethod
-     def available(cls) -> List[str]:
-         return [
-             "llama-3.1-sonar-huge-128k-online",
-             "llama-3.1-sonar-large-128k-online",
-             "llama-3.1-sonar-small-128k-online",
-         ]
-
-     @classmethod
-     def create_model(
-         cls, model_name="llama-3.1-sonar-large-128k-online", model_class_name=None
-     ) -> LanguageModel:
-         if model_class_name is None:
-             model_class_name = cls.to_class_name(model_name)
-
-         class LLM(LanguageModel):
-             """
-             Child class of LanguageModel for interacting with Perplexity models
-             """
-
-             key_sequence = cls.key_sequence
-             usage_sequence = cls.usage_sequence
-             input_token_name = cls.input_token_name
-             output_token_name = cls.output_token_name
-
-             _rpm = cls.get_rpm(cls)
-             _tpm = cls.get_tpm(cls)
-
-             _inference_service_ = cls._inference_service_
-             _model_ = model_name
-
-             _parameters_ = {
-                 "temperature": 0.5,
-                 "max_tokens": 1000,
-                 "top_p": 1,
-                 "frequency_penalty": 1,
-                 "presence_penalty": 0,
-                 # "logprobs": False, # Enable this returns 'Neither or both of logprobs and top_logprobs must be set.
-                 # "top_logprobs": 3,
-             }
-
-             def sync_client(self):
-                 return cls.sync_client()
-
-             def async_client(self):
-                 return cls.async_client()
-
-             @classmethod
-             def available(cls) -> list[str]:
-                 return cls.sync_client().models.list()
-
-             def get_headers(self) -> dict[str, Any]:
-                 client = self.sync_client()
-                 response = client.chat.completions.with_raw_response.create(
-                     messages=[
-                         {
-                             "role": "user",
-                             "content": "Say this is a test",
-                         }
-                     ],
-                     model=self.model,
-                 )
-                 return dict(response.headers)
-
-             def get_rate_limits(self) -> dict[str, Any]:
-                 try:
-                     if "openai" in rate_limits:
-                         headers = rate_limits["openai"]
-
-                     else:
-                         headers = self.get_headers()
-
-                 except Exception as e:
-                     return {
-                         "rpm": 10_000,
-                         "tpm": 2_000_000,
-                     }
-                 else:
-                     return {
-                         "rpm": int(headers["x-ratelimit-limit-requests"]),
-                         "tpm": int(headers["x-ratelimit-limit-tokens"]),
-                     }
-
-             async def async_execute_model_call(
-                 self,
-                 user_prompt: str,
-                 system_prompt: str = "",
-                 files_list: Optional[List["Files"]] = None,
-                 invigilator: Optional[
-                     "InvigilatorAI"
-                 ] = None, # TBD - can eventually be used for function-calling
-             ) -> dict[str, Any]:
-                 """Calls the OpenAI API and returns the API response."""
-                 if files_list:
-                     encoded_image = files_list[0].base64_string
-                     content = [{"type": "text", "text": user_prompt}]
-                     content.append(
-                         {
-                             "type": "image_url",
-                             "image_url": {
-                                 "url": f"data:image/jpeg;base64,{encoded_image}"
-                             },
-                         }
-                     )
-                 else:
-                     content = user_prompt
-                 client = self.async_client()
-
-                 messages = [
-                     {"role": "system", "content": system_prompt},
-                     {"role": "user", "content": content},
-                 ]
-                 if system_prompt == "" and self.omit_system_prompt_if_empty:
-                     messages = messages[1:]
-
-                 params = {
-                     "model": self.model,
-                     "messages": messages,
-                     "temperature": self.temperature,
-                     "max_tokens": self.max_tokens,
-                     "top_p": self.top_p,
-                     "frequency_penalty": self.frequency_penalty,
-                     "presence_penalty": self.presence_penalty,
-                     # "logprobs": self.logprobs,
-                     # "top_logprobs": self.top_logprobs if self.logprobs else None,
-                 }
-                 try:
-                     response = await client.chat.completions.create(**params)
-                 except Exception as e:
-                     print(e, flush=True)
-                 return response.model_dump()
-
-         LLM.__name__ = "LanguageModel"
-
-         return LLM
+ import aiohttp
+ import json
+ import requests
+ from typing import Any, List, Optional
+ from edsl.inference_services.rate_limits_cache import rate_limits
+
+ # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models import LanguageModel
+
+ from edsl.inference_services.OpenAIService import OpenAIService
+
+
+ class PerplexityService(OpenAIService):
+     """Perplexity service class."""
+
+     _inference_service_ = "perplexity"
+     _env_key_name_ = "PERPLEXITY_API_KEY"
+     _base_url_ = "https://api.perplexity.ai"
+     _models_list_cache: List[str] = []
+     # default perplexity parameters
+     _parameters_ = {
+         "temperature": 0.5,
+         "max_tokens": 1000,
+         "top_p": 1,
+         "logprobs": False,
+         "top_logprobs": 3,
+     }
+
+     @classmethod
+     def available(cls) -> List[str]:
+         return [
+             "llama-3.1-sonar-huge-128k-online",
+             "llama-3.1-sonar-large-128k-online",
+             "llama-3.1-sonar-small-128k-online",
+         ]
+
+     @classmethod
+     def create_model(
+         cls, model_name="llama-3.1-sonar-large-128k-online", model_class_name=None
+     ) -> LanguageModel:
+         if model_class_name is None:
+             model_class_name = cls.to_class_name(model_name)
+
+         class LLM(LanguageModel):
+             """
+             Child class of LanguageModel for interacting with Perplexity models
+             """
+
+             key_sequence = cls.key_sequence
+             usage_sequence = cls.usage_sequence
+             input_token_name = cls.input_token_name
+             output_token_name = cls.output_token_name
+
+             _inference_service_ = cls._inference_service_
+             _model_ = model_name
+
+             _parameters_ = {
+                 "temperature": 0.5,
+                 "max_tokens": 1000,
+                 "top_p": 1,
+                 "frequency_penalty": 1,
+                 "presence_penalty": 0,
+                 # "logprobs": False, # Enable this returns 'Neither or both of logprobs and top_logprobs must be set.
+                 # "top_logprobs": 3,
+             }
+
+             def sync_client(self):
+                 return cls.sync_client()
+
+             def async_client(self):
+                 return cls.async_client()
+
+             @classmethod
+             def available(cls) -> list[str]:
+                 return cls.sync_client().models.list()
+
+             def get_headers(self) -> dict[str, Any]:
+                 client = self.sync_client()
+                 response = client.chat.completions.with_raw_response.create(
+                     messages=[
+                         {
+                             "role": "user",
+                             "content": "Say this is a test",
+                         }
+                     ],
+                     model=self.model,
+                 )
+                 return dict(response.headers)
+
+             def get_rate_limits(self) -> dict[str, Any]:
+                 try:
+                     if "openai" in rate_limits:
+                         headers = rate_limits["openai"]
+
+                     else:
+                         headers = self.get_headers()
+
+                 except Exception as e:
+                     return {
+                         "rpm": 10_000,
+                         "tpm": 2_000_000,
+                     }
+                 else:
+                     return {
+                         "rpm": int(headers["x-ratelimit-limit-requests"]),
+                         "tpm": int(headers["x-ratelimit-limit-tokens"]),
+                     }
+
+             async def async_execute_model_call(
+                 self,
+                 user_prompt: str,
+                 system_prompt: str = "",
+                 files_list: Optional[List["Files"]] = None,
+                 invigilator: Optional[
+                     "InvigilatorAI"
+                 ] = None, # TBD - can eventually be used for function-calling
+             ) -> dict[str, Any]:
+                 """Calls the OpenAI API and returns the API response."""
+                 if files_list:
+                     encoded_image = files_list[0].base64_string
+                     content = [{"type": "text", "text": user_prompt}]
+                     content.append(
+                         {
+                             "type": "image_url",
+                             "image_url": {
+                                 "url": f"data:image/jpeg;base64,{encoded_image}"
+                             },
+                         }
+                     )
+                 else:
+                     content = user_prompt
+                 client = self.async_client()
+
+                 messages = [
+                     {"role": "system", "content": system_prompt},
+                     {"role": "user", "content": content},
+                 ]
+                 if system_prompt == "" and self.omit_system_prompt_if_empty:
+                     messages = messages[1:]
+
+                 params = {
+                     "model": self.model,
+                     "messages": messages,
+                     "temperature": self.temperature,
+                     "max_tokens": self.max_tokens,
+                     "top_p": self.top_p,
+                     "frequency_penalty": self.frequency_penalty,
+                     "presence_penalty": self.presence_penalty,
+                     # "logprobs": self.logprobs,
+                     # "top_logprobs": self.top_logprobs if self.logprobs else None,
+                 }
+                 try:
+                     response = await client.chat.completions.create(**params)
+                 except Exception as e:
+                     print(e, flush=True)
+                 return response.model_dump()
+
+         LLM.__name__ = "LanguageModel"
+
+         return LLM
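
Note: the only functional change in this hunk is that the dynamically built LLM class no longer pins _rpm/_tpm at class-creation time (the two cls.get_rpm/cls.get_tpm assignments are dropped); the rest of the file is unchanged. A minimal usage sketch of the factory shown above (the model name and create_model signature come from the diff; the constructor call is illustrative, since LanguageModel.__init__ is not part of this hunk):

    # Sketch only; assumes PERPLEXITY_API_KEY is set in the environment.
    from edsl.inference_services.PerplexityService import PerplexityService

    LLM = PerplexityService.create_model("llama-3.1-sonar-large-128k-online")
    model = LLM()  # hypothetical construction; real kwargs depend on LanguageModel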

edsl/inference_services/ServiceAvailability.py
@@ -0,0 +1,135 @@
+ from enum import Enum
+ from typing import List, Optional, TYPE_CHECKING
+ from functools import partial
+ import warnings
+
+ from edsl.inference_services.data_structures import AvailableModels, ModelNamesList
+
+ if TYPE_CHECKING:
+     from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+
+
+ class ModelSource(Enum):
+     LOCAL = "local"
+     COOP = "coop"
+     CACHE = "cache"
+
+
+ class ServiceAvailability:
+     """This class is responsible for fetching the available models from different sources."""
+
+     _coop_model_list = None
+
+     def __init__(self, source_order: Optional[List[ModelSource]] = None):
+         """
+         Initialize with custom source order.
+         Default order is LOCAL -> COOP -> CACHE
+         """
+         self.source_order = source_order or [
+             ModelSource.LOCAL,
+             ModelSource.COOP,
+             ModelSource.CACHE,
+         ]
+
+         # Map sources to their fetch functions
+         self._source_fetchers = {
+             ModelSource.LOCAL: self._fetch_from_local_service,
+             ModelSource.COOP: self._fetch_from_coop,
+             ModelSource.CACHE: self._fetch_from_cache,
+         }
+
+     @classmethod
+     def models_from_coop(cls) -> AvailableModels:
+         if not cls._coop_model_list:
+             from edsl.coop.coop import Coop
+
+             c = Coop()
+             coop_model_list = c.fetch_models()
+             cls._coop_model_list = coop_model_list
+         return cls._coop_model_list
+
+     def get_service_available(
+         self, service: "InferenceServiceABC", warn: bool = False
+     ) -> ModelNamesList:
+         """
+         Try to fetch available models from sources in specified order.
+         Returns first successful result.
+         """
+         last_error = None
+
+         for source in self.source_order:
+             try:
+                 fetch_func = partial(self._source_fetchers[source], service)
+                 result = fetch_func()
+
+                 # Cache successful result
+                 service._models_list_cache = result
+                 return result
+
+             except Exception as e:
+                 last_error = e
+                 if warn:
+                     self._warn_source_failed(service, source)
+                 continue
+
+         # If we get here, all sources failed
+         raise RuntimeError(
+             f"All sources failed to fetch models. Last error: {last_error}"
+         )
+
+     @staticmethod
+     def _fetch_from_local_service(service: "InferenceServiceABC") -> ModelNamesList:
+         """Attempt to fetch models directly from the service."""
+         return service.available()
+
+     @classmethod
+     def _fetch_from_coop(cls, service: "InferenceServiceABC") -> ModelNamesList:
+         """Fetch models from Coop."""
+         models_from_coop = cls.models_from_coop()
+         return models_from_coop.get(service._inference_service_, [])
+
+     @staticmethod
+     def _fetch_from_cache(service: "InferenceServiceABC") -> ModelNamesList:
+         """Fetch models from local cache."""
+         from edsl.inference_services.models_available_cache import models_available
+
+         return models_available.get(service._inference_service_, [])
+
+     def _warn_source_failed(self, service: "InferenceServiceABC", source: ModelSource):
+         """Display appropriate warning message based on failed source."""
+         messages = {
+             ModelSource.LOCAL: f"""Error getting models for {service._inference_service_}.
+             Check that you have properly stored your Expected Parrot API key and activated remote inference,
+             or stored your own API keys for the language models that you want to use.
+             See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
+             Trying next source.""",
+             ModelSource.COOP: f"Error getting models from Coop for {service._inference_service_}. Trying next source.",
+             ModelSource.CACHE: f"Error getting models from cache for {service._inference_service_}.",
+         }
+         warnings.warn(messages[source], UserWarning)
+
+
+ if __name__ == "__main__":
+     # sa = ServiceAvailability()
+     # models_from_coop = sa.models_from_coop()
+     # print(models_from_coop)
+     from edsl.inference_services.OpenAIService import OpenAIService
+
+     openai_models = ServiceAvailability._fetch_from_local_service(OpenAIService())
+     print(openai_models)
+
+     # Example usage:
+     """
+     # Default order (LOCAL -> COOP -> CACHE)
+     availability = ServiceAvailability()
+
+     # Custom order (COOP -> LOCAL -> CACHE)
+     availability_coop_first = ServiceAvailability([
+         ModelSource.COOP,
+         ModelSource.LOCAL,
+         ModelSource.CACHE
+     ])
+
+     # Get available models using custom order
+     models = availability_coop_first.get_service_available(service, warn=True)
+     """

edsl/inference_services/TestService.py
@@ -1,89 +1,90 @@
- from typing import Any, List, Optional
- import os
- import asyncio
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
- from edsl.inference_services.rate_limits_cache import rate_limits
- from edsl.utilities.utilities import fix_partial_correct_response
-
- from edsl.enums import InferenceServiceType
- import random
-
-
- class TestService(InferenceServiceABC):
-     """OpenAI service class."""
-
-     _inference_service_ = "test"
-     _env_key_name_ = None
-     _base_url_ = None
-
-     _sync_client_ = None
-     _async_client_ = None
-
-     _sync_client_instance = None
-     _async_client_instance = None
-
-     key_sequence = None
-     usage_sequence = None
-     model_exclude_list = []
-     input_token_name = "prompt_tokens"
-     output_token_name = "completion_tokens"
-
-     @classmethod
-     def available(cls) -> list[str]:
-         return ["test"]
-
-     @classmethod
-     def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
-         throw_exception = False
-
-         class TestServiceLanguageModel(LanguageModel):
-             _model_ = "test"
-             _parameters_ = {"temperature": 0.5}
-             _inference_service_ = InferenceServiceType.TEST.value
-             usage_sequence = ["usage"]
-             key_sequence = ["message", 0, "text"]
-             input_token_name = cls.input_token_name
-             output_token_name = cls.output_token_name
-             _rpm = 1000
-             _tpm = 100000
-
-             @property
-             def _canned_response(self):
-                 if hasattr(self, "canned_response"):
-                     return self.canned_response
-                 else:
-                     return "Hello, world"
-
-             async def async_execute_model_call(
-                 self,
-                 user_prompt: str,
-                 system_prompt: str,
-                 # func: Optional[callable] = None,
-                 files_list: Optional[List["File"]] = None,
-             ) -> dict[str, Any]:
-                 await asyncio.sleep(0.1)
-                 # return {"message": """{"answer": "Hello, world"}"""}
-
-                 if hasattr(self, "func"):
-                     return {
-                         "message": [
-                             {"text": self.func(user_prompt, system_prompt, files_list)}
-                         ],
-                         "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                     }
-
-                 if hasattr(self, "throw_exception") and self.throw_exception:
-                     if hasattr(self, "exception_probability"):
-                         p = self.exception_probability
-                     else:
-                         p = 1
-
-                     if random.random() < p:
-                         raise Exception("This is a test error")
-                 return {
-                     "message": [{"text": f"{self._canned_response}"}],
-                     "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                 }
-
-         return TestServiceLanguageModel
+ from typing import Any, List, Optional
+ import os
+ import asyncio
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models.LanguageModel import LanguageModel
+ from edsl.inference_services.rate_limits_cache import rate_limits
+ from edsl.utilities.utilities import fix_partial_correct_response
+
+ from edsl.enums import InferenceServiceType
+ import random
+
+
+ class TestService(InferenceServiceABC):
+     """OpenAI service class."""
+
+     _inference_service_ = "test"
+     _env_key_name_ = None
+     _base_url_ = None
+
+     _sync_client_ = None
+     _async_client_ = None
+
+     _sync_client_instance = None
+     _async_client_instance = None
+
+     key_sequence = None
+     usage_sequence = None
+     model_exclude_list = []
+     input_token_name = "prompt_tokens"
+     output_token_name = "completion_tokens"
+
+     @classmethod
+     def available(cls) -> list[str]:
+         return ["test"]
+
+     @classmethod
+     def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
+         throw_exception = False
+
+         class TestServiceLanguageModel(LanguageModel):
+             _model_ = "test"
+             _parameters_ = {"temperature": 0.5}
+             _inference_service_ = InferenceServiceType.TEST.value
+             usage_sequence = ["usage"]
+             key_sequence = ["message", 0, "text"]
+             input_token_name = cls.input_token_name
+             output_token_name = cls.output_token_name
+             _rpm = 1000
+             _tpm = 100000
+
+             @property
+             def _canned_response(self):
+                 if hasattr(self, "canned_response"):
+
+                     return self.canned_response
+                 else:
+                     return "Hello, world"
+
+             async def async_execute_model_call(
+                 self,
+                 user_prompt: str,
+                 system_prompt: str,
+                 # func: Optional[callable] = None,
+                 files_list: Optional[List["File"]] = None,
+             ) -> dict[str, Any]:
+                 await asyncio.sleep(0.1)
+
+                 if hasattr(self, "throw_exception") and self.throw_exception:
+                     if hasattr(self, "exception_probability"):
+                         p = self.exception_probability
+                     else:
+                         p = 1
+
+                     if random.random() < p:
+                         raise Exception("This is a test error")
+
+                 if hasattr(self, "func"):
+                     return {
+                         "message": [
+                             {"text": self.func(user_prompt, system_prompt, files_list)}
+                         ],
+                         "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+                     }
+
+                 return {
+                     "message": [{"text": f"{self._canned_response}"}],
+                     "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+                 }
+
+         return TestServiceLanguageModel
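
Note: besides the LanguageModel import moving to its concrete module path, the behavioral change in this hunk is ordering: async_execute_model_call now evaluates the throw_exception branch before the func hook, so a test model configured with both raises instead of calling func. A sketch of the canned-response path (canned_response and the response shape come from the diff; the construction is illustrative, since LanguageModel.__init__ is not shown here):

    import asyncio
    from edsl.inference_services.TestService import TestService

    TestModel = TestService.create_model("test")
    m = TestModel()  # hypothetical construction; real kwargs depend on LanguageModel
    m.canned_response = "42"
    print(asyncio.run(m.async_execute_model_call("question", "system")))
    # expected: {"message": [{"text": "42"}], "usage": {"prompt_tokens": 1, "completion_tokens": 1}}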