edsl 0.1.37.dev6__py3-none-any.whl → 0.1.38__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (271)
  1. edsl/Base.py +332 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -855
  7. edsl/agents/AgentList.py +413 -350
  8. edsl/agents/Invigilator.py +233 -222
  9. edsl/agents/InvigilatorBase.py +265 -284
  10. edsl/agents/PromptConstructor.py +354 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -289
  26. edsl/config.py +157 -149
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -958
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -527
  37. edsl/data/CacheEntry.py +233 -228
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +78 -97
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +175 -173
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -38
  48. edsl/exceptions/cache.py +5 -0
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -156
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/PerplexityService.py +163 -0
  72. edsl/inference_services/TestService.py +89 -89
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -39
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -56
  79. edsl/jobs/Jobs.py +898 -1347
  80. edsl/jobs/JobsChecks.py +147 -0
  81. edsl/jobs/JobsPrompts.py +268 -0
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -0
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -63
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -248
  87. edsl/jobs/interviews/Interview.py +661 -661
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -338
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -332
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -442
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -163
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -30
  106. edsl/language_models/LanguageModel.py +668 -706
  107. edsl/language_models/ModelList.py +155 -102
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -3
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -137
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -83
  115. edsl/language_models/utilities.py +64 -64
  116. edsl/notebooks/Notebook.py +258 -259
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -357
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -289
  121. edsl/questions/QuestionBase.py +664 -656
  122. edsl/questions/QuestionBaseGenMixin.py +161 -161
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -234
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -183
  127. edsl/questions/QuestionFreeText.py +114 -114
  128. edsl/questions/QuestionFunctional.py +166 -159
  129. edsl/questions/QuestionList.py +231 -231
  130. edsl/questions/QuestionMultipleChoice.py +286 -286
  131. edsl/questions/QuestionNumerical.py +153 -153
  132. edsl/questions/QuestionRank.py +324 -324
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -174
  136. edsl/questions/SimpleAskMixin.py +73 -73
  137. edsl/questions/__init__.py +26 -26
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -87
  142. edsl/questions/derived/QuestionTopK.py +93 -91
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -413
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -147
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -0
  177. edsl/results/Dataset.py +424 -293
  178. edsl/results/DatasetExportMixin.py +731 -717
  179. edsl/results/DatasetTree.py +275 -145
  180. edsl/results/Result.py +465 -450
  181. edsl/results/Results.py +1165 -1071
  182. edsl/results/ResultsDBMixin.py +238 -238
  183. edsl/results/ResultsExportMixin.py +43 -43
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -135
  188. edsl/results/TableDisplay.py +198 -0
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +78 -0
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -458
  193. edsl/scenarios/Scenario.py +601 -546
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  195. edsl/scenarios/ScenarioJoin.py +127 -0
  196. edsl/scenarios/ScenarioList.py +1287 -1112
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  199. edsl/scenarios/__init__.py +4 -4
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -528
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -330
  210. edsl/surveys/RuleCollection.py +387 -387
  211. edsl/surveys/Survey.py +1801 -1795
  212. edsl/surveys/SurveyCSS.py +261 -261
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/SurveyFlowVisualizationMixin.py +179 -121
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -3
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -56
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -47
  220. edsl/surveys/instructions/Instruction.py +65 -51
  221. edsl/surveys/instructions/InstructionCollection.py +77 -77
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -10
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/{conjure → utilities}/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -409
  252. {edsl-0.1.37.dev6.dist-info → edsl-0.1.38.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.37.dev6.dist-info → edsl-0.1.38.dist-info}/METADATA +2 -1
  254. edsl-0.1.38.dist-info/RECORD +277 -0
  255. edsl/conjure/AgentConstructionMixin.py +0 -160
  256. edsl/conjure/Conjure.py +0 -62
  257. edsl/conjure/InputData.py +0 -659
  258. edsl/conjure/InputDataCSV.py +0 -48
  259. edsl/conjure/InputDataMixinQuestionStats.py +0 -182
  260. edsl/conjure/InputDataPyRead.py +0 -91
  261. edsl/conjure/InputDataSPSS.py +0 -8
  262. edsl/conjure/InputDataStata.py +0 -8
  263. edsl/conjure/QuestionOptionMixin.py +0 -76
  264. edsl/conjure/QuestionTypeMixin.py +0 -23
  265. edsl/conjure/RawQuestion.py +0 -65
  266. edsl/conjure/SurveyResponses.py +0 -7
  267. edsl/conjure/__init__.py +0 -9
  268. edsl/conjure/examples/placeholder.txt +0 -0
  269. edsl/conjure/utilities.py +0 -201
  270. edsl-0.1.37.dev6.dist-info/RECORD +0 -283
  271. {edsl-0.1.37.dev6.dist-info → edsl-0.1.38.dist-info}/WHEEL +0 -0
edsl/inference_services/TestService.py
@@ -1,89 +1,89 @@
- from typing import Any, List, Optional
- import os
- import asyncio
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
- from edsl.inference_services.rate_limits_cache import rate_limits
- from edsl.utilities.utilities import fix_partial_correct_response
-
- from edsl.enums import InferenceServiceType
- import random
-
-
- class TestService(InferenceServiceABC):
-     """OpenAI service class."""
-
-     _inference_service_ = "test"
-     _env_key_name_ = None
-     _base_url_ = None
-
-     _sync_client_ = None
-     _async_client_ = None
-
-     _sync_client_instance = None
-     _async_client_instance = None
-
-     key_sequence = None
-     usage_sequence = None
-     model_exclude_list = []
-     input_token_name = "prompt_tokens"
-     output_token_name = "completion_tokens"
-
-     @classmethod
-     def available(cls) -> list[str]:
-         return ["test"]
-
-     @classmethod
-     def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
-         throw_exception = False
-
-         class TestServiceLanguageModel(LanguageModel):
-             _model_ = "test"
-             _parameters_ = {"temperature": 0.5}
-             _inference_service_ = InferenceServiceType.TEST.value
-             usage_sequence = ["usage"]
-             key_sequence = ["message", 0, "text"]
-             input_token_name = cls.input_token_name
-             output_token_name = cls.output_token_name
-             _rpm = 1000
-             _tpm = 100000
-
-             @property
-             def _canned_response(self):
-                 if hasattr(self, "canned_response"):
-                     return self.canned_response
-                 else:
-                     return "Hello, world"
-
-             async def async_execute_model_call(
-                 self,
-                 user_prompt: str,
-                 system_prompt: str,
-                 # func: Optional[callable] = None,
-                 files_list: Optional[List["File"]] = None,
-             ) -> dict[str, Any]:
-                 await asyncio.sleep(0.1)
-                 # return {"message": """{"answer": "Hello, world"}"""}
-
-                 if hasattr(self, "func"):
-                     return {
-                         "message": [
-                             {"text": self.func(user_prompt, system_prompt, files_list)}
-                         ],
-                         "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                     }
-
-                 if hasattr(self, "throw_exception") and self.throw_exception:
-                     if hasattr(self, "exception_probability"):
-                         p = self.exception_probability
-                     else:
-                         p = 1
-
-                     if random.random() < p:
-                         raise Exception("This is a test error")
-                 return {
-                     "message": [{"text": f"{self._canned_response}"}],
-                     "usage": {"prompt_tokens": 1, "completion_tokens": 1},
-                 }
-
-         return TestServiceLanguageModel
+ from typing import Any, List, Optional
+ import os
+ import asyncio
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models import LanguageModel
+ from edsl.inference_services.rate_limits_cache import rate_limits
+ from edsl.utilities.utilities import fix_partial_correct_response
+
+ from edsl.enums import InferenceServiceType
+ import random
+
+
+ class TestService(InferenceServiceABC):
+     """OpenAI service class."""
+
+     _inference_service_ = "test"
+     _env_key_name_ = None
+     _base_url_ = None
+
+     _sync_client_ = None
+     _async_client_ = None
+
+     _sync_client_instance = None
+     _async_client_instance = None
+
+     key_sequence = None
+     usage_sequence = None
+     model_exclude_list = []
+     input_token_name = "prompt_tokens"
+     output_token_name = "completion_tokens"
+
+     @classmethod
+     def available(cls) -> list[str]:
+         return ["test"]
+
+     @classmethod
+     def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
+         throw_exception = False
+
+         class TestServiceLanguageModel(LanguageModel):
+             _model_ = "test"
+             _parameters_ = {"temperature": 0.5}
+             _inference_service_ = InferenceServiceType.TEST.value
+             usage_sequence = ["usage"]
+             key_sequence = ["message", 0, "text"]
+             input_token_name = cls.input_token_name
+             output_token_name = cls.output_token_name
+             _rpm = 1000
+             _tpm = 100000
+
+             @property
+             def _canned_response(self):
+                 if hasattr(self, "canned_response"):
+                     return self.canned_response
+                 else:
+                     return "Hello, world"
+
+             async def async_execute_model_call(
+                 self,
+                 user_prompt: str,
+                 system_prompt: str,
+                 # func: Optional[callable] = None,
+                 files_list: Optional[List["File"]] = None,
+             ) -> dict[str, Any]:
+                 await asyncio.sleep(0.1)
+                 # return {"message": """{"answer": "Hello, world"}"""}
+
+                 if hasattr(self, "func"):
+                     return {
+                         "message": [
+                             {"text": self.func(user_prompt, system_prompt, files_list)}
+                         ],
+                         "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+                     }
+
+                 if hasattr(self, "throw_exception") and self.throw_exception:
+                     if hasattr(self, "exception_probability"):
+                         p = self.exception_probability
+                     else:
+                         p = 1
+
+                     if random.random() < p:
+                         raise Exception("This is a test error")
+                 return {
+                     "message": [{"text": f"{self._canned_response}"}],
+                     "usage": {"prompt_tokens": 1, "completion_tokens": 1},
+                 }
+
+         return TestServiceLanguageModel
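
The hunk above replaces TestService.py wholesale. For readers skimming the diff, the sketch below shows how the canned-response hook defined there might be exercised. It is a minimal sketch, not part of the release: constructing TestModel() with no arguments assumes the LanguageModel base class (not shown in this hunk) allows it, and the attribute names are simply the ones async_execute_model_call() probes with hasattr().

    import asyncio

    from edsl.inference_services.TestService import TestService

    # Build the concrete test model class via the factory defined in the hunk above.
    TestModel = TestService.create_model("test")

    # Assumption: LanguageModel can be instantiated without arguments.
    model = TestModel()
    model.canned_response = "Hello from the test service"  # hook read by _canned_response

    async def main() -> None:
        response = await model.async_execute_model_call(
            user_prompt="What is 2 + 2?",
            system_prompt="You are a test agent.",
        )
        # key_sequence = ["message", 0, "text"] puts the answer text here:
        print(response["message"][0]["text"])  # Hello from the test service
        print(response["usage"])               # {'prompt_tokens': 1, 'completion_tokens': 1}

    asyncio.run(main())
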
edsl/inference_services/TogetherAIService.py
@@ -1,170 +1,170 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List, Optional
-
- # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
-
- from edsl.inference_services.OpenAIService import OpenAIService
- import openai
-
-
- class TogetherAIService(OpenAIService):
-     """DeepInfra service class."""
-
-     _inference_service_ = "together"
-     _env_key_name_ = "TOGETHER_API_KEY"
-     _base_url_ = "https://api.together.xyz/v1"
-     _models_list_cache: List[str] = []
-
-     # These are non-serverless models. There was no api param to filter them
-     model_exclude_list = [
-         "EleutherAI/llemma_7b",
-         "HuggingFaceH4/zephyr-7b-beta",
-         "Nexusflow/NexusRaven-V2-13B",
-         "NousResearch/Hermes-2-Theta-Llama-3-70B",
-         "NousResearch/Nous-Capybara-7B-V1p9",
-         "NousResearch/Nous-Hermes-13b",
-         "NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
-         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
-         "NousResearch/Nous-Hermes-Llama2-13b",
-         "NousResearch/Nous-Hermes-Llama2-70b",
-         "NousResearch/Nous-Hermes-llama-2-7b",
-         "NumbersStation/nsql-llama-2-7B",
-         "Open-Orca/Mistral-7B-OpenOrca",
-         "Phind/Phind-CodeLlama-34B-Python-v1",
-         "Phind/Phind-CodeLlama-34B-v2",
-         "Qwen/Qwen1.5-0.5B",
-         "Qwen/Qwen1.5-0.5B-Chat",
-         "Qwen/Qwen1.5-1.8B",
-         "Qwen/Qwen1.5-1.8B-Chat",
-         "Qwen/Qwen1.5-14B",
-         "Qwen/Qwen1.5-14B-Chat",
-         "Qwen/Qwen1.5-32B",
-         "Qwen/Qwen1.5-32B-Chat",
-         "Qwen/Qwen1.5-4B",
-         "Qwen/Qwen1.5-4B-Chat",
-         "Qwen/Qwen1.5-72B",
-         "Qwen/Qwen1.5-7B",
-         "Qwen/Qwen1.5-7B-Chat",
-         "Qwen/Qwen2-1.5B",
-         "Qwen/Qwen2-1.5B-Instruct",
-         "Qwen/Qwen2-72B",
-         "Qwen/Qwen2-7B",
-         "Qwen/Qwen2-7B-Instruct",
-         "SG161222/Realistic_Vision_V3.0_VAE",
-         "Snowflake/snowflake-arctic-instruct",
-         "Undi95/ReMM-SLERP-L2-13B",
-         "Undi95/Toppy-M-7B",
-         "WizardLM/WizardCoder-Python-34B-V1.0",
-         "WizardLM/WizardLM-13B-V1.2",
-         "WizardLM/WizardLM-70B-V1.0",
-         "allenai/OLMo-7B",
-         "allenai/OLMo-7B-Instruct",
-         "bert-base-uncased",
-         "codellama/CodeLlama-13b-Instruct-hf",
-         "codellama/CodeLlama-13b-Python-hf",
-         "codellama/CodeLlama-13b-hf",
-         "codellama/CodeLlama-34b-Python-hf",
-         "codellama/CodeLlama-34b-hf",
-         "codellama/CodeLlama-70b-Instruct-hf",
-         "codellama/CodeLlama-70b-Python-hf",
-         "codellama/CodeLlama-70b-hf",
-         "codellama/CodeLlama-7b-Instruct-hf",
-         "codellama/CodeLlama-7b-Python-hf",
-         "codellama/CodeLlama-7b-hf",
-         "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
-         "deepseek-ai/deepseek-coder-33b-instruct",
-         "garage-bAInd/Platypus2-70B-instruct",
-         "google/gemma-2b",
-         "google/gemma-7b",
-         "google/gemma-7b-it",
-         "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
-         "hazyresearch/M2-BERT-2k-Retrieval-Encoder-V1",
-         "huggyllama/llama-13b",
-         "huggyllama/llama-30b",
-         "huggyllama/llama-65b",
-         "huggyllama/llama-7b",
-         "lmsys/vicuna-13b-v1.3",
-         "lmsys/vicuna-13b-v1.5",
-         "lmsys/vicuna-13b-v1.5-16k",
-         "lmsys/vicuna-7b-v1.3",
-         "lmsys/vicuna-7b-v1.5",
-         "meta-llama/Llama-2-13b-hf",
-         "meta-llama/Llama-2-70b-chat-hf",
-         "meta-llama/Llama-2-7b-hf",
-         "meta-llama/Llama-3-70b-hf",
-         "meta-llama/Llama-3-8b-hf",
-         "meta-llama/Meta-Llama-3-70B",
-         "meta-llama/Meta-Llama-3-70B-Instruct",
-         "meta-llama/Meta-Llama-3-8B-Instruct",
-         "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
-         "meta-llama/Meta-Llama-3.1-70B-Reference",
-         "meta-llama/Meta-Llama-3.1-8B-Reference",
-         "microsoft/phi-2",
-         "mistralai/Mixtral-8x22B",
-         "openchat/openchat-3.5-1210",
-         "prompthero/openjourney",
-         "runwayml/stable-diffusion-v1-5",
-         "sentence-transformers/msmarco-bert-base-dot-v5",
-         "snorkelai/Snorkel-Mistral-PairRM-DPO",
-         "stabilityai/stable-diffusion-2-1",
-         "teknium/OpenHermes-2-Mistral-7B",
-         "teknium/OpenHermes-2p5-Mistral-7B",
-         "togethercomputer/CodeLlama-13b-Instruct",
-         "togethercomputer/CodeLlama-13b-Python",
-         "togethercomputer/CodeLlama-34b",
-         "togethercomputer/CodeLlama-34b-Python",
-         "togethercomputer/CodeLlama-7b-Instruct",
-         "togethercomputer/CodeLlama-7b-Python",
-         "togethercomputer/Koala-13B",
-         "togethercomputer/Koala-7B",
-         "togethercomputer/LLaMA-2-7B-32K",
-         "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
-         "togethercomputer/StripedHyena-Hessian-7B",
-         "togethercomputer/alpaca-7b",
-         "togethercomputer/evo-1-131k-base",
-         "togethercomputer/evo-1-8k-base",
-         "togethercomputer/guanaco-13b",
-         "togethercomputer/guanaco-33b",
-         "togethercomputer/guanaco-65b",
-         "togethercomputer/guanaco-7b",
-         "togethercomputer/llama-2-13b",
-         "togethercomputer/llama-2-70b-chat",
-         "togethercomputer/llama-2-7b",
-         "wavymulder/Analog-Diffusion",
-         "zero-one-ai/Yi-34B",
-         "zero-one-ai/Yi-34B-Chat",
-         "zero-one-ai/Yi-6B",
-     ]
-
-     _sync_client_ = openai.OpenAI
-     _async_client_ = openai.AsyncOpenAI
-
-     @classmethod
-     def get_model_list(cls):
-         # Togheter.ai has a different response in model list then openai
-         # and the OpenAI class returns an error when calling .models.list()
-         import requests
-         import os
-
-         url = "https://api.together.xyz/v1/models?filter=serverless"
-         token = os.getenv(cls._env_key_name_)
-         headers = {"accept": "application/json", "authorization": f"Bearer {token}"}
-
-         response = requests.get(url, headers=headers)
-         return response.json()
-
-     @classmethod
-     def available(cls) -> List[str]:
-         if not cls._models_list_cache:
-             try:
-                 cls._models_list_cache = [
-                     m["id"]
-                     for m in cls.get_model_list()
-                     if m["id"] not in cls.model_exclude_list
-                 ]
-             except Exception as e:
-                 raise
-         return cls._models_list_cache
+ import aiohttp
+ import json
+ import requests
+ from typing import Any, List, Optional
+
+ # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models import LanguageModel
+
+ from edsl.inference_services.OpenAIService import OpenAIService
+ import openai
+
+
+ class TogetherAIService(OpenAIService):
+     """DeepInfra service class."""
+
+     _inference_service_ = "together"
+     _env_key_name_ = "TOGETHER_API_KEY"
+     _base_url_ = "https://api.together.xyz/v1"
+     _models_list_cache: List[str] = []
+
+     # These are non-serverless models. There was no api param to filter them
+     model_exclude_list = [
+         "EleutherAI/llemma_7b",
+         "HuggingFaceH4/zephyr-7b-beta",
+         "Nexusflow/NexusRaven-V2-13B",
+         "NousResearch/Hermes-2-Theta-Llama-3-70B",
+         "NousResearch/Nous-Capybara-7B-V1p9",
+         "NousResearch/Nous-Hermes-13b",
+         "NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
+         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
+         "NousResearch/Nous-Hermes-Llama2-13b",
+         "NousResearch/Nous-Hermes-Llama2-70b",
+         "NousResearch/Nous-Hermes-llama-2-7b",
+         "NumbersStation/nsql-llama-2-7B",
+         "Open-Orca/Mistral-7B-OpenOrca",
+         "Phind/Phind-CodeLlama-34B-Python-v1",
+         "Phind/Phind-CodeLlama-34B-v2",
+         "Qwen/Qwen1.5-0.5B",
+         "Qwen/Qwen1.5-0.5B-Chat",
+         "Qwen/Qwen1.5-1.8B",
+         "Qwen/Qwen1.5-1.8B-Chat",
+         "Qwen/Qwen1.5-14B",
+         "Qwen/Qwen1.5-14B-Chat",
+         "Qwen/Qwen1.5-32B",
+         "Qwen/Qwen1.5-32B-Chat",
+         "Qwen/Qwen1.5-4B",
+         "Qwen/Qwen1.5-4B-Chat",
+         "Qwen/Qwen1.5-72B",
+         "Qwen/Qwen1.5-7B",
+         "Qwen/Qwen1.5-7B-Chat",
+         "Qwen/Qwen2-1.5B",
+         "Qwen/Qwen2-1.5B-Instruct",
+         "Qwen/Qwen2-72B",
+         "Qwen/Qwen2-7B",
+         "Qwen/Qwen2-7B-Instruct",
+         "SG161222/Realistic_Vision_V3.0_VAE",
+         "Snowflake/snowflake-arctic-instruct",
+         "Undi95/ReMM-SLERP-L2-13B",
+         "Undi95/Toppy-M-7B",
+         "WizardLM/WizardCoder-Python-34B-V1.0",
+         "WizardLM/WizardLM-13B-V1.2",
+         "WizardLM/WizardLM-70B-V1.0",
+         "allenai/OLMo-7B",
+         "allenai/OLMo-7B-Instruct",
+         "bert-base-uncased",
+         "codellama/CodeLlama-13b-Instruct-hf",
+         "codellama/CodeLlama-13b-Python-hf",
+         "codellama/CodeLlama-13b-hf",
+         "codellama/CodeLlama-34b-Python-hf",
+         "codellama/CodeLlama-34b-hf",
+         "codellama/CodeLlama-70b-Instruct-hf",
+         "codellama/CodeLlama-70b-Python-hf",
+         "codellama/CodeLlama-70b-hf",
+         "codellama/CodeLlama-7b-Instruct-hf",
+         "codellama/CodeLlama-7b-Python-hf",
+         "codellama/CodeLlama-7b-hf",
+         "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
+         "deepseek-ai/deepseek-coder-33b-instruct",
+         "garage-bAInd/Platypus2-70B-instruct",
+         "google/gemma-2b",
+         "google/gemma-7b",
+         "google/gemma-7b-it",
+         "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
+         "hazyresearch/M2-BERT-2k-Retrieval-Encoder-V1",
+         "huggyllama/llama-13b",
+         "huggyllama/llama-30b",
+         "huggyllama/llama-65b",
+         "huggyllama/llama-7b",
+         "lmsys/vicuna-13b-v1.3",
+         "lmsys/vicuna-13b-v1.5",
+         "lmsys/vicuna-13b-v1.5-16k",
+         "lmsys/vicuna-7b-v1.3",
+         "lmsys/vicuna-7b-v1.5",
+         "meta-llama/Llama-2-13b-hf",
+         "meta-llama/Llama-2-70b-chat-hf",
+         "meta-llama/Llama-2-7b-hf",
+         "meta-llama/Llama-3-70b-hf",
+         "meta-llama/Llama-3-8b-hf",
+         "meta-llama/Meta-Llama-3-70B",
+         "meta-llama/Meta-Llama-3-70B-Instruct",
+         "meta-llama/Meta-Llama-3-8B-Instruct",
+         "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
+         "meta-llama/Meta-Llama-3.1-70B-Reference",
+         "meta-llama/Meta-Llama-3.1-8B-Reference",
+         "microsoft/phi-2",
+         "mistralai/Mixtral-8x22B",
+         "openchat/openchat-3.5-1210",
+         "prompthero/openjourney",
+         "runwayml/stable-diffusion-v1-5",
+         "sentence-transformers/msmarco-bert-base-dot-v5",
+         "snorkelai/Snorkel-Mistral-PairRM-DPO",
+         "stabilityai/stable-diffusion-2-1",
+         "teknium/OpenHermes-2-Mistral-7B",
+         "teknium/OpenHermes-2p5-Mistral-7B",
+         "togethercomputer/CodeLlama-13b-Instruct",
+         "togethercomputer/CodeLlama-13b-Python",
+         "togethercomputer/CodeLlama-34b",
+         "togethercomputer/CodeLlama-34b-Python",
+         "togethercomputer/CodeLlama-7b-Instruct",
+         "togethercomputer/CodeLlama-7b-Python",
+         "togethercomputer/Koala-13B",
+         "togethercomputer/Koala-7B",
+         "togethercomputer/LLaMA-2-7B-32K",
+         "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
+         "togethercomputer/StripedHyena-Hessian-7B",
+         "togethercomputer/alpaca-7b",
+         "togethercomputer/evo-1-131k-base",
+         "togethercomputer/evo-1-8k-base",
+         "togethercomputer/guanaco-13b",
+         "togethercomputer/guanaco-33b",
+         "togethercomputer/guanaco-65b",
+         "togethercomputer/guanaco-7b",
+         "togethercomputer/llama-2-13b",
+         "togethercomputer/llama-2-70b-chat",
+         "togethercomputer/llama-2-7b",
+         "wavymulder/Analog-Diffusion",
+         "zero-one-ai/Yi-34B",
+         "zero-one-ai/Yi-34B-Chat",
+         "zero-one-ai/Yi-6B",
+     ]
+
+     _sync_client_ = openai.OpenAI
+     _async_client_ = openai.AsyncOpenAI
+
+     @classmethod
+     def get_model_list(cls):
+         # Togheter.ai has a different response in model list then openai
+         # and the OpenAI class returns an error when calling .models.list()
+         import requests
+         import os
+
+         url = "https://api.together.xyz/v1/models?filter=serverless"
+         token = os.getenv(cls._env_key_name_)
+         headers = {"accept": "application/json", "authorization": f"Bearer {token}"}
+
+         response = requests.get(url, headers=headers)
+         return response.json()
+
+     @classmethod
+     def available(cls) -> List[str]:
+         if not cls._models_list_cache:
+             try:
+                 cls._models_list_cache = [
+                     m["id"]
+                     for m in cls.get_model_list()
+                     if m["id"] not in cls.model_exclude_list
+                 ]
+             except Exception as e:
+                 raise
+         return cls._models_list_cache
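
TogetherAIService.py is likewise rewritten in full. The standalone sketch below mirrors the filtering that get_model_list() and available() perform in the hunk above; the endpoint URL, headers, and TOGETHER_API_KEY environment variable are taken from the hunk, while treating the response body as a JSON array of objects with an "id" field is an assumption drawn from the list comprehension in available(), not something this diff states explicitly.

    import os
    import requests

    # Endpoint and auth header mirror get_model_list() in the hunk above.
    TOGETHER_MODELS_URL = "https://api.together.xyz/v1/models?filter=serverless"

    def together_serverless_models(exclude: frozenset = frozenset()) -> list:
        """List serverless Together model ids, minus an exclude list."""
        token = os.getenv("TOGETHER_API_KEY")
        headers = {"accept": "application/json", "authorization": f"Bearer {token}"}
        response = requests.get(TOGETHER_MODELS_URL, headers=headers)
        response.raise_for_status()
        # Assumption: the endpoint returns a JSON array of objects with an "id" field.
        return [m["id"] for m in response.json() if m["id"] not in exclude]

    if __name__ == "__main__":
        # Requires TOGETHER_API_KEY to be set in the environment.
        print(together_serverless_models(exclude=frozenset({"bert-base-uncased"})))
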