edsl 0.1.38.dev3__py3-none-any.whl → 0.1.38.dev4__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
Files changed (256)
  1. edsl/Base.py +332 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -858
  7. edsl/agents/AgentList.py +413 -362
  8. edsl/agents/Invigilator.py +233 -222
  9. edsl/agents/InvigilatorBase.py +265 -284
  10. edsl/agents/PromptConstructor.py +354 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +157 -149
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -961
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -530
  37. edsl/data/CacheEntry.py +233 -228
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +78 -97
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +175 -173
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -42
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -156
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/PerplexityService.py +163 -0
  72. edsl/inference_services/TestService.py +89 -89
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -39
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -56
  79. edsl/jobs/Jobs.py +898 -1358
  80. edsl/jobs/JobsChecks.py +147 -0
  81. edsl/jobs/JobsPrompts.py +268 -0
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -0
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -63
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -251
  87. edsl/jobs/interviews/Interview.py +661 -661
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -361
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -332
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -451
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -163
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -30
  106. edsl/language_models/LanguageModel.py +668 -708
  107. edsl/language_models/ModelList.py +155 -109
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -3
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -137
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -83
  115. edsl/language_models/utilities.py +64 -64
  116. edsl/notebooks/Notebook.py +258 -258
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -357
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -289
  121. edsl/questions/QuestionBase.py +664 -660
  122. edsl/questions/QuestionBaseGenMixin.py +161 -161
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -217
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -183
  127. edsl/questions/QuestionFreeText.py +114 -114
  128. edsl/questions/QuestionFunctional.py +166 -166
  129. edsl/questions/QuestionList.py +231 -231
  130. edsl/questions/QuestionMultipleChoice.py +286 -286
  131. edsl/questions/QuestionNumerical.py +153 -153
  132. edsl/questions/QuestionRank.py +324 -324
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -174
  136. edsl/questions/SimpleAskMixin.py +73 -73
  137. edsl/questions/__init__.py +26 -26
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -87
  142. edsl/questions/derived/QuestionTopK.py +93 -93
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -413
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -147
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -0
  177. edsl/results/Dataset.py +424 -293
  178. edsl/results/DatasetExportMixin.py +731 -717
  179. edsl/results/DatasetTree.py +275 -145
  180. edsl/results/Result.py +465 -456
  181. edsl/results/Results.py +1165 -1071
  182. edsl/results/ResultsDBMixin.py +238 -238
  183. edsl/results/ResultsExportMixin.py +43 -43
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -135
  188. edsl/results/TableDisplay.py +198 -0
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +78 -0
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -458
  193. edsl/scenarios/Scenario.py +601 -544
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  195. edsl/scenarios/ScenarioJoin.py +127 -0
  196. edsl/scenarios/ScenarioList.py +1287 -1112
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  199. edsl/scenarios/__init__.py +4 -4
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -528
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -326
  210. edsl/surveys/RuleCollection.py +387 -387
  211. edsl/surveys/Survey.py +1801 -1787
  212. edsl/surveys/SurveyCSS.py +261 -261
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/SurveyFlowVisualizationMixin.py +179 -121
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -3
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -56
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -49
  220. edsl/surveys/instructions/Instruction.py +65 -53
  221. edsl/surveys/instructions/InstructionCollection.py +77 -77
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -10
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/utilities/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -409
  252. {edsl-0.1.38.dev3.dist-info → edsl-0.1.38.dev4.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.38.dev3.dist-info → edsl-0.1.38.dev4.dist-info}/METADATA +2 -1
  254. edsl-0.1.38.dev4.dist-info/RECORD +277 -0
  255. edsl-0.1.38.dev3.dist-info/RECORD +0 -269
  256. {edsl-0.1.38.dev3.dist-info → edsl-0.1.38.dev4.dist-info}/WHEEL +0 -0
edsl/inference_services/InferenceServicesCollection.py
@@ -1,97 +1,97 @@
-from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
-import warnings
-
-
-class InferenceServicesCollection:
-    added_models = {}
-
-    def __init__(self, services: list[InferenceServiceABC] = None):
-        self.services = services or []
-
-    @classmethod
-    def add_model(cls, service_name, model_name):
-        if service_name not in cls.added_models:
-            cls.added_models[service_name] = []
-        cls.added_models[service_name].append(model_name)
-
-    @staticmethod
-    def _get_service_available(service, warn: bool = False) -> list[str]:
-        try:
-            service_models = service.available()
-        except Exception:
-            if warn:
-                warnings.warn(
-                    f"""Error getting models for {service._inference_service_}.
-                    Check that you have properly stored your Expected Parrot API key and activated remote inference, or stored your own API keys for the language models that you want to use.
-                    See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
-                    Relying on Coop.""",
-                    UserWarning,
-                )
-
-            # Use the list of models on Coop as a fallback
-            try:
-                from edsl import Coop
-
-                c = Coop()
-                models_from_coop = c.fetch_models()
-                service_models = models_from_coop.get(service._inference_service_, [])
-
-                # cache results
-                service._models_list_cache = service_models
-
-            # Finally, use the available models cache from the Python file
-            except Exception:
-                if warn:
-                    warnings.warn(
-                        f"""Error getting models for {service._inference_service_}.
-                        Relying on EDSL cache.""",
-                        UserWarning,
-                    )
-
-                from edsl.inference_services.models_available_cache import (
-                    models_available,
-                )
-
-                service_models = models_available.get(service._inference_service_, [])
-
-                # cache results
-                service._models_list_cache = service_models
-
-        return service_models
-
-    def available(self):
-        total_models = []
-        for service in self.services:
-            service_models = self._get_service_available(service)
-            for model in service_models:
-                total_models.append([model, service._inference_service_, -1])
-
-            for model in self.added_models.get(service._inference_service_, []):
-                total_models.append([model, service._inference_service_, -1])
-
-        sorted_models = sorted(total_models)
-        for i, model in enumerate(sorted_models):
-            model[2] = i
-            model = tuple(model)
-        return sorted_models
-
-    def register(self, service):
-        self.services.append(service)
-
-    def create_model_factory(self, model_name: str, service_name=None, index=None):
-        from edsl.inference_services.TestService import TestService
-
-        if model_name == "test":
-            return TestService.create_model(model_name)
-
-        if service_name:
-            for service in self.services:
-                if service_name == service._inference_service_:
-                    return service.create_model(model_name)
-
-        for service in self.services:
-            if model_name in self._get_service_available(service):
-                if service_name is None or service_name == service._inference_service_:
-                    return service.create_model(model_name)
-
-        raise Exception(f"Model {model_name} not found in any of the services")
+from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+import warnings
+
+
+class InferenceServicesCollection:
+    added_models = {}
+
+    def __init__(self, services: list[InferenceServiceABC] = None):
+        self.services = services or []
+
+    @classmethod
+    def add_model(cls, service_name, model_name):
+        if service_name not in cls.added_models:
+            cls.added_models[service_name] = []
+        cls.added_models[service_name].append(model_name)
+
+    @staticmethod
+    def _get_service_available(service, warn: bool = False) -> list[str]:
+        try:
+            service_models = service.available()
+        except Exception:
+            if warn:
+                warnings.warn(
+                    f"""Error getting models for {service._inference_service_}.
+                    Check that you have properly stored your Expected Parrot API key and activated remote inference, or stored your own API keys for the language models that you want to use.
+                    See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
+                    Relying on Coop.""",
+                    UserWarning,
+                )
+
+            # Use the list of models on Coop as a fallback
+            try:
+                from edsl import Coop
+
+                c = Coop()
+                models_from_coop = c.fetch_models()
+                service_models = models_from_coop.get(service._inference_service_, [])
+
+                # cache results
+                service._models_list_cache = service_models
+
+            # Finally, use the available models cache from the Python file
+            except Exception:
+                if warn:
+                    warnings.warn(
+                        f"""Error getting models for {service._inference_service_}.
+                        Relying on EDSL cache.""",
+                        UserWarning,
+                    )
+
+                from edsl.inference_services.models_available_cache import (
+                    models_available,
+                )
+
+                service_models = models_available.get(service._inference_service_, [])
+
+                # cache results
+                service._models_list_cache = service_models
+
+        return service_models
+
+    def available(self):
+        total_models = []
+        for service in self.services:
+            service_models = self._get_service_available(service)
+            for model in service_models:
+                total_models.append([model, service._inference_service_, -1])
+
+            for model in self.added_models.get(service._inference_service_, []):
+                total_models.append([model, service._inference_service_, -1])
+
+        sorted_models = sorted(total_models)
+        for i, model in enumerate(sorted_models):
+            model[2] = i
+            model = tuple(model)
+        return sorted_models
+
+    def register(self, service):
+        self.services.append(service)
+
+    def create_model_factory(self, model_name: str, service_name=None, index=None):
+        from edsl.inference_services.TestService import TestService
+
+        if model_name == "test":
+            return TestService.create_model(model_name)
+
+        if service_name:
+            for service in self.services:
+                if service_name == service._inference_service_:
+                    return service.create_model(model_name)
+
+        for service in self.services:
+            if model_name in self._get_service_available(service):
+                if service_name is None or service_name == service._inference_service_:
+                    return service.create_model(model_name)
+
+        raise Exception(f"Model {model_name} not found in any of the services")
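
The hunk above rewrites `edsl/inference_services/InferenceServicesCollection.py` without changing its logic: `_get_service_available` falls back from a service's own `available()` to Coop and then to the bundled `models_available` cache, `available()` flattens every service's models into `[model, service, index]` rows, and `create_model_factory` resolves a model name by exact service match first, then by scanning each service's model list. A minimal sketch of that resolution order, using a hypothetical `FakeService` stand-in (not part of edsl) in place of a real service class:

```python
# Sketch only: FakeService is a hypothetical stand-in for a real service class
# (OpenAIService, AnthropicService, ...). The collection only duck-types on
# _inference_service_, available(), and create_model(), so a stub is enough.
from edsl.inference_services.InferenceServicesCollection import (
    InferenceServicesCollection,
)


class FakeService:
    _inference_service_ = "fake"

    @classmethod
    def available(cls):
        # What _get_service_available tries first, before the Coop fallback.
        return ["fake-small", "fake-large"]

    @classmethod
    def create_model(cls, model_name):
        return f"<model factory for {model_name}>"


collection = InferenceServicesCollection()
collection.register(FakeService)                           # appends to self.services
InferenceServicesCollection.add_model("fake", "fake-xl")   # extra hand-registered model

print(collection.available())
# [['fake-large', 'fake', 0], ['fake-small', 'fake', 1], ['fake-xl', 'fake', 2]]

# Exact service match wins; without service_name the collection would scan
# each service's available() list until it finds the model name.
print(collection.create_model_factory("fake-small", service_name="fake"))
```

With `service_name=None`, a misspelled model name falls through every service and ends in the `Model ... not found in any of the services` exception raised at the bottom of the file.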
edsl/inference_services/MistralAIService.py
@@ -1,123 +1,123 @@
-import os
-from typing import Any, List, Optional
-from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
-from edsl.language_models.LanguageModel import LanguageModel
-import asyncio
-from mistralai import Mistral
-
-from edsl.exceptions.language_models import LanguageModelBadResponseError
-
-
-class MistralAIService(InferenceServiceABC):
-    """Mistral AI service class."""
-
-    key_sequence = ["choices", 0, "message", "content"]
-    usage_sequence = ["usage"]
-
-    _inference_service_ = "mistral"
-    _env_key_name_ = "MISTRAL_API_KEY"  # Environment variable for Mistral API key
-    input_token_name = "prompt_tokens"
-    output_token_name = "completion_tokens"
-
-    _sync_client_instance = None
-    _async_client_instance = None
-
-    _sync_client = Mistral
-    _async_client = Mistral
-
-    _models_list_cache: List[str] = []
-    model_exclude_list = []
-
-    def __init_subclass__(cls, **kwargs):
-        super().__init_subclass__(**kwargs)
-        # so subclasses have to create their own instances of the clients
-        cls._sync_client_instance = None
-        cls._async_client_instance = None
-
-    @classmethod
-    def sync_client(cls):
-        if cls._sync_client_instance is None:
-            cls._sync_client_instance = cls._sync_client(
-                api_key=os.getenv(cls._env_key_name_)
-            )
-        return cls._sync_client_instance
-
-    @classmethod
-    def async_client(cls):
-        if cls._async_client_instance is None:
-            cls._async_client_instance = cls._async_client(
-                api_key=os.getenv(cls._env_key_name_)
-            )
-        return cls._async_client_instance
-
-    @classmethod
-    def available(cls) -> list[str]:
-        if not cls._models_list_cache:
-            cls._models_list_cache = [
-                m.id for m in cls.sync_client().models.list().data
-            ]
-
-        return cls._models_list_cache
-
-    @classmethod
-    def create_model(
-        cls, model_name: str = "mistral", model_class_name=None
-    ) -> LanguageModel:
-        if model_class_name is None:
-            model_class_name = cls.to_class_name(model_name)
-
-        class LLM(LanguageModel):
-            """
-            Child class of LanguageModel for interacting with Mistral models.
-            """
-
-            key_sequence = cls.key_sequence
-            usage_sequence = cls.usage_sequence
-
-            input_token_name = cls.input_token_name
-            output_token_name = cls.output_token_name
-
-            _inference_service_ = cls._inference_service_
-            _model_ = model_name
-            _parameters_ = {
-                "temperature": 0.5,
-                "max_tokens": 512,
-                "top_p": 0.9,
-            }
-
-            _tpm = cls.get_tpm(cls)
-            _rpm = cls.get_rpm(cls)
-
-            def sync_client(self):
-                return cls.sync_client()
-
-            def async_client(self):
-                return cls.async_client()
-
-            async def async_execute_model_call(
-                self,
-                user_prompt: str,
-                system_prompt: str = "",
-                files_list: Optional[List["FileStore"]] = None,
-            ) -> dict[str, Any]:
-                """Calls the Mistral API and returns the API response."""
-                s = self.async_client()
-
-                try:
-                    res = await s.chat.complete_async(
-                        model=model_name,
-                        messages=[
-                            {
-                                "content": user_prompt,
-                                "role": "user",
-                            },
-                        ],
-                    )
-                except Exception as e:
-                    raise LanguageModelBadResponseError(f"Error with Mistral API: {e}")
-
-                return res.model_dump()
-
-        LLM.__name__ = model_class_name
-
-        return LLM
+import os
+from typing import Any, List, Optional
+from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+from edsl.language_models.LanguageModel import LanguageModel
+import asyncio
+from mistralai import Mistral
+
+from edsl.exceptions.language_models import LanguageModelBadResponseError
+
+
+class MistralAIService(InferenceServiceABC):
+    """Mistral AI service class."""
+
+    key_sequence = ["choices", 0, "message", "content"]
+    usage_sequence = ["usage"]
+
+    _inference_service_ = "mistral"
+    _env_key_name_ = "MISTRAL_API_KEY"  # Environment variable for Mistral API key
+    input_token_name = "prompt_tokens"
+    output_token_name = "completion_tokens"
+
+    _sync_client_instance = None
+    _async_client_instance = None
+
+    _sync_client = Mistral
+    _async_client = Mistral
+
+    _models_list_cache: List[str] = []
+    model_exclude_list = []
+
+    def __init_subclass__(cls, **kwargs):
+        super().__init_subclass__(**kwargs)
+        # so subclasses have to create their own instances of the clients
+        cls._sync_client_instance = None
+        cls._async_client_instance = None
+
+    @classmethod
+    def sync_client(cls):
+        if cls._sync_client_instance is None:
+            cls._sync_client_instance = cls._sync_client(
+                api_key=os.getenv(cls._env_key_name_)
+            )
+        return cls._sync_client_instance
+
+    @classmethod
+    def async_client(cls):
+        if cls._async_client_instance is None:
+            cls._async_client_instance = cls._async_client(
+                api_key=os.getenv(cls._env_key_name_)
+            )
+        return cls._async_client_instance
+
+    @classmethod
+    def available(cls) -> list[str]:
+        if not cls._models_list_cache:
+            cls._models_list_cache = [
+                m.id for m in cls.sync_client().models.list().data
+            ]
+
+        return cls._models_list_cache
+
+    @classmethod
+    def create_model(
+        cls, model_name: str = "mistral", model_class_name=None
+    ) -> LanguageModel:
+        if model_class_name is None:
+            model_class_name = cls.to_class_name(model_name)
+
+        class LLM(LanguageModel):
+            """
+            Child class of LanguageModel for interacting with Mistral models.
+            """
+
+            key_sequence = cls.key_sequence
+            usage_sequence = cls.usage_sequence
+
+            input_token_name = cls.input_token_name
+            output_token_name = cls.output_token_name
+
+            _inference_service_ = cls._inference_service_
+            _model_ = model_name
+            _parameters_ = {
+                "temperature": 0.5,
+                "max_tokens": 512,
+                "top_p": 0.9,
+            }
+
+            _tpm = cls.get_tpm(cls)
+            _rpm = cls.get_rpm(cls)
+
+            def sync_client(self):
+                return cls.sync_client()
+
+            def async_client(self):
+                return cls.async_client()
+
+            async def async_execute_model_call(
+                self,
+                user_prompt: str,
+                system_prompt: str = "",
+                files_list: Optional[List["FileStore"]] = None,
+            ) -> dict[str, Any]:
+                """Calls the Mistral API and returns the API response."""
+                s = self.async_client()
+
+                try:
+                    res = await s.chat.complete_async(
+                        model=model_name,
+                        messages=[
+                            {
+                                "content": user_prompt,
+                                "role": "user",
+                            },
+                        ],
+                    )
+                except Exception as e:
+                    raise LanguageModelBadResponseError(f"Error with Mistral API: {e}")
+
+                return res.model_dump()
+
+        LLM.__name__ = model_class_name
+
+        return LLM
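
`MistralAIService.create_model` builds and returns a `LanguageModel` subclass whose `async_execute_model_call` forwards the prompt to Mistral's `chat.complete_async` and hands back the raw response dict; `key_sequence` and `usage_sequence` tell the rest of edsl where the answer text and token usage live in that dict. A rough usage sketch, assuming `MISTRAL_API_KEY` is set, that `"mistral-large-latest"` is a valid model id, and that the generated class can be instantiated with its default parameters (those three points are assumptions, only the method names come from the diff):

```python
# Rough sketch, not the normal edsl entry point (models are usually resolved
# through the InferenceServicesCollection shown in the previous file).
import asyncio

from edsl.inference_services.MistralAIService import MistralAIService

print(MistralAIService.available())  # model ids fetched once, then cached on the class

LLM = MistralAIService.create_model("mistral-large-latest")  # returns a class, not an instance
model = LLM()  # assumes the default parameters are enough to construct it

raw = asyncio.run(model.async_execute_model_call(user_prompt="Say hello in one word."))
# key_sequence = ["choices", 0, "message", "content"] is where edsl reads the answer:
print(raw["choices"][0]["message"]["content"])
```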
edsl/inference_services/OllamaService.py
@@ -1,18 +1,18 @@
-import aiohttp
-import json
-import requests
-from typing import Any, List
-
-# from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
-from edsl.language_models import LanguageModel
-
-from edsl.inference_services.OpenAIService import OpenAIService
-
-
-class OllamaService(OpenAIService):
-    """DeepInfra service class."""
-
-    _inference_service_ = "ollama"
-    _env_key_name_ = "DEEP_INFRA_API_KEY"
-    _base_url_ = "http://localhost:11434/v1"
-    _models_list_cache: List[str] = []
+import aiohttp
+import json
+import requests
+from typing import Any, List
+
+# from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+from edsl.language_models import LanguageModel
+
+from edsl.inference_services.OpenAIService import OpenAIService
+
+
+class OllamaService(OpenAIService):
+    """DeepInfra service class."""
+
+    _inference_service_ = "ollama"
+    _env_key_name_ = "DEEP_INFRA_API_KEY"
+    _base_url_ = "http://localhost:11434/v1"
+    _models_list_cache: List[str] = []
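
`OllamaService` is a thin subclass of `OpenAIService` whose only substantive override is `_base_url_ = "http://localhost:11434/v1"`, the OpenAI-compatible endpoint a local Ollama server exposes (the "DeepInfra service class" docstring appears to be a leftover from the class it was copied from). A sketch of what that base URL implies, using the stock `openai` client directly; the running `ollama serve` instance and the `"llama3"` model name are assumptions, not part of this diff:

```python
# Sketch only: talks to the same endpoint OllamaService points at.
# Assumes `ollama serve` is running locally and a model named "llama3" is pulled.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:11434/v1",  # same URL as OllamaService._base_url_
    api_key="ollama",                      # Ollama ignores the key, but the client requires one
)

resp = client.chat.completions.create(
    model="llama3",
    messages=[{"role": "user", "content": "One-sentence summary of EDSL?"}],
)
print(resp.choices[0].message.content)
```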