edsl 0.1.37.dev2__py3-none-any.whl → 0.1.37.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (257)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +804 -804
  7. edsl/agents/AgentList.py +345 -345
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +305 -305
  10. edsl/agents/PromptConstructor.py +312 -312
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +86 -86
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +152 -152
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +238 -238
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/mug_negotiation.py +81 -81
  45. edsl/conversation/next_speaker_utilities.py +93 -93
  46. edsl/coop/PriceFetcher.py +54 -54
  47. edsl/coop/__init__.py +2 -2
  48. edsl/coop/coop.py +827 -824
  49. edsl/coop/utils.py +131 -131
  50. edsl/data/Cache.py +527 -527
  51. edsl/data/CacheEntry.py +228 -228
  52. edsl/data/CacheHandler.py +149 -149
  53. edsl/data/RemoteCacheSync.py +97 -97
  54. edsl/data/SQLiteDict.py +292 -292
  55. edsl/data/__init__.py +4 -4
  56. edsl/data/orm.py +10 -10
  57. edsl/data_transfer_models.py +73 -73
  58. edsl/enums.py +173 -173
  59. edsl/exceptions/__init__.py +50 -50
  60. edsl/exceptions/agents.py +40 -40
  61. edsl/exceptions/configuration.py +16 -16
  62. edsl/exceptions/coop.py +10 -10
  63. edsl/exceptions/data.py +14 -14
  64. edsl/exceptions/general.py +34 -34
  65. edsl/exceptions/jobs.py +33 -33
  66. edsl/exceptions/language_models.py +63 -63
  67. edsl/exceptions/prompts.py +15 -15
  68. edsl/exceptions/questions.py +91 -91
  69. edsl/exceptions/results.py +26 -26
  70. edsl/exceptions/surveys.py +34 -34
  71. edsl/inference_services/AnthropicService.py +87 -87
  72. edsl/inference_services/AwsBedrock.py +120 -115
  73. edsl/inference_services/AzureAI.py +217 -217
  74. edsl/inference_services/DeepInfraService.py +18 -18
  75. edsl/inference_services/GoogleService.py +156 -156
  76. edsl/inference_services/GroqService.py +20 -20
  77. edsl/inference_services/InferenceServiceABC.py +147 -147
  78. edsl/inference_services/InferenceServicesCollection.py +74 -74
  79. edsl/inference_services/MistralAIService.py +123 -123
  80. edsl/inference_services/OllamaService.py +18 -18
  81. edsl/inference_services/OpenAIService.py +224 -224
  82. edsl/inference_services/TestService.py +89 -89
  83. edsl/inference_services/TogetherAIService.py +170 -170
  84. edsl/inference_services/models_available_cache.py +118 -118
  85. edsl/inference_services/rate_limits_cache.py +25 -25
  86. edsl/inference_services/registry.py +39 -39
  87. edsl/inference_services/write_available.py +10 -10
  88. edsl/jobs/Answers.py +56 -56
  89. edsl/jobs/Jobs.py +1135 -1112
  90. edsl/jobs/__init__.py +1 -1
  91. edsl/jobs/buckets/BucketCollection.py +63 -63
  92. edsl/jobs/buckets/ModelBuckets.py +65 -65
  93. edsl/jobs/buckets/TokenBucket.py +248 -248
  94. edsl/jobs/interviews/Interview.py +661 -661
  95. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  96. edsl/jobs/interviews/InterviewExceptionEntry.py +182 -182
  97. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  98. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  99. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  100. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  101. edsl/jobs/interviews/ReportErrors.py +66 -66
  102. edsl/jobs/interviews/interview_status_enum.py +9 -9
  103. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  104. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  105. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  106. edsl/jobs/tasks/TaskCreators.py +64 -64
  107. edsl/jobs/tasks/TaskHistory.py +441 -441
  108. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  109. edsl/jobs/tasks/task_status_enum.py +163 -163
  110. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  111. edsl/jobs/tokens/TokenUsage.py +34 -34
  112. edsl/language_models/LanguageModel.py +718 -718
  113. edsl/language_models/ModelList.py +102 -102
  114. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  115. edsl/language_models/__init__.py +2 -2
  116. edsl/language_models/fake_openai_call.py +15 -15
  117. edsl/language_models/fake_openai_service.py +61 -61
  118. edsl/language_models/registry.py +137 -137
  119. edsl/language_models/repair.py +156 -156
  120. edsl/language_models/unused/ReplicateBase.py +83 -83
  121. edsl/language_models/utilities.py +64 -64
  122. edsl/notebooks/Notebook.py +259 -259
  123. edsl/notebooks/__init__.py +1 -1
  124. edsl/prompts/Prompt.py +353 -353
  125. edsl/prompts/__init__.py +2 -2
  126. edsl/questions/AnswerValidatorMixin.py +289 -289
  127. edsl/questions/QuestionBase.py +616 -616
  128. edsl/questions/QuestionBaseGenMixin.py +161 -161
  129. edsl/questions/QuestionBasePromptsMixin.py +266 -266
  130. edsl/questions/QuestionBudget.py +227 -227
  131. edsl/questions/QuestionCheckBox.py +359 -359
  132. edsl/questions/QuestionExtract.py +183 -183
  133. edsl/questions/QuestionFreeText.py +114 -114
  134. edsl/questions/QuestionFunctional.py +159 -159
  135. edsl/questions/QuestionList.py +231 -231
  136. edsl/questions/QuestionMultipleChoice.py +286 -286
  137. edsl/questions/QuestionNumerical.py +153 -153
  138. edsl/questions/QuestionRank.py +324 -324
  139. edsl/questions/Quick.py +41 -41
  140. edsl/questions/RegisterQuestionsMeta.py +71 -71
  141. edsl/questions/ResponseValidatorABC.py +174 -174
  142. edsl/questions/SimpleAskMixin.py +73 -73
  143. edsl/questions/__init__.py +26 -26
  144. edsl/questions/compose_questions.py +98 -98
  145. edsl/questions/decorators.py +21 -21
  146. edsl/questions/derived/QuestionLikertFive.py +76 -76
  147. edsl/questions/derived/QuestionLinearScale.py +87 -87
  148. edsl/questions/derived/QuestionTopK.py +91 -91
  149. edsl/questions/derived/QuestionYesNo.py +82 -82
  150. edsl/questions/descriptors.py +418 -418
  151. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  152. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  153. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  154. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  155. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  156. edsl/questions/prompt_templates/question_list.jinja +17 -17
  157. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  158. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  159. edsl/questions/question_registry.py +147 -147
  160. edsl/questions/settings.py +12 -12
  161. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  162. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  163. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  164. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  165. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  167. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  168. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  169. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  170. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  171. edsl/questions/templates/list/question_presentation.jinja +5 -5
  172. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  173. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  174. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  176. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  177. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  178. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  179. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  180. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  181. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  182. edsl/results/Dataset.py +293 -293
  183. edsl/results/DatasetExportMixin.py +693 -693
  184. edsl/results/DatasetTree.py +145 -145
  185. edsl/results/Result.py +435 -435
  186. edsl/results/Results.py +1160 -1160
  187. edsl/results/ResultsDBMixin.py +238 -238
  188. edsl/results/ResultsExportMixin.py +43 -43
  189. edsl/results/ResultsFetchMixin.py +33 -33
  190. edsl/results/ResultsGGMixin.py +121 -121
  191. edsl/results/ResultsToolsMixin.py +98 -98
  192. edsl/results/Selector.py +118 -118
  193. edsl/results/__init__.py +2 -2
  194. edsl/results/tree_explore.py +115 -115
  195. edsl/scenarios/FileStore.py +458 -458
  196. edsl/scenarios/Scenario.py +510 -510
  197. edsl/scenarios/ScenarioHtmlMixin.py +59 -59
  198. edsl/scenarios/ScenarioList.py +1101 -1101
  199. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  200. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  201. edsl/scenarios/__init__.py +4 -4
  202. edsl/shared.py +1 -1
  203. edsl/study/ObjectEntry.py +173 -173
  204. edsl/study/ProofOfWork.py +113 -113
  205. edsl/study/SnapShot.py +80 -80
  206. edsl/study/Study.py +528 -528
  207. edsl/study/__init__.py +4 -4
  208. edsl/surveys/DAG.py +148 -148
  209. edsl/surveys/Memory.py +31 -31
  210. edsl/surveys/MemoryPlan.py +244 -244
  211. edsl/surveys/Rule.py +324 -324
  212. edsl/surveys/RuleCollection.py +387 -387
  213. edsl/surveys/Survey.py +1772 -1772
  214. edsl/surveys/SurveyCSS.py +261 -261
  215. edsl/surveys/SurveyExportMixin.py +259 -259
  216. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  217. edsl/surveys/SurveyQualtricsImport.py +284 -284
  218. edsl/surveys/__init__.py +3 -3
  219. edsl/surveys/base.py +53 -53
  220. edsl/surveys/descriptors.py +56 -56
  221. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  222. edsl/surveys/instructions/Instruction.py +51 -51
  223. edsl/surveys/instructions/InstructionCollection.py +77 -77
  224. edsl/templates/error_reporting/base.html +23 -23
  225. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  226. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  227. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  228. edsl/templates/error_reporting/interview_details.html +115 -115
  229. edsl/templates/error_reporting/interviews.html +9 -9
  230. edsl/templates/error_reporting/overview.html +4 -4
  231. edsl/templates/error_reporting/performance_plot.html +1 -1
  232. edsl/templates/error_reporting/report.css +73 -73
  233. edsl/templates/error_reporting/report.html +117 -117
  234. edsl/templates/error_reporting/report.js +25 -25
  235. edsl/tools/__init__.py +1 -1
  236. edsl/tools/clusters.py +192 -192
  237. edsl/tools/embeddings.py +27 -27
  238. edsl/tools/embeddings_plotting.py +118 -118
  239. edsl/tools/plotting.py +112 -112
  240. edsl/tools/summarize.py +18 -18
  241. edsl/utilities/SystemInfo.py +28 -28
  242. edsl/utilities/__init__.py +22 -22
  243. edsl/utilities/ast_utilities.py +25 -25
  244. edsl/utilities/data/Registry.py +6 -6
  245. edsl/utilities/data/__init__.py +1 -1
  246. edsl/utilities/data/scooter_results.json +1 -1
  247. edsl/utilities/decorators.py +77 -77
  248. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  249. edsl/utilities/interface.py +627 -627
  250. edsl/utilities/repair_functions.py +28 -28
  251. edsl/utilities/restricted_python.py +70 -70
  252. edsl/utilities/utilities.py +391 -391
  253. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev4.dist-info}/LICENSE +21 -21
  254. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev4.dist-info}/METADATA +1 -1
  255. edsl-0.1.37.dev4.dist-info/RECORD +279 -0
  256. edsl-0.1.37.dev2.dist-info/RECORD +0 -279
  257. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev4.dist-info}/WHEEL +0 -0
edsl/inference_services/InferenceServicesCollection.py
@@ -1,74 +1,74 @@
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- import warnings
-
-
- class InferenceServicesCollection:
-     added_models = {}
-
-     def __init__(self, services: list[InferenceServiceABC] = None):
-         self.services = services or []
-
-     @classmethod
-     def add_model(cls, service_name, model_name):
-         if service_name not in cls.added_models:
-             cls.added_models[service_name] = []
-         cls.added_models[service_name].append(model_name)
-
-     @staticmethod
-     def _get_service_available(service, warn: bool = False) -> list[str]:
-         from_api = True
-         try:
-             service_models = service.available()
-         except Exception as e:
-             if warn:
-                 warnings.warn(
-                     f"""Error getting models for {service._inference_service_}.
-                     Check that you have properly stored your Expected Parrot API key and activated remote inference, or stored your own API keys for the language models that you want to use.
-                     See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
-                     Relying on cache.""",
-                     UserWarning,
-                 )
-             from edsl.inference_services.models_available_cache import models_available
-
-             service_models = models_available.get(service._inference_service_, [])
-             # cache results
-             service._models_list_cache = service_models
-             from_api = False
-         return service_models  # , from_api
-
-     def available(self):
-         total_models = []
-         for service in self.services:
-             service_models = self._get_service_available(service)
-             for model in service_models:
-                 total_models.append([model, service._inference_service_, -1])
-
-             for model in self.added_models.get(service._inference_service_, []):
-                 total_models.append([model, service._inference_service_, -1])
-
-         sorted_models = sorted(total_models)
-         for i, model in enumerate(sorted_models):
-             model[2] = i
-             model = tuple(model)
-         return sorted_models
-
-     def register(self, service):
-         self.services.append(service)
-
-     def create_model_factory(self, model_name: str, service_name=None, index=None):
-         from edsl.inference_services.TestService import TestService
-
-         if model_name == "test":
-             return TestService.create_model(model_name)
-
-         if service_name:
-             for service in self.services:
-                 if service_name == service._inference_service_:
-                     return service.create_model(model_name)
-
-         for service in self.services:
-             if model_name in self._get_service_available(service):
-                 if service_name is None or service_name == service._inference_service_:
-                     return service.create_model(model_name)
-
-         raise Exception(f"Model {model_name} not found in any of the services")
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ import warnings
+
+
+ class InferenceServicesCollection:
+     added_models = {}
+
+     def __init__(self, services: list[InferenceServiceABC] = None):
+         self.services = services or []
+
+     @classmethod
+     def add_model(cls, service_name, model_name):
+         if service_name not in cls.added_models:
+             cls.added_models[service_name] = []
+         cls.added_models[service_name].append(model_name)
+
+     @staticmethod
+     def _get_service_available(service, warn: bool = False) -> list[str]:
+         from_api = True
+         try:
+             service_models = service.available()
+         except Exception as e:
+             if warn:
+                 warnings.warn(
+                     f"""Error getting models for {service._inference_service_}.
+                     Check that you have properly stored your Expected Parrot API key and activated remote inference, or stored your own API keys for the language models that you want to use.
+                     See https://docs.expectedparrot.com/en/latest/api_keys.html for instructions on storing API keys.
+                     Relying on cache.""",
+                     UserWarning,
+                 )
+             from edsl.inference_services.models_available_cache import models_available
+
+             service_models = models_available.get(service._inference_service_, [])
+             # cache results
+             service._models_list_cache = service_models
+             from_api = False
+         return service_models  # , from_api
+
+     def available(self):
+         total_models = []
+         for service in self.services:
+             service_models = self._get_service_available(service)
+             for model in service_models:
+                 total_models.append([model, service._inference_service_, -1])
+
+             for model in self.added_models.get(service._inference_service_, []):
+                 total_models.append([model, service._inference_service_, -1])
+
+         sorted_models = sorted(total_models)
+         for i, model in enumerate(sorted_models):
+             model[2] = i
+             model = tuple(model)
+         return sorted_models
+
+     def register(self, service):
+         self.services.append(service)
+
+     def create_model_factory(self, model_name: str, service_name=None, index=None):
+         from edsl.inference_services.TestService import TestService
+
+         if model_name == "test":
+             return TestService.create_model(model_name)
+
+         if service_name:
+             for service in self.services:
+                 if service_name == service._inference_service_:
+                     return service.create_model(model_name)
+
+         for service in self.services:
+             if model_name in self._get_service_available(service):
+                 if service_name is None or service_name == service._inference_service_:
+                     return service.create_model(model_name)
+
+         raise Exception(f"Model {model_name} not found in any of the services")
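The collection above is a simple registry: services are appended with register(), available() flattens each service's model list into [model, service, index] triples, and create_model_factory() returns the LanguageModel subclass for a named model. A minimal usage sketch, not taken from the package itself; it only assumes the service exposes the classmethods and attributes used above, as TestService does:

from edsl.inference_services.InferenceServicesCollection import InferenceServicesCollection
from edsl.inference_services.TestService import TestService

# Build a collection and register a service. The code above only needs objects
# exposing available(), create_model() and _inference_service_, so passing the
# service class itself (rather than an instance) works for this sketch.
collection = InferenceServicesCollection()
collection.register(TestService)

print(collection.available())                        # e.g. [["test", "test", 0]]
ModelClass = collection.create_model_factory("test")  # a LanguageModel subclass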
edsl/inference_services/MistralAIService.py
@@ -1,123 +1,123 @@
- import os
- from typing import Any, List, Optional
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models.LanguageModel import LanguageModel
- import asyncio
- from mistralai import Mistral
-
- from edsl.exceptions.language_models import LanguageModelBadResponseError
-
-
- class MistralAIService(InferenceServiceABC):
-     """Mistral AI service class."""
-
-     key_sequence = ["choices", 0, "message", "content"]
-     usage_sequence = ["usage"]
-
-     _inference_service_ = "mistral"
-     _env_key_name_ = "MISTRAL_API_KEY"  # Environment variable for Mistral API key
-     input_token_name = "prompt_tokens"
-     output_token_name = "completion_tokens"
-
-     _sync_client_instance = None
-     _async_client_instance = None
-
-     _sync_client = Mistral
-     _async_client = Mistral
-
-     _models_list_cache: List[str] = []
-     model_exclude_list = []
-
-     def __init_subclass__(cls, **kwargs):
-         super().__init_subclass__(**kwargs)
-         # so subclasses have to create their own instances of the clients
-         cls._sync_client_instance = None
-         cls._async_client_instance = None
-
-     @classmethod
-     def sync_client(cls):
-         if cls._sync_client_instance is None:
-             cls._sync_client_instance = cls._sync_client(
-                 api_key=os.getenv(cls._env_key_name_)
-             )
-         return cls._sync_client_instance
-
-     @classmethod
-     def async_client(cls):
-         if cls._async_client_instance is None:
-             cls._async_client_instance = cls._async_client(
-                 api_key=os.getenv(cls._env_key_name_)
-             )
-         return cls._async_client_instance
-
-     @classmethod
-     def available(cls) -> list[str]:
-         if not cls._models_list_cache:
-             cls._models_list_cache = [
-                 m.id for m in cls.sync_client().models.list().data
-             ]
-
-         return cls._models_list_cache
-
-     @classmethod
-     def create_model(
-         cls, model_name: str = "mistral", model_class_name=None
-     ) -> LanguageModel:
-         if model_class_name is None:
-             model_class_name = cls.to_class_name(model_name)
-
-         class LLM(LanguageModel):
-             """
-             Child class of LanguageModel for interacting with Mistral models.
-             """
-
-             key_sequence = cls.key_sequence
-             usage_sequence = cls.usage_sequence
-
-             input_token_name = cls.input_token_name
-             output_token_name = cls.output_token_name
-
-             _inference_service_ = cls._inference_service_
-             _model_ = model_name
-             _parameters_ = {
-                 "temperature": 0.5,
-                 "max_tokens": 512,
-                 "top_p": 0.9,
-             }
-
-             _tpm = cls.get_tpm(cls)
-             _rpm = cls.get_rpm(cls)
-
-             def sync_client(self):
-                 return cls.sync_client()
-
-             def async_client(self):
-                 return cls.async_client()
-
-             async def async_execute_model_call(
-                 self,
-                 user_prompt: str,
-                 system_prompt: str = "",
-                 files_list: Optional[List["FileStore"]] = None,
-             ) -> dict[str, Any]:
-                 """Calls the Mistral API and returns the API response."""
-                 s = self.async_client()
-
-                 try:
-                     res = await s.chat.complete_async(
-                         model=model_name,
-                         messages=[
-                             {
-                                 "content": user_prompt,
-                                 "role": "user",
-                             },
-                         ],
-                     )
-                 except Exception as e:
-                     raise LanguageModelBadResponseError(f"Error with Mistral API: {e}")
-
-                 return res.model_dump()
-
-         LLM.__name__ = model_class_name
-
-         return LLM
+ import os
+ from typing import Any, List, Optional
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models.LanguageModel import LanguageModel
+ import asyncio
+ from mistralai import Mistral
+
+ from edsl.exceptions.language_models import LanguageModelBadResponseError
+
+
+ class MistralAIService(InferenceServiceABC):
+     """Mistral AI service class."""
+
+     key_sequence = ["choices", 0, "message", "content"]
+     usage_sequence = ["usage"]
+
+     _inference_service_ = "mistral"
+     _env_key_name_ = "MISTRAL_API_KEY"  # Environment variable for Mistral API key
+     input_token_name = "prompt_tokens"
+     output_token_name = "completion_tokens"
+
+     _sync_client_instance = None
+     _async_client_instance = None
+
+     _sync_client = Mistral
+     _async_client = Mistral
+
+     _models_list_cache: List[str] = []
+     model_exclude_list = []
+
+     def __init_subclass__(cls, **kwargs):
+         super().__init_subclass__(**kwargs)
+         # so subclasses have to create their own instances of the clients
+         cls._sync_client_instance = None
+         cls._async_client_instance = None
+
+     @classmethod
+     def sync_client(cls):
+         if cls._sync_client_instance is None:
+             cls._sync_client_instance = cls._sync_client(
+                 api_key=os.getenv(cls._env_key_name_)
+             )
+         return cls._sync_client_instance
+
+     @classmethod
+     def async_client(cls):
+         if cls._async_client_instance is None:
+             cls._async_client_instance = cls._async_client(
+                 api_key=os.getenv(cls._env_key_name_)
+             )
+         return cls._async_client_instance
+
+     @classmethod
+     def available(cls) -> list[str]:
+         if not cls._models_list_cache:
+             cls._models_list_cache = [
+                 m.id for m in cls.sync_client().models.list().data
+             ]
+
+         return cls._models_list_cache
+
+     @classmethod
+     def create_model(
+         cls, model_name: str = "mistral", model_class_name=None
+     ) -> LanguageModel:
+         if model_class_name is None:
+             model_class_name = cls.to_class_name(model_name)
+
+         class LLM(LanguageModel):
+             """
+             Child class of LanguageModel for interacting with Mistral models.
+             """
+
+             key_sequence = cls.key_sequence
+             usage_sequence = cls.usage_sequence
+
+             input_token_name = cls.input_token_name
+             output_token_name = cls.output_token_name
+
+             _inference_service_ = cls._inference_service_
+             _model_ = model_name
+             _parameters_ = {
+                 "temperature": 0.5,
+                 "max_tokens": 512,
+                 "top_p": 0.9,
+             }
+
+             _tpm = cls.get_tpm(cls)
+             _rpm = cls.get_rpm(cls)
+
+             def sync_client(self):
+                 return cls.sync_client()
+
+             def async_client(self):
+                 return cls.async_client()
+
+             async def async_execute_model_call(
+                 self,
+                 user_prompt: str,
+                 system_prompt: str = "",
+                 files_list: Optional[List["FileStore"]] = None,
+             ) -> dict[str, Any]:
+                 """Calls the Mistral API and returns the API response."""
+                 s = self.async_client()
+
+                 try:
+                     res = await s.chat.complete_async(
+                         model=model_name,
+                         messages=[
+                             {
+                                 "content": user_prompt,
+                                 "role": "user",
+                             },
+                         ],
+                     )
+                 except Exception as e:
+                     raise LanguageModelBadResponseError(f"Error with Mistral API: {e}")
+
+                 return res.model_dump()
+
+         LLM.__name__ = model_class_name
+
+         return LLM
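MistralAIService wraps each Mistral model in a dynamically created LanguageModel subclass; responses are read back through key_sequence (["choices", 0, "message", "content"]). A hedged usage sketch, assuming MISTRAL_API_KEY is set in the environment and that the generated class can be instantiated with its default parameters:

import asyncio
from edsl.inference_services.MistralAIService import MistralAIService

# create_model() returns a class, not an instance; the model name is illustrative.
ModelClass = MistralAIService.create_model("mistral-small-latest")
model = ModelClass()

# Direct low-level call; in normal edsl usage this is driven by the jobs runner.
raw = asyncio.run(model.async_execute_model_call(user_prompt="Say hello in one word."))
answer = raw["choices"][0]["message"]["content"]  # follows key_sequence above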
edsl/inference_services/OllamaService.py
@@ -1,18 +1,18 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List
-
- # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
-
- from edsl.inference_services.OpenAIService import OpenAIService
-
-
- class OllamaService(OpenAIService):
-     """DeepInfra service class."""
-
-     _inference_service_ = "ollama"
-     _env_key_name_ = "DEEP_INFRA_API_KEY"
-     _base_url_ = "http://localhost:11434/v1"
-     _models_list_cache: List[str] = []
+ import aiohttp
+ import json
+ import requests
+ from typing import Any, List
+
+ # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models import LanguageModel
+
+ from edsl.inference_services.OpenAIService import OpenAIService
+
+
+ class OllamaService(OpenAIService):
+     """DeepInfra service class."""
+
+     _inference_service_ = "ollama"
+     _env_key_name_ = "DEEP_INFRA_API_KEY"
+     _base_url_ = "http://localhost:11434/v1"
+     _models_list_cache: List[str] = []
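OllamaService reuses the OpenAI-compatible client machinery from OpenAIService, pointing it at a local Ollama server (http://localhost:11434/v1); the "DeepInfra service class" docstring and the DEEP_INFRA_API_KEY environment variable name appear to be carried over from the DeepInfra-style implementation it was adapted from. A hedged sketch, assuming an Ollama server is running locally and the named model has already been pulled:

from edsl.inference_services.OllamaService import OllamaService

# "llama3" is illustrative; any model served by the local Ollama instance works.
ModelClass = OllamaService.create_model("llama3")
model = ModelClass()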