edsl 0.1.38.dev2__py3-none-any.whl → 0.1.38.dev3__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (248)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +858 -858
  7. edsl/agents/AgentList.py +362 -362
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -284
  10. edsl/agents/PromptConstructor.py +353 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +149 -149
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +961 -961
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +530 -530
  37. edsl/data/CacheEntry.py +228 -228
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +97 -97
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +173 -173
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -42
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +156 -156
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/TestService.py +89 -89
  72. edsl/inference_services/TogetherAIService.py +170 -170
  73. edsl/inference_services/models_available_cache.py +118 -118
  74. edsl/inference_services/rate_limits_cache.py +25 -25
  75. edsl/inference_services/registry.py +39 -39
  76. edsl/inference_services/write_available.py +10 -10
  77. edsl/jobs/Answers.py +56 -56
  78. edsl/jobs/Jobs.py +1358 -1358
  79. edsl/jobs/__init__.py +1 -1
  80. edsl/jobs/buckets/BucketCollection.py +63 -63
  81. edsl/jobs/buckets/ModelBuckets.py +65 -65
  82. edsl/jobs/buckets/TokenBucket.py +251 -251
  83. edsl/jobs/interviews/Interview.py +661 -661
  84. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  85. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  86. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  87. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  88. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  89. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  90. edsl/jobs/interviews/ReportErrors.py +66 -66
  91. edsl/jobs/interviews/interview_status_enum.py +9 -9
  92. edsl/jobs/runners/JobsRunnerAsyncio.py +361 -361
  93. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  94. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  95. edsl/jobs/tasks/TaskCreators.py +64 -64
  96. edsl/jobs/tasks/TaskHistory.py +451 -451
  97. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  98. edsl/jobs/tasks/task_status_enum.py +163 -163
  99. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  100. edsl/jobs/tokens/TokenUsage.py +34 -34
  101. edsl/language_models/KeyLookup.py +30 -30
  102. edsl/language_models/LanguageModel.py +708 -708
  103. edsl/language_models/ModelList.py +109 -109
  104. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  105. edsl/language_models/__init__.py +3 -3
  106. edsl/language_models/fake_openai_call.py +15 -15
  107. edsl/language_models/fake_openai_service.py +61 -61
  108. edsl/language_models/registry.py +137 -137
  109. edsl/language_models/repair.py +156 -156
  110. edsl/language_models/unused/ReplicateBase.py +83 -83
  111. edsl/language_models/utilities.py +64 -64
  112. edsl/notebooks/Notebook.py +258 -258
  113. edsl/notebooks/__init__.py +1 -1
  114. edsl/prompts/Prompt.py +357 -357
  115. edsl/prompts/__init__.py +2 -2
  116. edsl/questions/AnswerValidatorMixin.py +289 -289
  117. edsl/questions/QuestionBase.py +660 -660
  118. edsl/questions/QuestionBaseGenMixin.py +161 -161
  119. edsl/questions/QuestionBasePromptsMixin.py +217 -217
  120. edsl/questions/QuestionBudget.py +227 -227
  121. edsl/questions/QuestionCheckBox.py +359 -359
  122. edsl/questions/QuestionExtract.py +183 -183
  123. edsl/questions/QuestionFreeText.py +114 -114
  124. edsl/questions/QuestionFunctional.py +166 -166
  125. edsl/questions/QuestionList.py +231 -231
  126. edsl/questions/QuestionMultipleChoice.py +286 -286
  127. edsl/questions/QuestionNumerical.py +153 -153
  128. edsl/questions/QuestionRank.py +324 -324
  129. edsl/questions/Quick.py +41 -41
  130. edsl/questions/RegisterQuestionsMeta.py +71 -71
  131. edsl/questions/ResponseValidatorABC.py +174 -174
  132. edsl/questions/SimpleAskMixin.py +73 -73
  133. edsl/questions/__init__.py +26 -26
  134. edsl/questions/compose_questions.py +98 -98
  135. edsl/questions/decorators.py +21 -21
  136. edsl/questions/derived/QuestionLikertFive.py +76 -76
  137. edsl/questions/derived/QuestionLinearScale.py +87 -87
  138. edsl/questions/derived/QuestionTopK.py +93 -93
  139. edsl/questions/derived/QuestionYesNo.py +82 -82
  140. edsl/questions/descriptors.py +413 -413
  141. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  142. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  143. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  144. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  145. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  146. edsl/questions/prompt_templates/question_list.jinja +17 -17
  147. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  148. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  149. edsl/questions/question_registry.py +147 -147
  150. edsl/questions/settings.py +12 -12
  151. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  152. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  153. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  154. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  155. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  157. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  158. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  159. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  160. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  161. edsl/questions/templates/list/question_presentation.jinja +5 -5
  162. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  163. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  164. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  165. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  166. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  167. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  168. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  169. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  170. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  171. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  172. edsl/results/Dataset.py +293 -293
  173. edsl/results/DatasetExportMixin.py +717 -717
  174. edsl/results/DatasetTree.py +145 -145
  175. edsl/results/Result.py +456 -456
  176. edsl/results/Results.py +1071 -1071
  177. edsl/results/ResultsDBMixin.py +238 -238
  178. edsl/results/ResultsExportMixin.py +43 -43
  179. edsl/results/ResultsFetchMixin.py +33 -33
  180. edsl/results/ResultsGGMixin.py +121 -121
  181. edsl/results/ResultsToolsMixin.py +98 -98
  182. edsl/results/Selector.py +135 -135
  183. edsl/results/__init__.py +2 -2
  184. edsl/results/tree_explore.py +115 -115
  185. edsl/scenarios/FileStore.py +458 -458
  186. edsl/scenarios/Scenario.py +544 -544
  187. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  188. edsl/scenarios/ScenarioList.py +1112 -1112
  189. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  190. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  191. edsl/scenarios/__init__.py +4 -4
  192. edsl/shared.py +1 -1
  193. edsl/study/ObjectEntry.py +173 -173
  194. edsl/study/ProofOfWork.py +113 -113
  195. edsl/study/SnapShot.py +80 -80
  196. edsl/study/Study.py +528 -528
  197. edsl/study/__init__.py +4 -4
  198. edsl/surveys/DAG.py +148 -148
  199. edsl/surveys/Memory.py +31 -31
  200. edsl/surveys/MemoryPlan.py +244 -244
  201. edsl/surveys/Rule.py +326 -326
  202. edsl/surveys/RuleCollection.py +387 -387
  203. edsl/surveys/Survey.py +1787 -1787
  204. edsl/surveys/SurveyCSS.py +261 -261
  205. edsl/surveys/SurveyExportMixin.py +259 -259
  206. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  207. edsl/surveys/SurveyQualtricsImport.py +284 -284
  208. edsl/surveys/__init__.py +3 -3
  209. edsl/surveys/base.py +53 -53
  210. edsl/surveys/descriptors.py +56 -56
  211. edsl/surveys/instructions/ChangeInstruction.py +49 -49
  212. edsl/surveys/instructions/Instruction.py +53 -53
  213. edsl/surveys/instructions/InstructionCollection.py +77 -77
  214. edsl/templates/error_reporting/base.html +23 -23
  215. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  216. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  217. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  218. edsl/templates/error_reporting/interview_details.html +115 -115
  219. edsl/templates/error_reporting/interviews.html +9 -9
  220. edsl/templates/error_reporting/overview.html +4 -4
  221. edsl/templates/error_reporting/performance_plot.html +1 -1
  222. edsl/templates/error_reporting/report.css +73 -73
  223. edsl/templates/error_reporting/report.html +117 -117
  224. edsl/templates/error_reporting/report.js +25 -25
  225. edsl/tools/__init__.py +1 -1
  226. edsl/tools/clusters.py +192 -192
  227. edsl/tools/embeddings.py +27 -27
  228. edsl/tools/embeddings_plotting.py +118 -118
  229. edsl/tools/plotting.py +112 -112
  230. edsl/tools/summarize.py +18 -18
  231. edsl/utilities/SystemInfo.py +28 -28
  232. edsl/utilities/__init__.py +22 -22
  233. edsl/utilities/ast_utilities.py +25 -25
  234. edsl/utilities/data/Registry.py +6 -6
  235. edsl/utilities/data/__init__.py +1 -1
  236. edsl/utilities/data/scooter_results.json +1 -1
  237. edsl/utilities/decorators.py +77 -77
  238. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  239. edsl/utilities/interface.py +627 -627
  240. edsl/utilities/naming_utilities.py +263 -263
  241. edsl/utilities/repair_functions.py +28 -28
  242. edsl/utilities/restricted_python.py +70 -70
  243. edsl/utilities/utilities.py +409 -409
  244. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev3.dist-info}/LICENSE +21 -21
  245. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev3.dist-info}/METADATA +1 -1
  246. edsl-0.1.38.dev3.dist-info/RECORD +269 -0
  247. edsl-0.1.38.dev2.dist-info/RECORD +0 -269
  248. {edsl-0.1.38.dev2.dist-info → edsl-0.1.38.dev3.dist-info}/WHEEL +0 -0
edsl/inference_services/AzureAI.py
@@ -1,217 +1,217 @@
- import os
- from typing import Any, Optional, List
- import re
- from openai import AsyncAzureOpenAI
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models.LanguageModel import LanguageModel
-
- from azure.ai.inference.aio import ChatCompletionsClient
- from azure.core.credentials import AzureKeyCredential
- from azure.ai.inference.models import SystemMessage, UserMessage
- import asyncio
- import json
- from edsl.utilities.utilities import fix_partial_correct_response
-
-
- def json_handle_none(value: Any) -> Any:
-     """
-     Handle None values during JSON serialization.
-     - Return "null" if the value is None. Otherwise, don't return anything.
-     """
-     if value is None:
-         return "null"
-
-
- class AzureAIService(InferenceServiceABC):
-     """Azure AI service class."""
-
-     # key_sequence = ["content", 0, "text"] # ["content"][0]["text"]
-     key_sequence = ["choices", 0, "message", "content"]
-     usage_sequence = ["usage"]
-     input_token_name = "prompt_tokens"
-     output_token_name = "completion_tokens"
-
-     _inference_service_ = "azure"
-     _env_key_name_ = (
-         "AZURE_ENDPOINT_URL_AND_KEY" # Environment variable for Azure API key
-     )
-     _model_id_to_endpoint_and_key = {}
-     model_exclude_list = [
-         "Cohere-command-r-plus-xncmg",
-         "Mistral-Nemo-klfsi",
-         "Mistral-large-2407-ojfld",
-     ]
-
-     @classmethod
-     def available(cls):
-         out = []
-         azure_endpoints = os.getenv("AZURE_ENDPOINT_URL_AND_KEY", None)
-         if not azure_endpoints:
-             raise EnvironmentError(f"AZURE_ENDPOINT_URL_AND_KEY is not defined")
-         azure_endpoints = azure_endpoints.split(",")
-         for data in azure_endpoints:
-             try:
-                 # data has this format for non openai models https://model_id.azure_endpoint:azure_key
-                 _, endpoint, azure_endpoint_key = data.split(":")
-                 if "openai" not in endpoint:
-                     model_id = endpoint.split(".")[0].replace("/", "")
-                     out.append(model_id)
-                     cls._model_id_to_endpoint_and_key[model_id] = {
-                         "endpoint": f"https:{endpoint}",
-                         "azure_endpoint_key": azure_endpoint_key,
-                     }
-                 else:
-                     # data has this format for openai models ,https://azure_project_id.openai.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2023-03-15-preview:azure_key
-                     if "/deployments/" in endpoint:
-                         start_idx = endpoint.index("/deployments/") + len(
-                             "/deployments/"
-                         )
-                         end_idx = (
-                             endpoint.index("/", start_idx)
-                             if "/" in endpoint[start_idx:]
-                             else len(endpoint)
-                         )
-                         model_id = endpoint[start_idx:end_idx]
-                         api_version_value = None
-                         if "api-version=" in endpoint:
-                             start_idx = endpoint.index("api-version=") + len(
-                                 "api-version="
-                             )
-                             end_idx = (
-                                 endpoint.index("&", start_idx)
-                                 if "&" in endpoint[start_idx:]
-                                 else len(endpoint)
-                             )
-                             api_version_value = endpoint[start_idx:end_idx]
-
-                         cls._model_id_to_endpoint_and_key[f"azure:{model_id}"] = {
-                             "endpoint": f"https:{endpoint}",
-                             "azure_endpoint_key": azure_endpoint_key,
-                             "api_version": api_version_value,
-                         }
-                         out.append(f"azure:{model_id}")
-
-             except Exception as e:
-                 raise e
-         return [m for m in out if m not in cls.model_exclude_list]
-
-     @classmethod
-     def create_model(
-         cls, model_name: str = "azureai", model_class_name=None
-     ) -> LanguageModel:
-         if model_class_name is None:
-             model_class_name = cls.to_class_name(model_name)
-
-         class LLM(LanguageModel):
-             """
-             Child class of LanguageModel for interacting with Azure OpenAI models.
-             """
-
-             key_sequence = cls.key_sequence
-             usage_sequence = cls.usage_sequence
-             input_token_name = cls.input_token_name
-             output_token_name = cls.output_token_name
-             _inference_service_ = cls._inference_service_
-             _model_ = model_name
-             _parameters_ = {
-                 "temperature": 0.5,
-                 "max_tokens": 512,
-                 "top_p": 0.9,
-             }
-             _rpm = cls.get_rpm(cls)
-             _tpm = cls.get_tpm(cls)
-
-             async def async_execute_model_call(
-                 self,
-                 user_prompt: str,
-                 system_prompt: str = "",
-                 files_list: Optional[List["FileStore"]] = None,
-             ) -> dict[str, Any]:
-                 """Calls the Azure OpenAI API and returns the API response."""
-
-                 try:
-                     api_key = cls._model_id_to_endpoint_and_key[model_name][
-                         "azure_endpoint_key"
-                     ]
-                 except:
-                     api_key = None
-
-                 if not api_key:
-                     raise EnvironmentError(
-                         f"AZURE_ENDPOINT_URL_AND_KEY doesn't have the endpoint:key pair for your model: {model_name}"
-                     )
-
-                 try:
-                     endpoint = cls._model_id_to_endpoint_and_key[model_name]["endpoint"]
-                 except:
-                     endpoint = None
-
-                 if not endpoint:
-                     raise EnvironmentError(
-                         f"AZURE_ENDPOINT_URL_AND_KEY doesn't have the endpoint:key pair for your model: {model_name}"
-                     )
-
-                 if "openai" not in endpoint:
-                     client = ChatCompletionsClient(
-                         endpoint=endpoint,
-                         credential=AzureKeyCredential(api_key),
-                         temperature=self.temperature,
-                         top_p=self.top_p,
-                         max_tokens=self.max_tokens,
-                     )
-                     try:
-                         response = await client.complete(
-                             messages=[
-                                 SystemMessage(content=system_prompt),
-                                 UserMessage(content=user_prompt),
-                             ],
-                             # model_extras={"safe_mode": True},
-                         )
-                         await client.close()
-                         return response.as_dict()
-                     except Exception as e:
-                         await client.close()
-                         return {"error": str(e)}
-                 else:
-                     api_version = cls._model_id_to_endpoint_and_key[model_name][
-                         "api_version"
-                     ]
-                     client = AsyncAzureOpenAI(
-                         azure_endpoint=endpoint,
-                         api_version=api_version,
-                         api_key=api_key,
-                     )
-                     response = await client.chat.completions.create(
-                         model=model_name,
-                         messages=[
-                             {
-                                 "role": "user",
-                                 "content": user_prompt, # Your question can go here
-                             },
-                         ],
-                     )
-                     return response.model_dump()
-
-             # @staticmethod
-             # def parse_response(raw_response: dict[str, Any]) -> str:
-             #     """Parses the API response and returns the response text."""
-             #     if (
-             #         raw_response
-             #         and "choices" in raw_response
-             #         and raw_response["choices"]
-             #     ):
-             #         response = raw_response["choices"][0]["message"]["content"]
-             #         pattern = r"^```json(?:\\n|\n)(.+?)(?:\\n|\n)```$"
-             #         match = re.match(pattern, response, re.DOTALL)
-             #         if match:
-             #             return match.group(1)
-             #         else:
-             #             out = fix_partial_correct_response(response)
-             #             if "error" not in out:
-             #                 response = out["extracted_json"]
-             #             return response
-             #     return "Error parsing response"
-
-         LLM.__name__ = model_class_name
-
-         return LLM
+ import os
+ from typing import Any, Optional, List
+ import re
+ from openai import AsyncAzureOpenAI
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models.LanguageModel import LanguageModel
+
+ from azure.ai.inference.aio import ChatCompletionsClient
+ from azure.core.credentials import AzureKeyCredential
+ from azure.ai.inference.models import SystemMessage, UserMessage
+ import asyncio
+ import json
+ from edsl.utilities.utilities import fix_partial_correct_response
+
+
+ def json_handle_none(value: Any) -> Any:
+     """
+     Handle None values during JSON serialization.
+     - Return "null" if the value is None. Otherwise, don't return anything.
+     """
+     if value is None:
+         return "null"
+
+
+ class AzureAIService(InferenceServiceABC):
+     """Azure AI service class."""
+
+     # key_sequence = ["content", 0, "text"] # ["content"][0]["text"]
+     key_sequence = ["choices", 0, "message", "content"]
+     usage_sequence = ["usage"]
+     input_token_name = "prompt_tokens"
+     output_token_name = "completion_tokens"
+
+     _inference_service_ = "azure"
+     _env_key_name_ = (
+         "AZURE_ENDPOINT_URL_AND_KEY" # Environment variable for Azure API key
+     )
+     _model_id_to_endpoint_and_key = {}
+     model_exclude_list = [
+         "Cohere-command-r-plus-xncmg",
+         "Mistral-Nemo-klfsi",
+         "Mistral-large-2407-ojfld",
+     ]
+
+     @classmethod
+     def available(cls):
+         out = []
+         azure_endpoints = os.getenv("AZURE_ENDPOINT_URL_AND_KEY", None)
+         if not azure_endpoints:
+             raise EnvironmentError(f"AZURE_ENDPOINT_URL_AND_KEY is not defined")
+         azure_endpoints = azure_endpoints.split(",")
+         for data in azure_endpoints:
+             try:
+                 # data has this format for non openai models https://model_id.azure_endpoint:azure_key
+                 _, endpoint, azure_endpoint_key = data.split(":")
+                 if "openai" not in endpoint:
+                     model_id = endpoint.split(".")[0].replace("/", "")
+                     out.append(model_id)
+                     cls._model_id_to_endpoint_and_key[model_id] = {
+                         "endpoint": f"https:{endpoint}",
+                         "azure_endpoint_key": azure_endpoint_key,
+                     }
+                 else:
+                     # data has this format for openai models ,https://azure_project_id.openai.azure.com/openai/deployments/gpt-4o-mini/chat/completions?api-version=2023-03-15-preview:azure_key
+                     if "/deployments/" in endpoint:
+                         start_idx = endpoint.index("/deployments/") + len(
+                             "/deployments/"
+                         )
+                         end_idx = (
+                             endpoint.index("/", start_idx)
+                             if "/" in endpoint[start_idx:]
+                             else len(endpoint)
+                         )
+                         model_id = endpoint[start_idx:end_idx]
+                         api_version_value = None
+                         if "api-version=" in endpoint:
+                             start_idx = endpoint.index("api-version=") + len(
+                                 "api-version="
+                             )
+                             end_idx = (
+                                 endpoint.index("&", start_idx)
+                                 if "&" in endpoint[start_idx:]
+                                 else len(endpoint)
+                             )
+                             api_version_value = endpoint[start_idx:end_idx]
+
+                         cls._model_id_to_endpoint_and_key[f"azure:{model_id}"] = {
+                             "endpoint": f"https:{endpoint}",
+                             "azure_endpoint_key": azure_endpoint_key,
+                             "api_version": api_version_value,
+                         }
+                         out.append(f"azure:{model_id}")
+
+             except Exception as e:
+                 raise e
+         return [m for m in out if m not in cls.model_exclude_list]
+
+     @classmethod
+     def create_model(
+         cls, model_name: str = "azureai", model_class_name=None
+     ) -> LanguageModel:
+         if model_class_name is None:
+             model_class_name = cls.to_class_name(model_name)
+
+         class LLM(LanguageModel):
+             """
+             Child class of LanguageModel for interacting with Azure OpenAI models.
+             """
+
+             key_sequence = cls.key_sequence
+             usage_sequence = cls.usage_sequence
+             input_token_name = cls.input_token_name
+             output_token_name = cls.output_token_name
+             _inference_service_ = cls._inference_service_
+             _model_ = model_name
+             _parameters_ = {
+                 "temperature": 0.5,
+                 "max_tokens": 512,
+                 "top_p": 0.9,
+             }
+             _rpm = cls.get_rpm(cls)
+             _tpm = cls.get_tpm(cls)
+
+             async def async_execute_model_call(
+                 self,
+                 user_prompt: str,
+                 system_prompt: str = "",
+                 files_list: Optional[List["FileStore"]] = None,
+             ) -> dict[str, Any]:
+                 """Calls the Azure OpenAI API and returns the API response."""
+
+                 try:
+                     api_key = cls._model_id_to_endpoint_and_key[model_name][
+                         "azure_endpoint_key"
+                     ]
+                 except:
+                     api_key = None
+
+                 if not api_key:
+                     raise EnvironmentError(
+                         f"AZURE_ENDPOINT_URL_AND_KEY doesn't have the endpoint:key pair for your model: {model_name}"
+                     )
+
+                 try:
+                     endpoint = cls._model_id_to_endpoint_and_key[model_name]["endpoint"]
+                 except:
+                     endpoint = None
+
+                 if not endpoint:
+                     raise EnvironmentError(
+                         f"AZURE_ENDPOINT_URL_AND_KEY doesn't have the endpoint:key pair for your model: {model_name}"
+                     )
+
+                 if "openai" not in endpoint:
+                     client = ChatCompletionsClient(
+                         endpoint=endpoint,
+                         credential=AzureKeyCredential(api_key),
+                         temperature=self.temperature,
+                         top_p=self.top_p,
+                         max_tokens=self.max_tokens,
+                     )
+                     try:
+                         response = await client.complete(
+                             messages=[
+                                 SystemMessage(content=system_prompt),
+                                 UserMessage(content=user_prompt),
+                             ],
+                             # model_extras={"safe_mode": True},
+                         )
+                         await client.close()
+                         return response.as_dict()
+                     except Exception as e:
+                         await client.close()
+                         return {"error": str(e)}
+                 else:
+                     api_version = cls._model_id_to_endpoint_and_key[model_name][
+                         "api_version"
+                     ]
+                     client = AsyncAzureOpenAI(
+                         azure_endpoint=endpoint,
+                         api_version=api_version,
+                         api_key=api_key,
+                     )
+                     response = await client.chat.completions.create(
+                         model=model_name,
+                         messages=[
+                             {
+                                 "role": "user",
+                                 "content": user_prompt, # Your question can go here
+                             },
+                         ],
+                     )
+                     return response.model_dump()
+
+             # @staticmethod
+             # def parse_response(raw_response: dict[str, Any]) -> str:
+             #     """Parses the API response and returns the response text."""
+             #     if (
+             #         raw_response
+             #         and "choices" in raw_response
+             #         and raw_response["choices"]
+             #     ):
+             #         response = raw_response["choices"][0]["message"]["content"]
+             #         pattern = r"^```json(?:\\n|\n)(.+?)(?:\\n|\n)```$"
+             #         match = re.match(pattern, response, re.DOTALL)
+             #         if match:
+             #             return match.group(1)
+             #         else:
+             #             out = fix_partial_correct_response(response)
+             #             if "error" not in out:
+             #                 response = out["extracted_json"]
+             #             return response
+             #     return "Error parsing response"
+
+         LLM.__name__ = model_class_name
+
+         return LLM
edsl/inference_services/DeepInfraService.py
@@ -1,18 +1,18 @@
- import aiohttp
- import json
- import requests
- from typing import Any, List
-
- # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
- from edsl.language_models import LanguageModel
-
- from edsl.inference_services.OpenAIService import OpenAIService
-
-
- class DeepInfraService(OpenAIService):
-     """DeepInfra service class."""
-
-     _inference_service_ = "deep_infra"
-     _env_key_name_ = "DEEP_INFRA_API_KEY"
-     _base_url_ = "https://api.deepinfra.com/v1/openai"
-     _models_list_cache: List[str] = []
+ import aiohttp
+ import json
+ import requests
+ from typing import Any, List
+
+ # from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
+ from edsl.language_models import LanguageModel
+
+ from edsl.inference_services.OpenAIService import OpenAIService
+
+
+ class DeepInfraService(OpenAIService):
+     """DeepInfra service class."""
+
+     _inference_service_ = "deep_infra"
+     _env_key_name_ = "DEEP_INFRA_API_KEY"
+     _base_url_ = "https://api.deepinfra.com/v1/openai"
+     _models_list_cache: List[str] = []