edsl 0.1.38.dev1__py3-none-any.whl → 0.1.38.dev3__py3-none-any.whl

This diff compares the contents of two package versions as published to one of the supported public registries. It is provided for informational purposes only.
Files changed (263)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +858 -855
  7. edsl/agents/AgentList.py +362 -350
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -284
  10. edsl/agents/PromptConstructor.py +353 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -289
  26. edsl/config.py +149 -149
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +961 -958
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +530 -527
  37. edsl/data/CacheEntry.py +228 -228
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +97 -97
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +173 -173
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -38
  48. edsl/exceptions/cache.py +5 -0
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +156 -156
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/TestService.py +89 -89
  72. edsl/inference_services/TogetherAIService.py +170 -170
  73. edsl/inference_services/models_available_cache.py +118 -118
  74. edsl/inference_services/rate_limits_cache.py +25 -25
  75. edsl/inference_services/registry.py +39 -39
  76. edsl/inference_services/write_available.py +10 -10
  77. edsl/jobs/Answers.py +56 -56
  78. edsl/jobs/Jobs.py +1358 -1347
  79. edsl/jobs/__init__.py +1 -1
  80. edsl/jobs/buckets/BucketCollection.py +63 -63
  81. edsl/jobs/buckets/ModelBuckets.py +65 -65
  82. edsl/jobs/buckets/TokenBucket.py +251 -248
  83. edsl/jobs/interviews/Interview.py +661 -661
  84. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  85. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  86. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  87. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  88. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  89. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  90. edsl/jobs/interviews/ReportErrors.py +66 -66
  91. edsl/jobs/interviews/interview_status_enum.py +9 -9
  92. edsl/jobs/runners/JobsRunnerAsyncio.py +361 -338
  93. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  94. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  95. edsl/jobs/tasks/TaskCreators.py +64 -64
  96. edsl/jobs/tasks/TaskHistory.py +451 -442
  97. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  98. edsl/jobs/tasks/task_status_enum.py +163 -163
  99. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  100. edsl/jobs/tokens/TokenUsage.py +34 -34
  101. edsl/language_models/KeyLookup.py +30 -30
  102. edsl/language_models/LanguageModel.py +708 -706
  103. edsl/language_models/ModelList.py +109 -102
  104. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  105. edsl/language_models/__init__.py +3 -3
  106. edsl/language_models/fake_openai_call.py +15 -15
  107. edsl/language_models/fake_openai_service.py +61 -61
  108. edsl/language_models/registry.py +137 -137
  109. edsl/language_models/repair.py +156 -156
  110. edsl/language_models/unused/ReplicateBase.py +83 -83
  111. edsl/language_models/utilities.py +64 -64
  112. edsl/notebooks/Notebook.py +258 -259
  113. edsl/notebooks/__init__.py +1 -1
  114. edsl/prompts/Prompt.py +357 -357
  115. edsl/prompts/__init__.py +2 -2
  116. edsl/questions/AnswerValidatorMixin.py +289 -289
  117. edsl/questions/QuestionBase.py +660 -656
  118. edsl/questions/QuestionBaseGenMixin.py +161 -161
  119. edsl/questions/QuestionBasePromptsMixin.py +217 -234
  120. edsl/questions/QuestionBudget.py +227 -227
  121. edsl/questions/QuestionCheckBox.py +359 -359
  122. edsl/questions/QuestionExtract.py +183 -183
  123. edsl/questions/QuestionFreeText.py +114 -114
  124. edsl/questions/QuestionFunctional.py +166 -159
  125. edsl/questions/QuestionList.py +231 -231
  126. edsl/questions/QuestionMultipleChoice.py +286 -286
  127. edsl/questions/QuestionNumerical.py +153 -153
  128. edsl/questions/QuestionRank.py +324 -324
  129. edsl/questions/Quick.py +41 -41
  130. edsl/questions/RegisterQuestionsMeta.py +71 -71
  131. edsl/questions/ResponseValidatorABC.py +174 -174
  132. edsl/questions/SimpleAskMixin.py +73 -73
  133. edsl/questions/__init__.py +26 -26
  134. edsl/questions/compose_questions.py +98 -98
  135. edsl/questions/decorators.py +21 -21
  136. edsl/questions/derived/QuestionLikertFive.py +76 -76
  137. edsl/questions/derived/QuestionLinearScale.py +87 -87
  138. edsl/questions/derived/QuestionTopK.py +93 -91
  139. edsl/questions/derived/QuestionYesNo.py +82 -82
  140. edsl/questions/descriptors.py +413 -413
  141. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  142. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  143. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  144. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  145. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  146. edsl/questions/prompt_templates/question_list.jinja +17 -17
  147. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  148. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  149. edsl/questions/question_registry.py +147 -147
  150. edsl/questions/settings.py +12 -12
  151. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  152. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  153. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  154. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  155. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  157. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  158. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  159. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  160. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  161. edsl/questions/templates/list/question_presentation.jinja +5 -5
  162. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  163. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  164. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  165. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  166. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  167. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  168. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  169. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  170. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  171. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  172. edsl/results/Dataset.py +293 -293
  173. edsl/results/DatasetExportMixin.py +717 -717
  174. edsl/results/DatasetTree.py +145 -145
  175. edsl/results/Result.py +456 -450
  176. edsl/results/Results.py +1071 -1071
  177. edsl/results/ResultsDBMixin.py +238 -238
  178. edsl/results/ResultsExportMixin.py +43 -43
  179. edsl/results/ResultsFetchMixin.py +33 -33
  180. edsl/results/ResultsGGMixin.py +121 -121
  181. edsl/results/ResultsToolsMixin.py +98 -98
  182. edsl/results/Selector.py +135 -135
  183. edsl/results/__init__.py +2 -2
  184. edsl/results/tree_explore.py +115 -115
  185. edsl/scenarios/FileStore.py +458 -458
  186. edsl/scenarios/Scenario.py +544 -546
  187. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  188. edsl/scenarios/ScenarioList.py +1112 -1112
  189. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  190. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  191. edsl/scenarios/__init__.py +4 -4
  192. edsl/shared.py +1 -1
  193. edsl/study/ObjectEntry.py +173 -173
  194. edsl/study/ProofOfWork.py +113 -113
  195. edsl/study/SnapShot.py +80 -80
  196. edsl/study/Study.py +528 -528
  197. edsl/study/__init__.py +4 -4
  198. edsl/surveys/DAG.py +148 -148
  199. edsl/surveys/Memory.py +31 -31
  200. edsl/surveys/MemoryPlan.py +244 -244
  201. edsl/surveys/Rule.py +326 -330
  202. edsl/surveys/RuleCollection.py +387 -387
  203. edsl/surveys/Survey.py +1787 -1795
  204. edsl/surveys/SurveyCSS.py +261 -261
  205. edsl/surveys/SurveyExportMixin.py +259 -259
  206. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  207. edsl/surveys/SurveyQualtricsImport.py +284 -284
  208. edsl/surveys/__init__.py +3 -3
  209. edsl/surveys/base.py +53 -53
  210. edsl/surveys/descriptors.py +56 -56
  211. edsl/surveys/instructions/ChangeInstruction.py +49 -47
  212. edsl/surveys/instructions/Instruction.py +53 -51
  213. edsl/surveys/instructions/InstructionCollection.py +77 -77
  214. edsl/templates/error_reporting/base.html +23 -23
  215. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  216. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  217. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  218. edsl/templates/error_reporting/interview_details.html +115 -115
  219. edsl/templates/error_reporting/interviews.html +9 -9
  220. edsl/templates/error_reporting/overview.html +4 -4
  221. edsl/templates/error_reporting/performance_plot.html +1 -1
  222. edsl/templates/error_reporting/report.css +73 -73
  223. edsl/templates/error_reporting/report.html +117 -117
  224. edsl/templates/error_reporting/report.js +25 -25
  225. edsl/tools/__init__.py +1 -1
  226. edsl/tools/clusters.py +192 -192
  227. edsl/tools/embeddings.py +27 -27
  228. edsl/tools/embeddings_plotting.py +118 -118
  229. edsl/tools/plotting.py +112 -112
  230. edsl/tools/summarize.py +18 -18
  231. edsl/utilities/SystemInfo.py +28 -28
  232. edsl/utilities/__init__.py +22 -22
  233. edsl/utilities/ast_utilities.py +25 -25
  234. edsl/utilities/data/Registry.py +6 -6
  235. edsl/utilities/data/__init__.py +1 -1
  236. edsl/utilities/data/scooter_results.json +1 -1
  237. edsl/utilities/decorators.py +77 -77
  238. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  239. edsl/utilities/interface.py +627 -627
  240. edsl/{conjure → utilities}/naming_utilities.py +263 -263
  241. edsl/utilities/repair_functions.py +28 -28
  242. edsl/utilities/restricted_python.py +70 -70
  243. edsl/utilities/utilities.py +409 -409
  244. {edsl-0.1.38.dev1.dist-info → edsl-0.1.38.dev3.dist-info}/LICENSE +21 -21
  245. {edsl-0.1.38.dev1.dist-info → edsl-0.1.38.dev3.dist-info}/METADATA +1 -1
  246. edsl-0.1.38.dev3.dist-info/RECORD +269 -0
  247. edsl/conjure/AgentConstructionMixin.py +0 -160
  248. edsl/conjure/Conjure.py +0 -62
  249. edsl/conjure/InputData.py +0 -659
  250. edsl/conjure/InputDataCSV.py +0 -48
  251. edsl/conjure/InputDataMixinQuestionStats.py +0 -182
  252. edsl/conjure/InputDataPyRead.py +0 -91
  253. edsl/conjure/InputDataSPSS.py +0 -8
  254. edsl/conjure/InputDataStata.py +0 -8
  255. edsl/conjure/QuestionOptionMixin.py +0 -76
  256. edsl/conjure/QuestionTypeMixin.py +0 -23
  257. edsl/conjure/RawQuestion.py +0 -65
  258. edsl/conjure/SurveyResponses.py +0 -7
  259. edsl/conjure/__init__.py +0 -9
  260. edsl/conjure/examples/placeholder.txt +0 -0
  261. edsl/conjure/utilities.py +0 -201
  262. edsl-0.1.38.dev1.dist-info/RECORD +0 -283
  263. {edsl-0.1.38.dev1.dist-info → edsl-0.1.38.dev3.dist-info}/WHEEL +0 -0
edsl/inference_services/OpenAIService.py
@@ -1,224 +1,224 @@
The entire file is removed and re-added, and the "-" and "+" sides are identical line for line (a whole-file rewrite with no visible content change); the shared content is shown once:

from __future__ import annotations
from typing import Any, List, Optional
import os

import openai

from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
from edsl.language_models import LanguageModel
from edsl.inference_services.rate_limits_cache import rate_limits
from edsl.utilities.utilities import fix_partial_correct_response

from edsl.config import CONFIG


class OpenAIService(InferenceServiceABC):
    """OpenAI service class."""

    _inference_service_ = "openai"
    _env_key_name_ = "OPENAI_API_KEY"
    _base_url_ = None

    _sync_client_ = openai.OpenAI
    _async_client_ = openai.AsyncOpenAI

    _sync_client_instance = None
    _async_client_instance = None

    key_sequence = ["choices", 0, "message", "content"]
    usage_sequence = ["usage"]
    input_token_name = "prompt_tokens"
    output_token_name = "completion_tokens"

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # so subclasses have to create their own instances of the clients
        cls._sync_client_instance = None
        cls._async_client_instance = None

    @classmethod
    def sync_client(cls):
        if cls._sync_client_instance is None:
            cls._sync_client_instance = cls._sync_client_(
                api_key=os.getenv(cls._env_key_name_), base_url=cls._base_url_
            )
        return cls._sync_client_instance

    @classmethod
    def async_client(cls):
        if cls._async_client_instance is None:
            cls._async_client_instance = cls._async_client_(
                api_key=os.getenv(cls._env_key_name_), base_url=cls._base_url_
            )
        return cls._async_client_instance

    model_exclude_list = [
        "whisper-1",
        "davinci-002",
        "dall-e-2",
        "tts-1-hd-1106",
        "tts-1-hd",
        "dall-e-3",
        "tts-1",
        "babbage-002",
        "tts-1-1106",
        "text-embedding-3-large",
        "text-embedding-3-small",
        "text-embedding-ada-002",
        "ft:davinci-002:mit-horton-lab::8OfuHgoo",
        "gpt-3.5-turbo-instruct-0914",
        "gpt-3.5-turbo-instruct",
    ]
    _models_list_cache: List[str] = []

    @classmethod
    def get_model_list(cls):
        raw_list = cls.sync_client().models.list()
        if hasattr(raw_list, "data"):
            return raw_list.data
        else:
            return raw_list

    @classmethod
    def available(cls) -> List[str]:
        if not cls._models_list_cache:
            try:
                cls._models_list_cache = [
                    m.id
                    for m in cls.get_model_list()
                    if m.id not in cls.model_exclude_list
                ]
            except Exception as e:
                raise
        return cls._models_list_cache

    @classmethod
    def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
        if model_class_name is None:
            model_class_name = cls.to_class_name(model_name)

        class LLM(LanguageModel):
            """
            Child class of LanguageModel for interacting with OpenAI models
            """

            key_sequence = cls.key_sequence
            usage_sequence = cls.usage_sequence
            input_token_name = cls.input_token_name
            output_token_name = cls.output_token_name

            _rpm = cls.get_rpm(cls)
            _tpm = cls.get_tpm(cls)

            _inference_service_ = cls._inference_service_
            _model_ = model_name
            _parameters_ = {
                "temperature": 0.5,
                "max_tokens": 1000,
                "top_p": 1,
                "frequency_penalty": 0,
                "presence_penalty": 0,
                "logprobs": False,
                "top_logprobs": 3,
            }

            def sync_client(self):
                return cls.sync_client()

            def async_client(self):
                return cls.async_client()

            @classmethod
            def available(cls) -> list[str]:
                return cls.sync_client().models.list()

            def get_headers(self) -> dict[str, Any]:
                client = self.sync_client()
                response = client.chat.completions.with_raw_response.create(
                    messages=[
                        {
                            "role": "user",
                            "content": "Say this is a test",
                        }
                    ],
                    model=self.model,
                )
                return dict(response.headers)

            def get_rate_limits(self) -> dict[str, Any]:
                try:
                    if "openai" in rate_limits:
                        headers = rate_limits["openai"]

                    else:
                        headers = self.get_headers()

                except Exception as e:
                    return {
                        "rpm": 10_000,
                        "tpm": 2_000_000,
                    }
                else:
                    return {
                        "rpm": int(headers["x-ratelimit-limit-requests"]),
                        "tpm": int(headers["x-ratelimit-limit-tokens"]),
                    }

            async def async_execute_model_call(
                self,
                user_prompt: str,
                system_prompt: str = "",
                files_list: Optional[List["Files"]] = None,
                invigilator: Optional[
                    "InvigilatorAI"
                ] = None,  # TBD - can eventually be used for function-calling
            ) -> dict[str, Any]:
                """Calls the OpenAI API and returns the API response."""
                if files_list:
                    encoded_image = files_list[0].base64_string
                    content = [{"type": "text", "text": user_prompt}]
                    content.append(
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{encoded_image}"
                            },
                        }
                    )
                else:
                    content = user_prompt
                client = self.async_client()

                messages = [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": content},
                ]
                if (
                    system_prompt == "" and self.omit_system_prompt_if_empty
                ) or "o1" in self.model:
                    messages = messages[1:]

                params = {
                    "model": self.model,
                    "messages": messages,
                    "temperature": self.temperature,
                    "max_tokens": self.max_tokens,
                    "top_p": self.top_p,
                    "frequency_penalty": self.frequency_penalty,
                    "presence_penalty": self.presence_penalty,
                    "logprobs": self.logprobs,
                    "top_logprobs": self.top_logprobs if self.logprobs else None,
                }
                if "o1" in self.model:
                    params.pop("max_tokens")
                    params["max_completion_tokens"] = self.max_tokens
                    params["temperature"] = 1
                try:
                    response = await client.chat.completions.create(**params)
                except Exception as e:
                    print(e)
                return response.model_dump()

        LLM.__name__ = "LanguageModel"

        return LLM
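
The key_sequence and usage_sequence attributes declared above are paths into the raw API response: each element is either a dict key or a list index, applied left to right to pull out the answer text and the token usage. A minimal sketch of that traversal, with a hypothetical extract() helper and an illustrative trimmed-down payload (neither is part of the package):

from typing import Any, Sequence


def extract(data: Any, key_sequence: Sequence) -> Any:
    # Apply each dict key (str) or list index (int) in order.
    for key in key_sequence:
        data = data[key]
    return data


# Illustrative OpenAI-style chat completion payload, not a real API response.
sample_response = {
    "choices": [{"message": {"content": "Hello!"}}],
    "usage": {"prompt_tokens": 9, "completion_tokens": 3},
}

print(extract(sample_response, ["choices", 0, "message", "content"]))  # Hello!
print(extract(sample_response, ["usage"]))  # {'prompt_tokens': 9, 'completion_tokens': 3}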
edsl/inference_services/TestService.py
@@ -1,89 +1,89 @@
The entire file is likewise removed and re-added with identical content on both sides; shown once:

from typing import Any, List, Optional
import os
import asyncio
from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
from edsl.language_models import LanguageModel
from edsl.inference_services.rate_limits_cache import rate_limits
from edsl.utilities.utilities import fix_partial_correct_response

from edsl.enums import InferenceServiceType
import random


class TestService(InferenceServiceABC):
    """OpenAI service class."""

    _inference_service_ = "test"
    _env_key_name_ = None
    _base_url_ = None

    _sync_client_ = None
    _async_client_ = None

    _sync_client_instance = None
    _async_client_instance = None

    key_sequence = None
    usage_sequence = None
    model_exclude_list = []
    input_token_name = "prompt_tokens"
    output_token_name = "completion_tokens"

    @classmethod
    def available(cls) -> list[str]:
        return ["test"]

    @classmethod
    def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
        throw_exception = False

        class TestServiceLanguageModel(LanguageModel):
            _model_ = "test"
            _parameters_ = {"temperature": 0.5}
            _inference_service_ = InferenceServiceType.TEST.value
            usage_sequence = ["usage"]
            key_sequence = ["message", 0, "text"]
            input_token_name = cls.input_token_name
            output_token_name = cls.output_token_name
            _rpm = 1000
            _tpm = 100000

            @property
            def _canned_response(self):
                if hasattr(self, "canned_response"):
                    return self.canned_response
                else:
                    return "Hello, world"

            async def async_execute_model_call(
                self,
                user_prompt: str,
                system_prompt: str,
                # func: Optional[callable] = None,
                files_list: Optional[List["File"]] = None,
            ) -> dict[str, Any]:
                await asyncio.sleep(0.1)
                # return {"message": """{"answer": "Hello, world"}"""}

                if hasattr(self, "func"):
                    return {
                        "message": [
                            {"text": self.func(user_prompt, system_prompt, files_list)}
                        ],
                        "usage": {"prompt_tokens": 1, "completion_tokens": 1},
                    }

                if hasattr(self, "throw_exception") and self.throw_exception:
                    if hasattr(self, "exception_probability"):
                        p = self.exception_probability
                    else:
                        p = 1

                    if random.random() < p:
                        raise Exception("This is a test error")
                return {
                    "message": [{"text": f"{self._canned_response}"}],
                    "usage": {"prompt_tokens": 1, "completion_tokens": 1},
                }

        return TestServiceLanguageModel
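
TestServiceLanguageModel returns a fixed payload whose shape matches its key_sequence = ["message", 0, "text"], so callers can exercise the full answer-extraction path without any network access. A self-contained sketch of the same canned-response pattern, using a hypothetical CannedModel stand-in rather than the edsl class itself:

import asyncio
from typing import Any


class CannedModel:
    # Hypothetical test double mirroring the response shape above.
    key_sequence = ["message", 0, "text"]

    def __init__(self, canned_response: str = "Hello, world"):
        self.canned_response = canned_response

    async def async_execute_model_call(
        self, user_prompt: str, system_prompt: str
    ) -> dict[str, Any]:
        await asyncio.sleep(0.1)  # simulate a little latency, as the real test model does
        return {
            "message": [{"text": self.canned_response}],
            "usage": {"prompt_tokens": 1, "completion_tokens": 1},
        }


async def main() -> None:
    model = CannedModel("yes")
    raw = await model.async_execute_model_call("Is this a test?", "")
    answer = raw
    for key in model.key_sequence:  # same traversal the services declare
        answer = answer[key]
    print(answer)  # yes


asyncio.run(main())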