edsl 0.1.37.dev2__py3-none-any.whl → 0.1.37.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (257)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +804 -804
  7. edsl/agents/AgentList.py +345 -345
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +305 -305
  10. edsl/agents/PromptConstructor.py +312 -312
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +86 -86
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +152 -152
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +238 -238
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/mug_negotiation.py +81 -81
  45. edsl/conversation/next_speaker_utilities.py +93 -93
  46. edsl/coop/PriceFetcher.py +54 -54
  47. edsl/coop/__init__.py +2 -2
  48. edsl/coop/coop.py +824 -824
  49. edsl/coop/utils.py +131 -131
  50. edsl/data/Cache.py +527 -527
  51. edsl/data/CacheEntry.py +228 -228
  52. edsl/data/CacheHandler.py +149 -149
  53. edsl/data/RemoteCacheSync.py +97 -97
  54. edsl/data/SQLiteDict.py +292 -292
  55. edsl/data/__init__.py +4 -4
  56. edsl/data/orm.py +10 -10
  57. edsl/data_transfer_models.py +73 -73
  58. edsl/enums.py +173 -173
  59. edsl/exceptions/__init__.py +50 -50
  60. edsl/exceptions/agents.py +40 -40
  61. edsl/exceptions/configuration.py +16 -16
  62. edsl/exceptions/coop.py +10 -10
  63. edsl/exceptions/data.py +14 -14
  64. edsl/exceptions/general.py +34 -34
  65. edsl/exceptions/jobs.py +33 -33
  66. edsl/exceptions/language_models.py +63 -63
  67. edsl/exceptions/prompts.py +15 -15
  68. edsl/exceptions/questions.py +91 -91
  69. edsl/exceptions/results.py +26 -26
  70. edsl/exceptions/surveys.py +34 -34
  71. edsl/inference_services/AnthropicService.py +87 -87
  72. edsl/inference_services/AwsBedrock.py +115 -115
  73. edsl/inference_services/AzureAI.py +217 -217
  74. edsl/inference_services/DeepInfraService.py +18 -18
  75. edsl/inference_services/GoogleService.py +156 -156
  76. edsl/inference_services/GroqService.py +20 -20
  77. edsl/inference_services/InferenceServiceABC.py +147 -147
  78. edsl/inference_services/InferenceServicesCollection.py +74 -74
  79. edsl/inference_services/MistralAIService.py +123 -123
  80. edsl/inference_services/OllamaService.py +18 -18
  81. edsl/inference_services/OpenAIService.py +224 -224
  82. edsl/inference_services/TestService.py +89 -89
  83. edsl/inference_services/TogetherAIService.py +170 -170
  84. edsl/inference_services/models_available_cache.py +118 -118
  85. edsl/inference_services/rate_limits_cache.py +25 -25
  86. edsl/inference_services/registry.py +39 -39
  87. edsl/inference_services/write_available.py +10 -10
  88. edsl/jobs/Answers.py +56 -56
  89. edsl/jobs/Jobs.py +1121 -1112
  90. edsl/jobs/__init__.py +1 -1
  91. edsl/jobs/buckets/BucketCollection.py +63 -63
  92. edsl/jobs/buckets/ModelBuckets.py +65 -65
  93. edsl/jobs/buckets/TokenBucket.py +248 -248
  94. edsl/jobs/interviews/Interview.py +661 -661
  95. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  96. edsl/jobs/interviews/InterviewExceptionEntry.py +182 -182
  97. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  98. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  99. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  100. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  101. edsl/jobs/interviews/ReportErrors.py +66 -66
  102. edsl/jobs/interviews/interview_status_enum.py +9 -9
  103. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  104. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  105. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  106. edsl/jobs/tasks/TaskCreators.py +64 -64
  107. edsl/jobs/tasks/TaskHistory.py +441 -441
  108. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  109. edsl/jobs/tasks/task_status_enum.py +163 -163
  110. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  111. edsl/jobs/tokens/TokenUsage.py +34 -34
  112. edsl/language_models/LanguageModel.py +718 -718
  113. edsl/language_models/ModelList.py +102 -102
  114. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  115. edsl/language_models/__init__.py +2 -2
  116. edsl/language_models/fake_openai_call.py +15 -15
  117. edsl/language_models/fake_openai_service.py +61 -61
  118. edsl/language_models/registry.py +137 -137
  119. edsl/language_models/repair.py +156 -156
  120. edsl/language_models/unused/ReplicateBase.py +83 -83
  121. edsl/language_models/utilities.py +64 -64
  122. edsl/notebooks/Notebook.py +259 -259
  123. edsl/notebooks/__init__.py +1 -1
  124. edsl/prompts/Prompt.py +353 -353
  125. edsl/prompts/__init__.py +2 -2
  126. edsl/questions/AnswerValidatorMixin.py +289 -289
  127. edsl/questions/QuestionBase.py +616 -616
  128. edsl/questions/QuestionBaseGenMixin.py +161 -161
  129. edsl/questions/QuestionBasePromptsMixin.py +266 -266
  130. edsl/questions/QuestionBudget.py +227 -227
  131. edsl/questions/QuestionCheckBox.py +359 -359
  132. edsl/questions/QuestionExtract.py +183 -183
  133. edsl/questions/QuestionFreeText.py +114 -114
  134. edsl/questions/QuestionFunctional.py +159 -159
  135. edsl/questions/QuestionList.py +231 -231
  136. edsl/questions/QuestionMultipleChoice.py +286 -286
  137. edsl/questions/QuestionNumerical.py +153 -153
  138. edsl/questions/QuestionRank.py +324 -324
  139. edsl/questions/Quick.py +41 -41
  140. edsl/questions/RegisterQuestionsMeta.py +71 -71
  141. edsl/questions/ResponseValidatorABC.py +174 -174
  142. edsl/questions/SimpleAskMixin.py +73 -73
  143. edsl/questions/__init__.py +26 -26
  144. edsl/questions/compose_questions.py +98 -98
  145. edsl/questions/decorators.py +21 -21
  146. edsl/questions/derived/QuestionLikertFive.py +76 -76
  147. edsl/questions/derived/QuestionLinearScale.py +87 -87
  148. edsl/questions/derived/QuestionTopK.py +91 -91
  149. edsl/questions/derived/QuestionYesNo.py +82 -82
  150. edsl/questions/descriptors.py +418 -418
  151. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  152. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  153. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  154. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  155. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  156. edsl/questions/prompt_templates/question_list.jinja +17 -17
  157. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  158. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  159. edsl/questions/question_registry.py +147 -147
  160. edsl/questions/settings.py +12 -12
  161. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  162. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  163. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  164. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  165. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  167. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  168. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  169. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  170. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  171. edsl/questions/templates/list/question_presentation.jinja +5 -5
  172. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  173. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  174. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  176. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  177. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  178. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  179. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  180. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  181. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  182. edsl/results/Dataset.py +293 -293
  183. edsl/results/DatasetExportMixin.py +693 -693
  184. edsl/results/DatasetTree.py +145 -145
  185. edsl/results/Result.py +435 -435
  186. edsl/results/Results.py +1160 -1160
  187. edsl/results/ResultsDBMixin.py +238 -238
  188. edsl/results/ResultsExportMixin.py +43 -43
  189. edsl/results/ResultsFetchMixin.py +33 -33
  190. edsl/results/ResultsGGMixin.py +121 -121
  191. edsl/results/ResultsToolsMixin.py +98 -98
  192. edsl/results/Selector.py +118 -118
  193. edsl/results/__init__.py +2 -2
  194. edsl/results/tree_explore.py +115 -115
  195. edsl/scenarios/FileStore.py +458 -458
  196. edsl/scenarios/Scenario.py +510 -510
  197. edsl/scenarios/ScenarioHtmlMixin.py +59 -59
  198. edsl/scenarios/ScenarioList.py +1101 -1101
  199. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  200. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  201. edsl/scenarios/__init__.py +4 -4
  202. edsl/shared.py +1 -1
  203. edsl/study/ObjectEntry.py +173 -173
  204. edsl/study/ProofOfWork.py +113 -113
  205. edsl/study/SnapShot.py +80 -80
  206. edsl/study/Study.py +528 -528
  207. edsl/study/__init__.py +4 -4
  208. edsl/surveys/DAG.py +148 -148
  209. edsl/surveys/Memory.py +31 -31
  210. edsl/surveys/MemoryPlan.py +244 -244
  211. edsl/surveys/Rule.py +324 -324
  212. edsl/surveys/RuleCollection.py +387 -387
  213. edsl/surveys/Survey.py +1772 -1772
  214. edsl/surveys/SurveyCSS.py +261 -261
  215. edsl/surveys/SurveyExportMixin.py +259 -259
  216. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  217. edsl/surveys/SurveyQualtricsImport.py +284 -284
  218. edsl/surveys/__init__.py +3 -3
  219. edsl/surveys/base.py +53 -53
  220. edsl/surveys/descriptors.py +56 -56
  221. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  222. edsl/surveys/instructions/Instruction.py +51 -51
  223. edsl/surveys/instructions/InstructionCollection.py +77 -77
  224. edsl/templates/error_reporting/base.html +23 -23
  225. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  226. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  227. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  228. edsl/templates/error_reporting/interview_details.html +115 -115
  229. edsl/templates/error_reporting/interviews.html +9 -9
  230. edsl/templates/error_reporting/overview.html +4 -4
  231. edsl/templates/error_reporting/performance_plot.html +1 -1
  232. edsl/templates/error_reporting/report.css +73 -73
  233. edsl/templates/error_reporting/report.html +117 -117
  234. edsl/templates/error_reporting/report.js +25 -25
  235. edsl/tools/__init__.py +1 -1
  236. edsl/tools/clusters.py +192 -192
  237. edsl/tools/embeddings.py +27 -27
  238. edsl/tools/embeddings_plotting.py +118 -118
  239. edsl/tools/plotting.py +112 -112
  240. edsl/tools/summarize.py +18 -18
  241. edsl/utilities/SystemInfo.py +28 -28
  242. edsl/utilities/__init__.py +22 -22
  243. edsl/utilities/ast_utilities.py +25 -25
  244. edsl/utilities/data/Registry.py +6 -6
  245. edsl/utilities/data/__init__.py +1 -1
  246. edsl/utilities/data/scooter_results.json +1 -1
  247. edsl/utilities/decorators.py +77 -77
  248. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  249. edsl/utilities/interface.py +627 -627
  250. edsl/utilities/repair_functions.py +28 -28
  251. edsl/utilities/restricted_python.py +70 -70
  252. edsl/utilities/utilities.py +391 -391
  253. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev3.dist-info}/LICENSE +21 -21
  254. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev3.dist-info}/METADATA +1 -1
  255. edsl-0.1.37.dev3.dist-info/RECORD +279 -0
  256. edsl-0.1.37.dev2.dist-info/RECORD +0 -279
  257. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev3.dist-info}/WHEEL +0 -0
@@ -1,224 +1,224 @@
1
- from __future__ import annotations
2
- from typing import Any, List, Optional
3
- import os
4
-
5
- import openai
6
-
7
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
8
- from edsl.language_models import LanguageModel
9
- from edsl.inference_services.rate_limits_cache import rate_limits
10
- from edsl.utilities.utilities import fix_partial_correct_response
11
-
12
- from edsl.config import CONFIG
13
-
14
-
15
- class OpenAIService(InferenceServiceABC):
16
- """OpenAI service class."""
17
-
18
- _inference_service_ = "openai"
19
- _env_key_name_ = "OPENAI_API_KEY"
20
- _base_url_ = None
21
-
22
- _sync_client_ = openai.OpenAI
23
- _async_client_ = openai.AsyncOpenAI
24
-
25
- _sync_client_instance = None
26
- _async_client_instance = None
27
-
28
- key_sequence = ["choices", 0, "message", "content"]
29
- usage_sequence = ["usage"]
30
- input_token_name = "prompt_tokens"
31
- output_token_name = "completion_tokens"
32
-
33
- def __init_subclass__(cls, **kwargs):
34
- super().__init_subclass__(**kwargs)
35
- # so subclasses have to create their own instances of the clients
36
- cls._sync_client_instance = None
37
- cls._async_client_instance = None
38
-
39
- @classmethod
40
- def sync_client(cls):
41
- if cls._sync_client_instance is None:
42
- cls._sync_client_instance = cls._sync_client_(
43
- api_key=os.getenv(cls._env_key_name_), base_url=cls._base_url_
44
- )
45
- return cls._sync_client_instance
46
-
47
- @classmethod
48
- def async_client(cls):
49
- if cls._async_client_instance is None:
50
- cls._async_client_instance = cls._async_client_(
51
- api_key=os.getenv(cls._env_key_name_), base_url=cls._base_url_
52
- )
53
- return cls._async_client_instance
54
-
55
- model_exclude_list = [
56
- "whisper-1",
57
- "davinci-002",
58
- "dall-e-2",
59
- "tts-1-hd-1106",
60
- "tts-1-hd",
61
- "dall-e-3",
62
- "tts-1",
63
- "babbage-002",
64
- "tts-1-1106",
65
- "text-embedding-3-large",
66
- "text-embedding-3-small",
67
- "text-embedding-ada-002",
68
- "ft:davinci-002:mit-horton-lab::8OfuHgoo",
69
- "gpt-3.5-turbo-instruct-0914",
70
- "gpt-3.5-turbo-instruct",
71
- ]
72
- _models_list_cache: List[str] = []
73
-
74
- @classmethod
75
- def get_model_list(cls):
76
- raw_list = cls.sync_client().models.list()
77
- if hasattr(raw_list, "data"):
78
- return raw_list.data
79
- else:
80
- return raw_list
81
-
82
- @classmethod
83
- def available(cls) -> List[str]:
84
- if not cls._models_list_cache:
85
- try:
86
- cls._models_list_cache = [
87
- m.id
88
- for m in cls.get_model_list()
89
- if m.id not in cls.model_exclude_list
90
- ]
91
- except Exception as e:
92
- raise
93
- return cls._models_list_cache
94
-
95
- @classmethod
96
- def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
97
- if model_class_name is None:
98
- model_class_name = cls.to_class_name(model_name)
99
-
100
- class LLM(LanguageModel):
101
- """
102
- Child class of LanguageModel for interacting with OpenAI models
103
- """
104
-
105
- key_sequence = cls.key_sequence
106
- usage_sequence = cls.usage_sequence
107
- input_token_name = cls.input_token_name
108
- output_token_name = cls.output_token_name
109
-
110
- _rpm = cls.get_rpm(cls)
111
- _tpm = cls.get_tpm(cls)
112
-
113
- _inference_service_ = cls._inference_service_
114
- _model_ = model_name
115
- _parameters_ = {
116
- "temperature": 0.5,
117
- "max_tokens": 1000,
118
- "top_p": 1,
119
- "frequency_penalty": 0,
120
- "presence_penalty": 0,
121
- "logprobs": False,
122
- "top_logprobs": 3,
123
- }
124
-
125
- def sync_client(self):
126
- return cls.sync_client()
127
-
128
- def async_client(self):
129
- return cls.async_client()
130
-
131
- @classmethod
132
- def available(cls) -> list[str]:
133
- return cls.sync_client().models.list()
134
-
135
- def get_headers(self) -> dict[str, Any]:
136
- client = self.sync_client()
137
- response = client.chat.completions.with_raw_response.create(
138
- messages=[
139
- {
140
- "role": "user",
141
- "content": "Say this is a test",
142
- }
143
- ],
144
- model=self.model,
145
- )
146
- return dict(response.headers)
147
-
148
- def get_rate_limits(self) -> dict[str, Any]:
149
- try:
150
- if "openai" in rate_limits:
151
- headers = rate_limits["openai"]
152
-
153
- else:
154
- headers = self.get_headers()
155
-
156
- except Exception as e:
157
- return {
158
- "rpm": 10_000,
159
- "tpm": 2_000_000,
160
- }
161
- else:
162
- return {
163
- "rpm": int(headers["x-ratelimit-limit-requests"]),
164
- "tpm": int(headers["x-ratelimit-limit-tokens"]),
165
- }
166
-
167
- async def async_execute_model_call(
168
- self,
169
- user_prompt: str,
170
- system_prompt: str = "",
171
- files_list: Optional[List["Files"]] = None,
172
- invigilator: Optional[
173
- "InvigilatorAI"
174
- ] = None, # TBD - can eventually be used for function-calling
175
- ) -> dict[str, Any]:
176
- """Calls the OpenAI API and returns the API response."""
177
- if files_list:
178
- encoded_image = files_list[0].base64_string
179
- content = [{"type": "text", "text": user_prompt}]
180
- content.append(
181
- {
182
- "type": "image_url",
183
- "image_url": {
184
- "url": f"data:image/jpeg;base64,{encoded_image}"
185
- },
186
- }
187
- )
188
- else:
189
- content = user_prompt
190
- client = self.async_client()
191
-
192
- messages = [
193
- {"role": "system", "content": system_prompt},
194
- {"role": "user", "content": content},
195
- ]
196
- if (
197
- system_prompt == "" and self.omit_system_prompt_if_empty
198
- ) or "o1" in self.model:
199
- messages = messages[1:]
200
-
201
- params = {
202
- "model": self.model,
203
- "messages": messages,
204
- "temperature": self.temperature,
205
- "max_tokens": self.max_tokens,
206
- "top_p": self.top_p,
207
- "frequency_penalty": self.frequency_penalty,
208
- "presence_penalty": self.presence_penalty,
209
- "logprobs": self.logprobs,
210
- "top_logprobs": self.top_logprobs if self.logprobs else None,
211
- }
212
- if "o1" in self.model:
213
- params.pop("max_tokens")
214
- params["max_completion_tokens"] = self.max_tokens
215
- params["temperature"] = 1
216
- try:
217
- response = await client.chat.completions.create(**params)
218
- except Exception as e:
219
- print(e)
220
- return response.model_dump()
221
-
222
- LLM.__name__ = "LanguageModel"
223
-
224
- return LLM
1
+ from __future__ import annotations
2
+ from typing import Any, List, Optional
3
+ import os
4
+
5
+ import openai
6
+
7
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
8
+ from edsl.language_models import LanguageModel
9
+ from edsl.inference_services.rate_limits_cache import rate_limits
10
+ from edsl.utilities.utilities import fix_partial_correct_response
11
+
12
+ from edsl.config import CONFIG
13
+
14
+
15
+ class OpenAIService(InferenceServiceABC):
16
+ """OpenAI service class."""
17
+
18
+ _inference_service_ = "openai"
19
+ _env_key_name_ = "OPENAI_API_KEY"
20
+ _base_url_ = None
21
+
22
+ _sync_client_ = openai.OpenAI
23
+ _async_client_ = openai.AsyncOpenAI
24
+
25
+ _sync_client_instance = None
26
+ _async_client_instance = None
27
+
28
+ key_sequence = ["choices", 0, "message", "content"]
29
+ usage_sequence = ["usage"]
30
+ input_token_name = "prompt_tokens"
31
+ output_token_name = "completion_tokens"
32
+
33
+ def __init_subclass__(cls, **kwargs):
34
+ super().__init_subclass__(**kwargs)
35
+ # so subclasses have to create their own instances of the clients
36
+ cls._sync_client_instance = None
37
+ cls._async_client_instance = None
38
+
39
+ @classmethod
40
+ def sync_client(cls):
41
+ if cls._sync_client_instance is None:
42
+ cls._sync_client_instance = cls._sync_client_(
43
+ api_key=os.getenv(cls._env_key_name_), base_url=cls._base_url_
44
+ )
45
+ return cls._sync_client_instance
46
+
47
+ @classmethod
48
+ def async_client(cls):
49
+ if cls._async_client_instance is None:
50
+ cls._async_client_instance = cls._async_client_(
51
+ api_key=os.getenv(cls._env_key_name_), base_url=cls._base_url_
52
+ )
53
+ return cls._async_client_instance
54
+
55
+ model_exclude_list = [
56
+ "whisper-1",
57
+ "davinci-002",
58
+ "dall-e-2",
59
+ "tts-1-hd-1106",
60
+ "tts-1-hd",
61
+ "dall-e-3",
62
+ "tts-1",
63
+ "babbage-002",
64
+ "tts-1-1106",
65
+ "text-embedding-3-large",
66
+ "text-embedding-3-small",
67
+ "text-embedding-ada-002",
68
+ "ft:davinci-002:mit-horton-lab::8OfuHgoo",
69
+ "gpt-3.5-turbo-instruct-0914",
70
+ "gpt-3.5-turbo-instruct",
71
+ ]
72
+ _models_list_cache: List[str] = []
73
+
74
+ @classmethod
75
+ def get_model_list(cls):
76
+ raw_list = cls.sync_client().models.list()
77
+ if hasattr(raw_list, "data"):
78
+ return raw_list.data
79
+ else:
80
+ return raw_list
81
+
82
+ @classmethod
83
+ def available(cls) -> List[str]:
84
+ if not cls._models_list_cache:
85
+ try:
86
+ cls._models_list_cache = [
87
+ m.id
88
+ for m in cls.get_model_list()
89
+ if m.id not in cls.model_exclude_list
90
+ ]
91
+ except Exception as e:
92
+ raise
93
+ return cls._models_list_cache
94
+
95
+ @classmethod
96
+ def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
97
+ if model_class_name is None:
98
+ model_class_name = cls.to_class_name(model_name)
99
+
100
+ class LLM(LanguageModel):
101
+ """
102
+ Child class of LanguageModel for interacting with OpenAI models
103
+ """
104
+
105
+ key_sequence = cls.key_sequence
106
+ usage_sequence = cls.usage_sequence
107
+ input_token_name = cls.input_token_name
108
+ output_token_name = cls.output_token_name
109
+
110
+ _rpm = cls.get_rpm(cls)
111
+ _tpm = cls.get_tpm(cls)
112
+
113
+ _inference_service_ = cls._inference_service_
114
+ _model_ = model_name
115
+ _parameters_ = {
116
+ "temperature": 0.5,
117
+ "max_tokens": 1000,
118
+ "top_p": 1,
119
+ "frequency_penalty": 0,
120
+ "presence_penalty": 0,
121
+ "logprobs": False,
122
+ "top_logprobs": 3,
123
+ }
124
+
125
+ def sync_client(self):
126
+ return cls.sync_client()
127
+
128
+ def async_client(self):
129
+ return cls.async_client()
130
+
131
+ @classmethod
132
+ def available(cls) -> list[str]:
133
+ return cls.sync_client().models.list()
134
+
135
+ def get_headers(self) -> dict[str, Any]:
136
+ client = self.sync_client()
137
+ response = client.chat.completions.with_raw_response.create(
138
+ messages=[
139
+ {
140
+ "role": "user",
141
+ "content": "Say this is a test",
142
+ }
143
+ ],
144
+ model=self.model,
145
+ )
146
+ return dict(response.headers)
147
+
148
+ def get_rate_limits(self) -> dict[str, Any]:
149
+ try:
150
+ if "openai" in rate_limits:
151
+ headers = rate_limits["openai"]
152
+
153
+ else:
154
+ headers = self.get_headers()
155
+
156
+ except Exception as e:
157
+ return {
158
+ "rpm": 10_000,
159
+ "tpm": 2_000_000,
160
+ }
161
+ else:
162
+ return {
163
+ "rpm": int(headers["x-ratelimit-limit-requests"]),
164
+ "tpm": int(headers["x-ratelimit-limit-tokens"]),
165
+ }
166
+
167
+ async def async_execute_model_call(
168
+ self,
169
+ user_prompt: str,
170
+ system_prompt: str = "",
171
+ files_list: Optional[List["Files"]] = None,
172
+ invigilator: Optional[
173
+ "InvigilatorAI"
174
+ ] = None, # TBD - can eventually be used for function-calling
175
+ ) -> dict[str, Any]:
176
+ """Calls the OpenAI API and returns the API response."""
177
+ if files_list:
178
+ encoded_image = files_list[0].base64_string
179
+ content = [{"type": "text", "text": user_prompt}]
180
+ content.append(
181
+ {
182
+ "type": "image_url",
183
+ "image_url": {
184
+ "url": f"data:image/jpeg;base64,{encoded_image}"
185
+ },
186
+ }
187
+ )
188
+ else:
189
+ content = user_prompt
190
+ client = self.async_client()
191
+
192
+ messages = [
193
+ {"role": "system", "content": system_prompt},
194
+ {"role": "user", "content": content},
195
+ ]
196
+ if (
197
+ system_prompt == "" and self.omit_system_prompt_if_empty
198
+ ) or "o1" in self.model:
199
+ messages = messages[1:]
200
+
201
+ params = {
202
+ "model": self.model,
203
+ "messages": messages,
204
+ "temperature": self.temperature,
205
+ "max_tokens": self.max_tokens,
206
+ "top_p": self.top_p,
207
+ "frequency_penalty": self.frequency_penalty,
208
+ "presence_penalty": self.presence_penalty,
209
+ "logprobs": self.logprobs,
210
+ "top_logprobs": self.top_logprobs if self.logprobs else None,
211
+ }
212
+ if "o1" in self.model:
213
+ params.pop("max_tokens")
214
+ params["max_completion_tokens"] = self.max_tokens
215
+ params["temperature"] = 1
216
+ try:
217
+ response = await client.chat.completions.create(**params)
218
+ except Exception as e:
219
+ print(e)
220
+ return response.model_dump()
221
+
222
+ LLM.__name__ = "LanguageModel"
223
+
224
+ return LLM
@@ -1,89 +1,89 @@
1
- from typing import Any, List, Optional
2
- import os
3
- import asyncio
4
- from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
5
- from edsl.language_models import LanguageModel
6
- from edsl.inference_services.rate_limits_cache import rate_limits
7
- from edsl.utilities.utilities import fix_partial_correct_response
8
-
9
- from edsl.enums import InferenceServiceType
10
- import random
11
-
12
-
13
- class TestService(InferenceServiceABC):
14
- """OpenAI service class."""
15
-
16
- _inference_service_ = "test"
17
- _env_key_name_ = None
18
- _base_url_ = None
19
-
20
- _sync_client_ = None
21
- _async_client_ = None
22
-
23
- _sync_client_instance = None
24
- _async_client_instance = None
25
-
26
- key_sequence = None
27
- usage_sequence = None
28
- model_exclude_list = []
29
- input_token_name = "prompt_tokens"
30
- output_token_name = "completion_tokens"
31
-
32
- @classmethod
33
- def available(cls) -> list[str]:
34
- return ["test"]
35
-
36
- @classmethod
37
- def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
38
- throw_exception = False
39
-
40
- class TestServiceLanguageModel(LanguageModel):
41
- _model_ = "test"
42
- _parameters_ = {"temperature": 0.5}
43
- _inference_service_ = InferenceServiceType.TEST.value
44
- usage_sequence = ["usage"]
45
- key_sequence = ["message", 0, "text"]
46
- input_token_name = cls.input_token_name
47
- output_token_name = cls.output_token_name
48
- _rpm = 1000
49
- _tpm = 100000
50
-
51
- @property
52
- def _canned_response(self):
53
- if hasattr(self, "canned_response"):
54
- return self.canned_response
55
- else:
56
- return "Hello, world"
57
-
58
- async def async_execute_model_call(
59
- self,
60
- user_prompt: str,
61
- system_prompt: str,
62
- # func: Optional[callable] = None,
63
- files_list: Optional[List["File"]] = None,
64
- ) -> dict[str, Any]:
65
- await asyncio.sleep(0.1)
66
- # return {"message": """{"answer": "Hello, world"}"""}
67
-
68
- if hasattr(self, "func"):
69
- return {
70
- "message": [
71
- {"text": self.func(user_prompt, system_prompt, files_list)}
72
- ],
73
- "usage": {"prompt_tokens": 1, "completion_tokens": 1},
74
- }
75
-
76
- if hasattr(self, "throw_exception") and self.throw_exception:
77
- if hasattr(self, "exception_probability"):
78
- p = self.exception_probability
79
- else:
80
- p = 1
81
-
82
- if random.random() < p:
83
- raise Exception("This is a test error")
84
- return {
85
- "message": [{"text": f"{self._canned_response}"}],
86
- "usage": {"prompt_tokens": 1, "completion_tokens": 1},
87
- }
88
-
89
- return TestServiceLanguageModel
1
+ from typing import Any, List, Optional
2
+ import os
3
+ import asyncio
4
+ from edsl.inference_services.InferenceServiceABC import InferenceServiceABC
5
+ from edsl.language_models import LanguageModel
6
+ from edsl.inference_services.rate_limits_cache import rate_limits
7
+ from edsl.utilities.utilities import fix_partial_correct_response
8
+
9
+ from edsl.enums import InferenceServiceType
10
+ import random
11
+
12
+
13
+ class TestService(InferenceServiceABC):
14
+ """OpenAI service class."""
15
+
16
+ _inference_service_ = "test"
17
+ _env_key_name_ = None
18
+ _base_url_ = None
19
+
20
+ _sync_client_ = None
21
+ _async_client_ = None
22
+
23
+ _sync_client_instance = None
24
+ _async_client_instance = None
25
+
26
+ key_sequence = None
27
+ usage_sequence = None
28
+ model_exclude_list = []
29
+ input_token_name = "prompt_tokens"
30
+ output_token_name = "completion_tokens"
31
+
32
+ @classmethod
33
+ def available(cls) -> list[str]:
34
+ return ["test"]
35
+
36
+ @classmethod
37
+ def create_model(cls, model_name, model_class_name=None) -> LanguageModel:
38
+ throw_exception = False
39
+
40
+ class TestServiceLanguageModel(LanguageModel):
41
+ _model_ = "test"
42
+ _parameters_ = {"temperature": 0.5}
43
+ _inference_service_ = InferenceServiceType.TEST.value
44
+ usage_sequence = ["usage"]
45
+ key_sequence = ["message", 0, "text"]
46
+ input_token_name = cls.input_token_name
47
+ output_token_name = cls.output_token_name
48
+ _rpm = 1000
49
+ _tpm = 100000
50
+
51
+ @property
52
+ def _canned_response(self):
53
+ if hasattr(self, "canned_response"):
54
+ return self.canned_response
55
+ else:
56
+ return "Hello, world"
57
+
58
+ async def async_execute_model_call(
59
+ self,
60
+ user_prompt: str,
61
+ system_prompt: str,
62
+ # func: Optional[callable] = None,
63
+ files_list: Optional[List["File"]] = None,
64
+ ) -> dict[str, Any]:
65
+ await asyncio.sleep(0.1)
66
+ # return {"message": """{"answer": "Hello, world"}"""}
67
+
68
+ if hasattr(self, "func"):
69
+ return {
70
+ "message": [
71
+ {"text": self.func(user_prompt, system_prompt, files_list)}
72
+ ],
73
+ "usage": {"prompt_tokens": 1, "completion_tokens": 1},
74
+ }
75
+
76
+ if hasattr(self, "throw_exception") and self.throw_exception:
77
+ if hasattr(self, "exception_probability"):
78
+ p = self.exception_probability
79
+ else:
80
+ p = 1
81
+
82
+ if random.random() < p:
83
+ raise Exception("This is a test error")
84
+ return {
85
+ "message": [{"text": f"{self._canned_response}"}],
86
+ "usage": {"prompt_tokens": 1, "completion_tokens": 1},
87
+ }
88
+
89
+ return TestServiceLanguageModel