edsl 0.1.39.dev1__py3-none-any.whl → 0.1.39.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (256)
  1. edsl/Base.py +332 -332
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +49 -49
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +867 -867
  7. edsl/agents/AgentList.py +413 -413
  8. edsl/agents/Invigilator.py +233 -233
  9. edsl/agents/InvigilatorBase.py +270 -265
  10. edsl/agents/PromptConstructor.py +354 -354
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +279 -279
  26. edsl/config.py +157 -157
  27. edsl/conversation/Conversation.py +290 -290
  28. edsl/conversation/car_buying.py +58 -58
  29. edsl/conversation/chips.py +95 -95
  30. edsl/conversation/mug_negotiation.py +81 -81
  31. edsl/conversation/next_speaker_utilities.py +93 -93
  32. edsl/coop/PriceFetcher.py +54 -54
  33. edsl/coop/__init__.py +2 -2
  34. edsl/coop/coop.py +1028 -1028
  35. edsl/coop/utils.py +131 -131
  36. edsl/data/Cache.py +555 -555
  37. edsl/data/CacheEntry.py +233 -233
  38. edsl/data/CacheHandler.py +149 -149
  39. edsl/data/RemoteCacheSync.py +78 -78
  40. edsl/data/SQLiteDict.py +292 -292
  41. edsl/data/__init__.py +4 -4
  42. edsl/data/orm.py +10 -10
  43. edsl/data_transfer_models.py +73 -73
  44. edsl/enums.py +175 -175
  45. edsl/exceptions/BaseException.py +21 -21
  46. edsl/exceptions/__init__.py +54 -54
  47. edsl/exceptions/agents.py +42 -42
  48. edsl/exceptions/cache.py +5 -5
  49. edsl/exceptions/configuration.py +16 -16
  50. edsl/exceptions/coop.py +10 -10
  51. edsl/exceptions/data.py +14 -14
  52. edsl/exceptions/general.py +34 -34
  53. edsl/exceptions/jobs.py +33 -33
  54. edsl/exceptions/language_models.py +63 -63
  55. edsl/exceptions/prompts.py +15 -15
  56. edsl/exceptions/questions.py +91 -91
  57. edsl/exceptions/results.py +29 -29
  58. edsl/exceptions/scenarios.py +22 -22
  59. edsl/exceptions/surveys.py +37 -37
  60. edsl/inference_services/AnthropicService.py +87 -87
  61. edsl/inference_services/AwsBedrock.py +120 -120
  62. edsl/inference_services/AzureAI.py +217 -217
  63. edsl/inference_services/DeepInfraService.py +18 -18
  64. edsl/inference_services/GoogleService.py +148 -148
  65. edsl/inference_services/GroqService.py +20 -20
  66. edsl/inference_services/InferenceServiceABC.py +147 -147
  67. edsl/inference_services/InferenceServicesCollection.py +97 -97
  68. edsl/inference_services/MistralAIService.py +123 -123
  69. edsl/inference_services/OllamaService.py +18 -18
  70. edsl/inference_services/OpenAIService.py +224 -224
  71. edsl/inference_services/PerplexityService.py +163 -163
  72. edsl/inference_services/TestService.py +89 -89
  73. edsl/inference_services/TogetherAIService.py +170 -170
  74. edsl/inference_services/models_available_cache.py +118 -118
  75. edsl/inference_services/rate_limits_cache.py +25 -25
  76. edsl/inference_services/registry.py +41 -41
  77. edsl/inference_services/write_available.py +10 -10
  78. edsl/jobs/Answers.py +56 -56
  79. edsl/jobs/Jobs.py +898 -898
  80. edsl/jobs/JobsChecks.py +147 -147
  81. edsl/jobs/JobsPrompts.py +268 -268
  82. edsl/jobs/JobsRemoteInferenceHandler.py +239 -239
  83. edsl/jobs/__init__.py +1 -1
  84. edsl/jobs/buckets/BucketCollection.py +63 -63
  85. edsl/jobs/buckets/ModelBuckets.py +65 -65
  86. edsl/jobs/buckets/TokenBucket.py +251 -251
  87. edsl/jobs/interviews/Interview.py +661 -661
  88. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  89. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  90. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  91. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  92. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  93. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  94. edsl/jobs/interviews/ReportErrors.py +66 -66
  95. edsl/jobs/interviews/interview_status_enum.py +9 -9
  96. edsl/jobs/runners/JobsRunnerAsyncio.py +466 -466
  97. edsl/jobs/runners/JobsRunnerStatus.py +330 -330
  98. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  99. edsl/jobs/tasks/TaskCreators.py +64 -64
  100. edsl/jobs/tasks/TaskHistory.py +450 -450
  101. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  102. edsl/jobs/tasks/task_status_enum.py +163 -163
  103. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  104. edsl/jobs/tokens/TokenUsage.py +34 -34
  105. edsl/language_models/KeyLookup.py +30 -30
  106. edsl/language_models/LanguageModel.py +668 -668
  107. edsl/language_models/ModelList.py +155 -155
  108. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  109. edsl/language_models/__init__.py +3 -3
  110. edsl/language_models/fake_openai_call.py +15 -15
  111. edsl/language_models/fake_openai_service.py +61 -61
  112. edsl/language_models/registry.py +190 -190
  113. edsl/language_models/repair.py +156 -156
  114. edsl/language_models/unused/ReplicateBase.py +83 -83
  115. edsl/language_models/utilities.py +64 -64
  116. edsl/notebooks/Notebook.py +258 -258
  117. edsl/notebooks/__init__.py +1 -1
  118. edsl/prompts/Prompt.py +362 -362
  119. edsl/prompts/__init__.py +2 -2
  120. edsl/questions/AnswerValidatorMixin.py +289 -289
  121. edsl/questions/QuestionBase.py +664 -664
  122. edsl/questions/QuestionBaseGenMixin.py +161 -161
  123. edsl/questions/QuestionBasePromptsMixin.py +217 -217
  124. edsl/questions/QuestionBudget.py +227 -227
  125. edsl/questions/QuestionCheckBox.py +359 -359
  126. edsl/questions/QuestionExtract.py +182 -182
  127. edsl/questions/QuestionFreeText.py +114 -114
  128. edsl/questions/QuestionFunctional.py +166 -166
  129. edsl/questions/QuestionList.py +231 -231
  130. edsl/questions/QuestionMultipleChoice.py +286 -286
  131. edsl/questions/QuestionNumerical.py +153 -153
  132. edsl/questions/QuestionRank.py +324 -324
  133. edsl/questions/Quick.py +41 -41
  134. edsl/questions/RegisterQuestionsMeta.py +71 -71
  135. edsl/questions/ResponseValidatorABC.py +174 -174
  136. edsl/questions/SimpleAskMixin.py +73 -73
  137. edsl/questions/__init__.py +26 -26
  138. edsl/questions/compose_questions.py +98 -98
  139. edsl/questions/decorators.py +21 -21
  140. edsl/questions/derived/QuestionLikertFive.py +76 -76
  141. edsl/questions/derived/QuestionLinearScale.py +87 -87
  142. edsl/questions/derived/QuestionTopK.py +93 -93
  143. edsl/questions/derived/QuestionYesNo.py +82 -82
  144. edsl/questions/descriptors.py +413 -413
  145. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  146. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  147. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  148. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  149. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  150. edsl/questions/prompt_templates/question_list.jinja +17 -17
  151. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  152. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  153. edsl/questions/question_registry.py +177 -177
  154. edsl/questions/settings.py +12 -12
  155. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  156. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  157. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  158. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  159. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  160. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  161. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  162. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  163. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  164. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  165. edsl/questions/templates/list/question_presentation.jinja +5 -5
  166. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  167. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  168. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  169. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  170. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  171. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  172. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  173. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  174. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  176. edsl/results/CSSParameterizer.py +108 -108
  177. edsl/results/Dataset.py +424 -424
  178. edsl/results/DatasetExportMixin.py +731 -731
  179. edsl/results/DatasetTree.py +275 -275
  180. edsl/results/Result.py +465 -465
  181. edsl/results/Results.py +1165 -1165
  182. edsl/results/ResultsDBMixin.py +238 -238
  183. edsl/results/ResultsExportMixin.py +43 -43
  184. edsl/results/ResultsFetchMixin.py +33 -33
  185. edsl/results/ResultsGGMixin.py +121 -121
  186. edsl/results/ResultsToolsMixin.py +98 -98
  187. edsl/results/Selector.py +135 -135
  188. edsl/results/TableDisplay.py +198 -198
  189. edsl/results/__init__.py +2 -2
  190. edsl/results/table_display.css +77 -77
  191. edsl/results/tree_explore.py +115 -115
  192. edsl/scenarios/FileStore.py +632 -632
  193. edsl/scenarios/Scenario.py +601 -601
  194. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  195. edsl/scenarios/ScenarioJoin.py +127 -127
  196. edsl/scenarios/ScenarioList.py +1287 -1287
  197. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  198. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  199. edsl/scenarios/__init__.py +4 -4
  200. edsl/shared.py +1 -1
  201. edsl/study/ObjectEntry.py +173 -173
  202. edsl/study/ProofOfWork.py +113 -113
  203. edsl/study/SnapShot.py +80 -80
  204. edsl/study/Study.py +528 -528
  205. edsl/study/__init__.py +4 -4
  206. edsl/surveys/DAG.py +148 -148
  207. edsl/surveys/Memory.py +31 -31
  208. edsl/surveys/MemoryPlan.py +244 -244
  209. edsl/surveys/Rule.py +326 -326
  210. edsl/surveys/RuleCollection.py +387 -387
  211. edsl/surveys/Survey.py +1801 -1801
  212. edsl/surveys/SurveyCSS.py +261 -261
  213. edsl/surveys/SurveyExportMixin.py +259 -259
  214. edsl/surveys/SurveyFlowVisualizationMixin.py +179 -179
  215. edsl/surveys/SurveyQualtricsImport.py +284 -284
  216. edsl/surveys/__init__.py +3 -3
  217. edsl/surveys/base.py +53 -53
  218. edsl/surveys/descriptors.py +56 -56
  219. edsl/surveys/instructions/ChangeInstruction.py +49 -49
  220. edsl/surveys/instructions/Instruction.py +65 -65
  221. edsl/surveys/instructions/InstructionCollection.py +77 -77
  222. edsl/templates/error_reporting/base.html +23 -23
  223. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  224. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  225. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  226. edsl/templates/error_reporting/interview_details.html +115 -115
  227. edsl/templates/error_reporting/interviews.html +19 -19
  228. edsl/templates/error_reporting/overview.html +4 -4
  229. edsl/templates/error_reporting/performance_plot.html +1 -1
  230. edsl/templates/error_reporting/report.css +73 -73
  231. edsl/templates/error_reporting/report.html +117 -117
  232. edsl/templates/error_reporting/report.js +25 -25
  233. edsl/tools/__init__.py +1 -1
  234. edsl/tools/clusters.py +192 -192
  235. edsl/tools/embeddings.py +27 -27
  236. edsl/tools/embeddings_plotting.py +118 -118
  237. edsl/tools/plotting.py +112 -112
  238. edsl/tools/summarize.py +18 -18
  239. edsl/utilities/SystemInfo.py +28 -28
  240. edsl/utilities/__init__.py +22 -22
  241. edsl/utilities/ast_utilities.py +25 -25
  242. edsl/utilities/data/Registry.py +6 -6
  243. edsl/utilities/data/__init__.py +1 -1
  244. edsl/utilities/data/scooter_results.json +1 -1
  245. edsl/utilities/decorators.py +77 -77
  246. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  247. edsl/utilities/interface.py +627 -627
  248. edsl/utilities/naming_utilities.py +263 -263
  249. edsl/utilities/repair_functions.py +28 -28
  250. edsl/utilities/restricted_python.py +70 -70
  251. edsl/utilities/utilities.py +424 -424
  252. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/LICENSE +21 -21
  253. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/METADATA +1 -1
  254. edsl-0.1.39.dev3.dist-info/RECORD +277 -0
  255. edsl-0.1.39.dev1.dist-info/RECORD +0 -277
  256. {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev3.dist-info}/WHEEL +0 -0
edsl/jobs/JobsRemoteInferenceHandler.py CHANGED
@@ -1,239 +1,239 @@
- from typing import Optional, Union, Literal
- import requests
- import sys
- from edsl.exceptions.coop import CoopServerResponseError
-
- # from edsl.enums import VisibilityType
- from edsl.results import Results
-
-
- class JobsRemoteInferenceHandler:
-     def __init__(self, jobs, verbose=False, poll_interval=3):
-         """
-         >>> from edsl.jobs import Jobs
-         >>> jh = JobsRemoteInferenceHandler(Jobs.example(), verbose=True)
-         >>> jh.use_remote_inference(True)
-         False
-         >>> jh._poll_remote_inference_job({'uuid':1234}, testing_simulated_response={"status": "failed"}) # doctest: +NORMALIZE_WHITESPACE
-         Job failed.
-         ...
-         >>> jh._poll_remote_inference_job({'uuid':1234}, testing_simulated_response={"status": "completed"}) # doctest: +NORMALIZE_WHITESPACE
-         Job completed and Results stored on Coop: None.
-         Results(...)
-         """
-         self.jobs = jobs
-         self.verbose = verbose
-         self.poll_interval = poll_interval
-
-         self._remote_job_creation_data = None
-         self._job_uuid = None
-
-     @property
-     def remote_job_creation_data(self):
-         return self._remote_job_creation_data
-
-     @property
-     def job_uuid(self):
-         return self._job_uuid
-
-     def use_remote_inference(self, disable_remote_inference: bool) -> bool:
-         if disable_remote_inference:
-             return False
-         if not disable_remote_inference:
-             try:
-                 from edsl import Coop
-
-                 user_edsl_settings = Coop().edsl_settings
-                 return user_edsl_settings.get("remote_inference", False)
-             except requests.ConnectionError:
-                 pass
-             except CoopServerResponseError as e:
-                 pass
-
-         return False
-
-     def create_remote_inference_job(
-         self,
-         iterations: int = 1,
-         remote_inference_description: Optional[str] = None,
-         remote_inference_results_visibility: Optional["VisibilityType"] = "unlisted",
-         verbose=False,
-     ):
-         """ """
-         from edsl.config import CONFIG
-         from edsl.coop.coop import Coop
-         from rich import print as rich_print
-
-         coop = Coop()
-         print("Remote inference activated. Sending job to server...")
-         remote_job_creation_data = coop.remote_inference_create(
-             self.jobs,
-             description=remote_inference_description,
-             status="queued",
-             iterations=iterations,
-             initial_results_visibility=remote_inference_results_visibility,
-         )
-         job_uuid = remote_job_creation_data.get("uuid")
-         print(f"Job sent to server. (Job uuid={job_uuid}).")
-
-         expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")
-         progress_bar_url = f"{expected_parrot_url}/home/remote-job-progress/{job_uuid}"
-
-         rich_print(
-             f"View job progress here: [#38bdf8][link={progress_bar_url}]{progress_bar_url}[/link][/#38bdf8]"
-         )
-
-         self._remote_job_creation_data = remote_job_creation_data
-         self._job_uuid = job_uuid
-         # return remote_job_creation_data
-
-     @staticmethod
-     def check_status(job_uuid):
-         from edsl.coop.coop import Coop
-
-         coop = Coop()
-         return coop.remote_inference_get(job_uuid)
-
-     def poll_remote_inference_job(self):
-         return self._poll_remote_inference_job(
-             self.remote_job_creation_data, verbose=self.verbose
-         )
-
-     def _poll_remote_inference_job(
-         self,
-         remote_job_creation_data: dict,
-         verbose=False,
-         poll_interval: Optional[float] = None,
-         testing_simulated_response: Optional[dict] = None,
-     ) -> Union[Results, None]:
-         import time
-         from datetime import datetime
-         from edsl.config import CONFIG
-         from edsl.coop.coop import Coop
-
-         if poll_interval is None:
-             poll_interval = self.poll_interval
-
-         expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")
-
-         job_uuid = remote_job_creation_data.get("uuid")
-         coop = Coop()
-
-         if testing_simulated_response is not None:
-             remote_job_data_fetcher = lambda job_uuid: testing_simulated_response
-             object_fetcher = (
-                 lambda results_uuid, expected_object_type: Results.example()
-             )
-         else:
-             remote_job_data_fetcher = coop.remote_inference_get
-             object_fetcher = coop.get
-
-         job_in_queue = True
-         while job_in_queue:
-             remote_job_data = remote_job_data_fetcher(job_uuid)
-             status = remote_job_data.get("status")
-             if status == "cancelled":
-                 print("\r" + " " * 80 + "\r", end="")
-                 print("Job cancelled by the user.")
-                 print(
-                     f"See {expected_parrot_url}/home/remote-inference for more details."
-                 )
-                 return None
-             elif status == "failed":
-                 print("\r" + " " * 80 + "\r", end="")
-                 # write to stderr
-                 latest_error_report_url = remote_job_data.get("latest_error_report_url")
-                 if latest_error_report_url:
-                     print("Job failed.")
-                     print(
-                         f"Your job generated exceptions. Details on these exceptions can be found in the following report: {latest_error_report_url}"
-                     )
-                     print(
-                         f"Need support? Post a message at the Expected Parrot Discord channel (https://discord.com/invite/mxAYkjfy9m) or send an email to info@expectedparrot.com."
-                     )
-                 else:
-                     print("Job failed.")
-                     print(
-                         f"See {expected_parrot_url}/home/remote-inference for more details."
-                     )
-                 return None
-             elif status == "completed":
-                 results_uuid = remote_job_data.get("results_uuid")
-                 results_url = remote_job_data.get("results_url")
-                 results = object_fetcher(results_uuid, expected_object_type="results")
-                 print("\r" + " " * 80 + "\r", end="")
-                 print(f"Job completed and Results stored on Coop: {results_url}.")
-                 return results
-             else:
-                 duration = poll_interval
-                 time_checked = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
-                 frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
-                 start_time = time.time()
-                 i = 0
-                 while time.time() - start_time < duration:
-                     print(
-                         f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
-                         end="",
-                         flush=True,
-                     )
-                     time.sleep(0.1)
-                     i += 1
-
-     def use_remote_inference(self, disable_remote_inference: bool) -> bool:
-         if disable_remote_inference:
-             return False
-         if not disable_remote_inference:
-             try:
-                 from edsl import Coop
-
-                 user_edsl_settings = Coop().edsl_settings
-                 return user_edsl_settings.get("remote_inference", False)
-             except requests.ConnectionError:
-                 pass
-             except CoopServerResponseError as e:
-                 pass
-
-         return False
-
-     async def create_and_poll_remote_job(
-         self,
-         iterations: int = 1,
-         remote_inference_description: Optional[str] = None,
-         remote_inference_results_visibility: Optional[
-             Literal["private", "public", "unlisted"]
-         ] = "unlisted",
-     ) -> Union[Results, None]:
-         """
-         Creates and polls a remote inference job asynchronously.
-         Reuses existing synchronous methods but runs them in an async context.
-
-         :param iterations: Number of times to run each interview
-         :param remote_inference_description: Optional description for the remote job
-         :param remote_inference_results_visibility: Visibility setting for results
-         :return: Results object if successful, None if job fails or is cancelled
-         """
-         import asyncio
-         from functools import partial
-
-         # Create job using existing method
-         loop = asyncio.get_event_loop()
-         remote_job_creation_data = await loop.run_in_executor(
-             None,
-             partial(
-                 self.create_remote_inference_job,
-                 iterations=iterations,
-                 remote_inference_description=remote_inference_description,
-                 remote_inference_results_visibility=remote_inference_results_visibility,
-             ),
-         )
-
-         # Poll using existing method but with async sleep
-         return await loop.run_in_executor(
-             None, partial(self.poll_remote_inference_job, remote_job_creation_data)
-         )
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
+ from typing import Optional, Union, Literal
+ import requests
+ import sys
+ from edsl.exceptions.coop import CoopServerResponseError
+
+ # from edsl.enums import VisibilityType
+ from edsl.results import Results
+
+
+ class JobsRemoteInferenceHandler:
+     def __init__(self, jobs, verbose=False, poll_interval=3):
+         """
+         >>> from edsl.jobs import Jobs
+         >>> jh = JobsRemoteInferenceHandler(Jobs.example(), verbose=True)
+         >>> jh.use_remote_inference(True)
+         False
+         >>> jh._poll_remote_inference_job({'uuid':1234}, testing_simulated_response={"status": "failed"}) # doctest: +NORMALIZE_WHITESPACE
+         Job failed.
+         ...
+         >>> jh._poll_remote_inference_job({'uuid':1234}, testing_simulated_response={"status": "completed"}) # doctest: +NORMALIZE_WHITESPACE
+         Job completed and Results stored on Coop: None.
+         Results(...)
+         """
+         self.jobs = jobs
+         self.verbose = verbose
+         self.poll_interval = poll_interval
+
+         self._remote_job_creation_data = None
+         self._job_uuid = None
+
+     @property
+     def remote_job_creation_data(self):
+         return self._remote_job_creation_data
+
+     @property
+     def job_uuid(self):
+         return self._job_uuid
+
+     def use_remote_inference(self, disable_remote_inference: bool) -> bool:
+         if disable_remote_inference:
+             return False
+         if not disable_remote_inference:
+             try:
+                 from edsl import Coop
+
+                 user_edsl_settings = Coop().edsl_settings
+                 return user_edsl_settings.get("remote_inference", False)
+             except requests.ConnectionError:
+                 pass
+             except CoopServerResponseError as e:
+                 pass
+
+         return False
+
+     def create_remote_inference_job(
+         self,
+         iterations: int = 1,
+         remote_inference_description: Optional[str] = None,
+         remote_inference_results_visibility: Optional["VisibilityType"] = "unlisted",
+         verbose=False,
+     ):
+         """ """
+         from edsl.config import CONFIG
+         from edsl.coop.coop import Coop
+         from rich import print as rich_print
+
+         coop = Coop()
+         print("Remote inference activated. Sending job to server...")
+         remote_job_creation_data = coop.remote_inference_create(
+             self.jobs,
+             description=remote_inference_description,
+             status="queued",
+             iterations=iterations,
+             initial_results_visibility=remote_inference_results_visibility,
+         )
+         job_uuid = remote_job_creation_data.get("uuid")
+         print(f"Job sent to server. (Job uuid={job_uuid}).")
+
+         expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")
+         progress_bar_url = f"{expected_parrot_url}/home/remote-job-progress/{job_uuid}"
+
+         rich_print(
+             f"View job progress here: [#38bdf8][link={progress_bar_url}]{progress_bar_url}[/link][/#38bdf8]"
+         )
+
+         self._remote_job_creation_data = remote_job_creation_data
+         self._job_uuid = job_uuid
+         # return remote_job_creation_data
+
+     @staticmethod
+     def check_status(job_uuid):
+         from edsl.coop.coop import Coop
+
+         coop = Coop()
+         return coop.remote_inference_get(job_uuid)
+
+     def poll_remote_inference_job(self):
+         return self._poll_remote_inference_job(
+             self.remote_job_creation_data, verbose=self.verbose
+         )
+
+     def _poll_remote_inference_job(
+         self,
+         remote_job_creation_data: dict,
+         verbose=False,
+         poll_interval: Optional[float] = None,
+         testing_simulated_response: Optional[dict] = None,
+     ) -> Union[Results, None]:
+         import time
+         from datetime import datetime
+         from edsl.config import CONFIG
+         from edsl.coop.coop import Coop
+
+         if poll_interval is None:
+             poll_interval = self.poll_interval
+
+         expected_parrot_url = CONFIG.get("EXPECTED_PARROT_URL")
+
+         job_uuid = remote_job_creation_data.get("uuid")
+         coop = Coop()
+
+         if testing_simulated_response is not None:
+             remote_job_data_fetcher = lambda job_uuid: testing_simulated_response
+             object_fetcher = (
+                 lambda results_uuid, expected_object_type: Results.example()
+             )
+         else:
+             remote_job_data_fetcher = coop.remote_inference_get
+             object_fetcher = coop.get
+
+         job_in_queue = True
+         while job_in_queue:
+             remote_job_data = remote_job_data_fetcher(job_uuid)
+             status = remote_job_data.get("status")
+             if status == "cancelled":
+                 print("\r" + " " * 80 + "\r", end="")
+                 print("Job cancelled by the user.")
+                 print(
+                     f"See {expected_parrot_url}/home/remote-inference for more details."
+                 )
+                 return None
+             elif status == "failed":
+                 print("\r" + " " * 80 + "\r", end="")
+                 # write to stderr
+                 latest_error_report_url = remote_job_data.get("latest_error_report_url")
+                 if latest_error_report_url:
+                     print("Job failed.")
+                     print(
+                         f"Your job generated exceptions. Details on these exceptions can be found in the following report: {latest_error_report_url}"
+                     )
+                     print(
+                         f"Need support? Post a message at the Expected Parrot Discord channel (https://discord.com/invite/mxAYkjfy9m) or send an email to info@expectedparrot.com."
+                     )
+                 else:
+                     print("Job failed.")
+                     print(
+                         f"See {expected_parrot_url}/home/remote-inference for more details."
+                     )
+                 return None
+             elif status == "completed":
+                 results_uuid = remote_job_data.get("results_uuid")
+                 results_url = remote_job_data.get("results_url")
+                 results = object_fetcher(results_uuid, expected_object_type="results")
+                 print("\r" + " " * 80 + "\r", end="")
+                 print(f"Job completed and Results stored on Coop: {results_url}.")
+                 return results
+             else:
+                 duration = poll_interval
+                 time_checked = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
+                 frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+                 start_time = time.time()
+                 i = 0
+                 while time.time() - start_time < duration:
+                     print(
+                         f"\r{frames[i % len(frames)]} Job status: {status} - last update: {time_checked}",
+                         end="",
+                         flush=True,
+                     )
+                     time.sleep(0.1)
+                     i += 1
+
+     def use_remote_inference(self, disable_remote_inference: bool) -> bool:
+         if disable_remote_inference:
+             return False
+         if not disable_remote_inference:
+             try:
+                 from edsl import Coop
+
+                 user_edsl_settings = Coop().edsl_settings
+                 return user_edsl_settings.get("remote_inference", False)
+             except requests.ConnectionError:
+                 pass
+             except CoopServerResponseError as e:
+                 pass
+
+         return False
+
+     async def create_and_poll_remote_job(
+         self,
+         iterations: int = 1,
+         remote_inference_description: Optional[str] = None,
+         remote_inference_results_visibility: Optional[
+             Literal["private", "public", "unlisted"]
+         ] = "unlisted",
+     ) -> Union[Results, None]:
+         """
+         Creates and polls a remote inference job asynchronously.
+         Reuses existing synchronous methods but runs them in an async context.
+
+         :param iterations: Number of times to run each interview
+         :param remote_inference_description: Optional description for the remote job
+         :param remote_inference_results_visibility: Visibility setting for results
+         :return: Results object if successful, None if job fails or is cancelled
+         """
+         import asyncio
+         from functools import partial
+
+         # Create job using existing method
+         loop = asyncio.get_event_loop()
+         remote_job_creation_data = await loop.run_in_executor(
+             None,
+             partial(
+                 self.create_remote_inference_job,
+                 iterations=iterations,
+                 remote_inference_description=remote_inference_description,
+                 remote_inference_results_visibility=remote_inference_results_visibility,
+             ),
+         )
+
+         # Poll using existing method but with async sleep
+         return await loop.run_in_executor(
+             None, partial(self.poll_remote_inference_job, remote_job_creation_data)
+         )
+
+
+ if __name__ == "__main__":
+     import doctest
+
+     doctest.testmod(optionflags=doctest.ELLIPSIS)
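
The doctest at the top of this file already exercises the handler; as a rough illustrative sketch of the same synchronous create-then-poll flow, using only methods defined in the file above (the import path is inferred from the file location and Jobs.example() is taken from the doctest; this sketch is not part of the package):

    # Illustrative sketch only (not taken from the package).
    from edsl.jobs import Jobs
    from edsl.jobs.JobsRemoteInferenceHandler import JobsRemoteInferenceHandler

    handler = JobsRemoteInferenceHandler(Jobs.example(), verbose=True, poll_interval=3)
    if handler.use_remote_inference(disable_remote_inference=False):
        handler.create_remote_inference_job(iterations=1)  # stores job_uuid on the handler
        results = handler.poll_remote_inference_job()      # returns Results, or None on failure/cancel
        print(JobsRemoteInferenceHandler.check_status(handler.job_uuid))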
edsl/jobs/__init__.py CHANGED
@@ -1 +1 @@
- from edsl.jobs.Jobs import Jobs
+ from edsl.jobs.Jobs import Jobs
edsl/jobs/buckets/BucketCollection.py CHANGED
@@ -1,63 +1,63 @@
- from collections import UserDict
- from edsl.jobs.buckets.TokenBucket import TokenBucket
- from edsl.jobs.buckets.ModelBuckets import ModelBuckets
-
-
- class BucketCollection(UserDict):
-     """A Jobs object will have a whole collection of model buckets, as multiple models could be used.
-
-     The keys here are the models, and the values are the ModelBuckets objects.
-     Models themselves are hashable, so this works.
-     """
-
-     def __init__(self, infinity_buckets=False):
-         super().__init__()
-         self.infinity_buckets = infinity_buckets
-         self.models_to_services = {}
-         self.services_to_buckets = {}
-
-     def __repr__(self):
-         return f"BucketCollection({self.data})"
-
-     def add_model(self, model: "LanguageModel") -> None:
-         """Adds a model to the bucket collection.
-
-         This will create the token and request buckets for the model."""
-
-         # compute the TPS and RPS from the model
-         if not self.infinity_buckets:
-             TPS = model.TPM / 60.0
-             RPS = model.RPM / 60.0
-         else:
-             TPS = float("inf")
-             RPS = float("inf")
-
-         if model.model not in self.models_to_services:
-             service = model._inference_service_
-             if service not in self.services_to_buckets:
-                 requests_bucket = TokenBucket(
-                     bucket_name=service,
-                     bucket_type="requests",
-                     capacity=RPS,
-                     refill_rate=RPS,
-                 )
-                 tokens_bucket = TokenBucket(
-                     bucket_name=service,
-                     bucket_type="tokens",
-                     capacity=TPS,
-                     refill_rate=TPS,
-                 )
-                 self.services_to_buckets[service] = ModelBuckets(
-                     requests_bucket, tokens_bucket
-                 )
-             self.models_to_services[model.model] = service
-             self[model] = self.services_to_buckets[service]
-         else:
-             self[model] = self.services_to_buckets[self.models_to_services[model.model]]
-
-     def visualize(self) -> dict:
-         """Visualize the token and request buckets for each model."""
-         plots = {}
-         for model in self:
-             plots[model] = self[model].visualize()
-         return plots
+ from collections import UserDict
+ from edsl.jobs.buckets.TokenBucket import TokenBucket
+ from edsl.jobs.buckets.ModelBuckets import ModelBuckets
+
+
+ class BucketCollection(UserDict):
+     """A Jobs object will have a whole collection of model buckets, as multiple models could be used.
+
+     The keys here are the models, and the values are the ModelBuckets objects.
+     Models themselves are hashable, so this works.
+     """
+
+     def __init__(self, infinity_buckets=False):
+         super().__init__()
+         self.infinity_buckets = infinity_buckets
+         self.models_to_services = {}
+         self.services_to_buckets = {}
+
+     def __repr__(self):
+         return f"BucketCollection({self.data})"
+
+     def add_model(self, model: "LanguageModel") -> None:
+         """Adds a model to the bucket collection.
+
+         This will create the token and request buckets for the model."""
+
+         # compute the TPS and RPS from the model
+         if not self.infinity_buckets:
+             TPS = model.TPM / 60.0
+             RPS = model.RPM / 60.0
+         else:
+             TPS = float("inf")
+             RPS = float("inf")
+
+         if model.model not in self.models_to_services:
+             service = model._inference_service_
+             if service not in self.services_to_buckets:
+                 requests_bucket = TokenBucket(
+                     bucket_name=service,
+                     bucket_type="requests",
+                     capacity=RPS,
+                     refill_rate=RPS,
+                 )
+                 tokens_bucket = TokenBucket(
+                     bucket_name=service,
+                     bucket_type="tokens",
+                     capacity=TPS,
+                     refill_rate=TPS,
+                 )
+                 self.services_to_buckets[service] = ModelBuckets(
+                     requests_bucket, tokens_bucket
+                 )
+             self.models_to_services[model.model] = service
+             self[model] = self.services_to_buckets[service]
+         else:
+             self[model] = self.services_to_buckets[self.models_to_services[model.model]]
+
+     def visualize(self) -> dict:
+         """Visualize the token and request buckets for each model."""
+         plots = {}
+         for model in self:
+             plots[model] = self[model].visualize()
+         return plots
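
For context, a minimal illustrative sketch of how this collection is driven, assuming model is any edsl LanguageModel exposing .model, .TPM, .RPM and ._inference_service_, which is all that add_model reads (this sketch is not part of the package):

    # Illustrative sketch only (not taken from the package).
    from edsl.jobs.buckets.BucketCollection import BucketCollection

    buckets = BucketCollection(infinity_buckets=False)
    buckets.add_model(model)        # one requests/tokens bucket pair per inference service
    model_buckets = buckets[model]  # models sharing a service share the same ModelBuckets
    plots = buckets.visualize()     # {model: visualization} for every model added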