edsl 0.1.37.dev6__py3-none-any.whl → 0.1.38.dev1__py3-none-any.whl

This diff compares the contents of two package versions as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
Files changed (261)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +855 -855
  7. edsl/agents/AgentList.py +350 -350
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +284 -284
  10. edsl/agents/PromptConstructor.py +353 -353
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +99 -99
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +160 -160
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +290 -290
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/chips.py +95 -95
  45. edsl/conversation/mug_negotiation.py +81 -81
  46. edsl/conversation/next_speaker_utilities.py +93 -93
  47. edsl/coop/PriceFetcher.py +54 -54
  48. edsl/coop/__init__.py +2 -2
  49. edsl/coop/coop.py +958 -958
  50. edsl/coop/utils.py +131 -131
  51. edsl/data/Cache.py +527 -527
  52. edsl/data/CacheEntry.py +228 -228
  53. edsl/data/CacheHandler.py +149 -149
  54. edsl/data/RemoteCacheSync.py +97 -97
  55. edsl/data/SQLiteDict.py +292 -292
  56. edsl/data/__init__.py +4 -4
  57. edsl/data/orm.py +10 -10
  58. edsl/data_transfer_models.py +73 -73
  59. edsl/enums.py +173 -173
  60. edsl/exceptions/BaseException.py +21 -21
  61. edsl/exceptions/__init__.py +54 -54
  62. edsl/exceptions/agents.py +38 -38
  63. edsl/exceptions/configuration.py +16 -16
  64. edsl/exceptions/coop.py +10 -10
  65. edsl/exceptions/data.py +14 -14
  66. edsl/exceptions/general.py +34 -34
  67. edsl/exceptions/jobs.py +33 -33
  68. edsl/exceptions/language_models.py +63 -63
  69. edsl/exceptions/prompts.py +15 -15
  70. edsl/exceptions/questions.py +91 -91
  71. edsl/exceptions/results.py +29 -29
  72. edsl/exceptions/scenarios.py +22 -22
  73. edsl/exceptions/surveys.py +37 -37
  74. edsl/inference_services/AnthropicService.py +87 -87
  75. edsl/inference_services/AwsBedrock.py +120 -120
  76. edsl/inference_services/AzureAI.py +217 -217
  77. edsl/inference_services/DeepInfraService.py +18 -18
  78. edsl/inference_services/GoogleService.py +156 -156
  79. edsl/inference_services/GroqService.py +20 -20
  80. edsl/inference_services/InferenceServiceABC.py +147 -147
  81. edsl/inference_services/InferenceServicesCollection.py +97 -97
  82. edsl/inference_services/MistralAIService.py +123 -123
  83. edsl/inference_services/OllamaService.py +18 -18
  84. edsl/inference_services/OpenAIService.py +224 -224
  85. edsl/inference_services/TestService.py +89 -89
  86. edsl/inference_services/TogetherAIService.py +170 -170
  87. edsl/inference_services/models_available_cache.py +118 -118
  88. edsl/inference_services/rate_limits_cache.py +25 -25
  89. edsl/inference_services/registry.py +39 -39
  90. edsl/inference_services/write_available.py +10 -10
  91. edsl/jobs/Answers.py +56 -56
  92. edsl/jobs/Jobs.py +1347 -1347
  93. edsl/jobs/__init__.py +1 -1
  94. edsl/jobs/buckets/BucketCollection.py +63 -63
  95. edsl/jobs/buckets/ModelBuckets.py +65 -65
  96. edsl/jobs/buckets/TokenBucket.py +248 -248
  97. edsl/jobs/interviews/Interview.py +661 -661
  98. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  99. edsl/jobs/interviews/InterviewExceptionEntry.py +186 -186
  100. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  101. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  102. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  103. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  104. edsl/jobs/interviews/ReportErrors.py +66 -66
  105. edsl/jobs/interviews/interview_status_enum.py +9 -9
  106. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  107. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  108. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  109. edsl/jobs/tasks/TaskCreators.py +64 -64
  110. edsl/jobs/tasks/TaskHistory.py +442 -442
  111. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  112. edsl/jobs/tasks/task_status_enum.py +163 -163
  113. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  114. edsl/jobs/tokens/TokenUsage.py +34 -34
  115. edsl/language_models/KeyLookup.py +30 -30
  116. edsl/language_models/LanguageModel.py +706 -706
  117. edsl/language_models/ModelList.py +102 -102
  118. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  119. edsl/language_models/__init__.py +3 -3
  120. edsl/language_models/fake_openai_call.py +15 -15
  121. edsl/language_models/fake_openai_service.py +61 -61
  122. edsl/language_models/registry.py +137 -137
  123. edsl/language_models/repair.py +156 -156
  124. edsl/language_models/unused/ReplicateBase.py +83 -83
  125. edsl/language_models/utilities.py +64 -64
  126. edsl/notebooks/Notebook.py +259 -259
  127. edsl/notebooks/__init__.py +1 -1
  128. edsl/prompts/Prompt.py +357 -357
  129. edsl/prompts/__init__.py +2 -2
  130. edsl/questions/AnswerValidatorMixin.py +289 -289
  131. edsl/questions/QuestionBase.py +656 -656
  132. edsl/questions/QuestionBaseGenMixin.py +161 -161
  133. edsl/questions/QuestionBasePromptsMixin.py +234 -234
  134. edsl/questions/QuestionBudget.py +227 -227
  135. edsl/questions/QuestionCheckBox.py +359 -359
  136. edsl/questions/QuestionExtract.py +183 -183
  137. edsl/questions/QuestionFreeText.py +114 -114
  138. edsl/questions/QuestionFunctional.py +159 -159
  139. edsl/questions/QuestionList.py +231 -231
  140. edsl/questions/QuestionMultipleChoice.py +286 -286
  141. edsl/questions/QuestionNumerical.py +153 -153
  142. edsl/questions/QuestionRank.py +324 -324
  143. edsl/questions/Quick.py +41 -41
  144. edsl/questions/RegisterQuestionsMeta.py +71 -71
  145. edsl/questions/ResponseValidatorABC.py +174 -174
  146. edsl/questions/SimpleAskMixin.py +73 -73
  147. edsl/questions/__init__.py +26 -26
  148. edsl/questions/compose_questions.py +98 -98
  149. edsl/questions/decorators.py +21 -21
  150. edsl/questions/derived/QuestionLikertFive.py +76 -76
  151. edsl/questions/derived/QuestionLinearScale.py +87 -87
  152. edsl/questions/derived/QuestionTopK.py +91 -91
  153. edsl/questions/derived/QuestionYesNo.py +82 -82
  154. edsl/questions/descriptors.py +413 -413
  155. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  156. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  157. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  158. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  159. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  160. edsl/questions/prompt_templates/question_list.jinja +17 -17
  161. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  162. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  163. edsl/questions/question_registry.py +147 -147
  164. edsl/questions/settings.py +12 -12
  165. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  167. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  168. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  169. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  170. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  171. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  172. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  173. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  174. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  175. edsl/questions/templates/list/question_presentation.jinja +5 -5
  176. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  177. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  178. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  179. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  180. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  181. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  182. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  183. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  184. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  185. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  186. edsl/results/Dataset.py +293 -293
  187. edsl/results/DatasetExportMixin.py +717 -717
  188. edsl/results/DatasetTree.py +145 -145
  189. edsl/results/Result.py +450 -450
  190. edsl/results/Results.py +1071 -1071
  191. edsl/results/ResultsDBMixin.py +238 -238
  192. edsl/results/ResultsExportMixin.py +43 -43
  193. edsl/results/ResultsFetchMixin.py +33 -33
  194. edsl/results/ResultsGGMixin.py +121 -121
  195. edsl/results/ResultsToolsMixin.py +98 -98
  196. edsl/results/Selector.py +135 -135
  197. edsl/results/__init__.py +2 -2
  198. edsl/results/tree_explore.py +115 -115
  199. edsl/scenarios/FileStore.py +458 -458
  200. edsl/scenarios/Scenario.py +546 -546
  201. edsl/scenarios/ScenarioHtmlMixin.py +64 -64
  202. edsl/scenarios/ScenarioList.py +1112 -1112
  203. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  204. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  205. edsl/scenarios/__init__.py +4 -4
  206. edsl/shared.py +1 -1
  207. edsl/study/ObjectEntry.py +173 -173
  208. edsl/study/ProofOfWork.py +113 -113
  209. edsl/study/SnapShot.py +80 -80
  210. edsl/study/Study.py +528 -528
  211. edsl/study/__init__.py +4 -4
  212. edsl/surveys/DAG.py +148 -148
  213. edsl/surveys/Memory.py +31 -31
  214. edsl/surveys/MemoryPlan.py +244 -244
  215. edsl/surveys/Rule.py +330 -330
  216. edsl/surveys/RuleCollection.py +387 -387
  217. edsl/surveys/Survey.py +1795 -1795
  218. edsl/surveys/SurveyCSS.py +261 -261
  219. edsl/surveys/SurveyExportMixin.py +259 -259
  220. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  221. edsl/surveys/SurveyQualtricsImport.py +284 -284
  222. edsl/surveys/__init__.py +3 -3
  223. edsl/surveys/base.py +53 -53
  224. edsl/surveys/descriptors.py +56 -56
  225. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  226. edsl/surveys/instructions/Instruction.py +51 -51
  227. edsl/surveys/instructions/InstructionCollection.py +77 -77
  228. edsl/templates/error_reporting/base.html +23 -23
  229. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  230. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  231. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  232. edsl/templates/error_reporting/interview_details.html +115 -115
  233. edsl/templates/error_reporting/interviews.html +9 -9
  234. edsl/templates/error_reporting/overview.html +4 -4
  235. edsl/templates/error_reporting/performance_plot.html +1 -1
  236. edsl/templates/error_reporting/report.css +73 -73
  237. edsl/templates/error_reporting/report.html +117 -117
  238. edsl/templates/error_reporting/report.js +25 -25
  239. edsl/tools/__init__.py +1 -1
  240. edsl/tools/clusters.py +192 -192
  241. edsl/tools/embeddings.py +27 -27
  242. edsl/tools/embeddings_plotting.py +118 -118
  243. edsl/tools/plotting.py +112 -112
  244. edsl/tools/summarize.py +18 -18
  245. edsl/utilities/SystemInfo.py +28 -28
  246. edsl/utilities/__init__.py +22 -22
  247. edsl/utilities/ast_utilities.py +25 -25
  248. edsl/utilities/data/Registry.py +6 -6
  249. edsl/utilities/data/__init__.py +1 -1
  250. edsl/utilities/data/scooter_results.json +1 -1
  251. edsl/utilities/decorators.py +77 -77
  252. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  253. edsl/utilities/interface.py +627 -627
  254. edsl/utilities/repair_functions.py +28 -28
  255. edsl/utilities/restricted_python.py +70 -70
  256. edsl/utilities/utilities.py +409 -409
  257. {edsl-0.1.37.dev6.dist-info → edsl-0.1.38.dev1.dist-info}/LICENSE +21 -21
  258. {edsl-0.1.37.dev6.dist-info → edsl-0.1.38.dev1.dist-info}/METADATA +1 -1
  259. edsl-0.1.38.dev1.dist-info/RECORD +283 -0
  260. edsl-0.1.37.dev6.dist-info/RECORD +0 -283
  261. {edsl-0.1.37.dev6.dist-info → edsl-0.1.38.dev1.dist-info}/WHEEL +0 -0
edsl/jobs/runners/JobsRunnerStatus.py
@@ -1,332 +1,332 @@
- from __future__ import annotations
-
- import time
- from dataclasses import dataclass, asdict
-
- from typing import List, DefaultDict, Optional, Type, Literal
- from collections import UserDict, defaultdict
-
- from rich.text import Text
- from rich.box import SIMPLE
- from rich.table import Table
- from rich.live import Live
- from rich.panel import Panel
- from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn
- from rich.layout import Layout
- from rich.console import Group
- from rich import box
-
- from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
- from edsl.jobs.tokens.TokenUsage import TokenUsage
- from edsl.enums import get_token_pricing
- from edsl.jobs.tasks.task_status_enum import TaskStatus
-
- InterviewTokenUsageMapping = DefaultDict[str, InterviewTokenUsage]
-
- from edsl.jobs.interviews.InterviewStatistic import InterviewStatistic
- from edsl.jobs.interviews.InterviewStatisticsCollection import (
-     InterviewStatisticsCollection,
- )
- from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
-
-
- @dataclass
- class ModelInfo:
-     model_name: str
-     TPM_limit_k: float
-     RPM_limit_k: float
-     num_tasks_waiting: int
-     token_usage_info: dict
-
-
- @dataclass
- class ModelTokenUsageStats:
-     token_usage_type: str
-     details: List[dict]
-     cost: str
-
-
- class Stats:
-     def elapsed_time(self):
-         InterviewStatistic("elapsed_time", value=elapsed_time, digits=1, units="sec.")
-
-
- class JobsRunnerStatus:
-     def __init__(
-         self, jobs_runner: "JobsRunnerAsyncio", n: int, refresh_rate: float = 0.25
-     ):
-         self.jobs_runner = jobs_runner
-         self.start_time = time.time()
-         self.completed_interviews = []
-         self.refresh_rate = refresh_rate
-         self.statistics = [
-             "elapsed_time",
-             "total_interviews_requested",
-             "completed_interviews",
-             # "percent_complete",
-             "average_time_per_interview",
-             # "task_remaining",
-             "estimated_time_remaining",
-             "exceptions",
-             "unfixed_exceptions",
-             "throughput",
-         ]
-         self.num_total_interviews = n * len(self.jobs_runner.interviews)
-
-         self.distinct_models = list(
-             set(i.model.model for i in self.jobs_runner.interviews)
-         )
-
-         self.completed_interview_by_model = defaultdict(list)
-
-     def add_completed_interview(self, result):
-         self.completed_interviews.append(result.interview_hash)
-
-         relevant_model = result.model.model
-         self.completed_interview_by_model[relevant_model].append(result.interview_hash)
-
-     def _compute_statistic(self, stat_name: str):
-         completed_tasks = self.completed_interviews
-         elapsed_time = time.time() - self.start_time
-         interviews = self.jobs_runner.total_interviews
-
-         stat_definitions = {
-             "elapsed_time": lambda: InterviewStatistic(
-                 "elapsed_time", value=elapsed_time, digits=1, units="sec."
-             ),
-             "total_interviews_requested": lambda: InterviewStatistic(
-                 "total_interviews_requested", value=len(interviews), units=""
-             ),
-             "completed_interviews": lambda: InterviewStatistic(
-                 "completed_interviews", value=len(completed_tasks), units=""
-             ),
-             "percent_complete": lambda: InterviewStatistic(
-                 "percent_complete",
-                 value=(
-                     len(completed_tasks) / len(interviews) * 100
-                     if len(interviews) > 0
-                     else 0
-                 ),
-                 digits=1,
-                 units="%",
-             ),
-             "average_time_per_interview": lambda: InterviewStatistic(
-                 "average_time_per_interview",
-                 value=elapsed_time / len(completed_tasks) if completed_tasks else 0,
-                 digits=2,
-                 units="sec.",
-             ),
-             "task_remaining": lambda: InterviewStatistic(
-                 "task_remaining", value=len(interviews) - len(completed_tasks), units=""
-             ),
-             "estimated_time_remaining": lambda: InterviewStatistic(
-                 "estimated_time_remaining",
-                 value=(
-                     (len(interviews) - len(completed_tasks))
-                     * (elapsed_time / len(completed_tasks))
-                     if len(completed_tasks) > 0
-                     else 0
-                 ),
-                 digits=1,
-                 units="sec.",
-             ),
-             "exceptions": lambda: InterviewStatistic(
-                 "exceptions",
-                 value=sum(len(i.exceptions) for i in interviews),
-                 units="",
-             ),
-             "unfixed_exceptions": lambda: InterviewStatistic(
-                 "unfixed_exceptions",
-                 value=sum(i.exceptions.num_unfixed() for i in interviews),
-                 units="",
-             ),
-             "throughput": lambda: InterviewStatistic(
-                 "throughput",
-                 value=len(completed_tasks) / elapsed_time if elapsed_time > 0 else 0,
-                 digits=2,
-                 units="interviews/sec.",
-             ),
-         }
-         return stat_definitions[stat_name]()
-
-     def create_progress_bar(self):
-         return Progress(
-             TextColumn("[progress.description]{task.description}"),
-             BarColumn(),
-             TaskProgressColumn(),
-             TextColumn("{task.completed}/{task.total}"),
-         )
-
-     def generate_model_queues_table(self):
-         table = Table(show_header=False, box=box.SIMPLE)
-         table.add_column("Info", style="cyan")
-         table.add_column("Value", style="magenta")
-         # table.add_row("Bucket collection", str(self.jobs_runner.bucket_collection))
-         for model, bucket in self.jobs_runner.bucket_collection.items():
-             table.add_row(Text(model.model, style="bold blue"), "")
-             bucket_types = ["requests_bucket", "tokens_bucket"]
-             for bucket_type in bucket_types:
-                 table.add_row(Text(" " + bucket_type, style="green"), "")
-                 # table.add_row(
-                 #     f" Current level (capacity = {round(getattr(bucket, bucket_type).capacity, 3)})",
-                 #     str(round(getattr(bucket, bucket_type).tokens, 3)),
-                 # )
-                 num_requests = getattr(bucket, bucket_type).num_requests
-                 num_released = getattr(bucket, bucket_type).num_released
-                 tokens_returned = getattr(bucket, bucket_type).tokens_returned
-                 # table.add_row(
-                 #     f" Requested",
-                 #     str(num_requests),
-                 # )
-                 # table.add_row(
-                 #     f" Completed",
-                 #     str(num_released),
-                 # )
-                 table.add_row(
-                     " Completed vs. Requested", f"{num_released} vs. {num_requests}"
-                 )
-                 table.add_row(
-                     " Added tokens (from cache)",
-                     str(tokens_returned),
-                 )
-                 if bucket_type == "tokens_bucket":
-                     rate_name = "TPM"
-                 else:
-                     rate_name = "RPM"
-                 target_rate = round(getattr(bucket, bucket_type).target_rate, 1)
-                 table.add_row(
-                     f" Empirical {rate_name} (target = {target_rate})",
-                     str(round(getattr(bucket, bucket_type).get_throughput(), 0)),
-                 )
-
-         return table
-
-     def generate_layout(self):
-         progress = self.create_progress_bar()
-         task_ids = []
-         for model in self.distinct_models:
-             task_id = progress.add_task(
-                 f"[cyan]{model}...",
-                 total=int(self.num_total_interviews / len(self.distinct_models)),
-             )
-             task_ids.append((model, task_id))
-
-         progress_height = min(5, 2 + len(self.distinct_models))
-         layout = Layout()
-
-         # Create the top row with only the progress panel
-         layout.split_column(
-             Layout(
-                 Panel(
-                     progress,
-                     title="Interview Progress",
-                     border_style="cyan",
-                     box=box.ROUNDED,
-                 ),
-                 name="progress",
-                 size=progress_height,  # Adjusted size
-             ),
-             Layout(name="bottom_row"),  # Adjusted size
-         )
-
-         # Split the bottom row into two columns for metrics and model queues
-         layout["bottom_row"].split_row(
-             Layout(
-                 Panel(
-                     self.generate_metrics_table(),
-                     title="Metrics",
-                     border_style="magenta",
-                     box=box.ROUNDED,
-                 ),
-                 name="metrics",
-             ),
-             Layout(
-                 Panel(
-                     self.generate_model_queues_table(),
-                     title="Model Queues",
-                     border_style="yellow",
-                     box=box.ROUNDED,
-                 ),
-                 name="model_queues",
-             ),
-         )
-
-         return layout, progress, task_ids
-
-     def generate_metrics_table(self):
-         table = Table(show_header=True, header_style="bold magenta", box=box.SIMPLE)
-         table.add_column("Metric", style="cyan", no_wrap=True)
-         table.add_column("Value", justify="right")
-
-         for stat_name in self.statistics:
-             pretty_name, value = list(self._compute_statistic(stat_name).items())[0]
-             # breakpoint()
-             table.add_row(pretty_name, value)
-         return table
-
-     def update_progress(self, stop_event):
-         layout, progress, task_ids = self.generate_layout()
-
-         with Live(
-             layout, refresh_per_second=int(1 / self.refresh_rate), transient=True
-         ) as live:
-             while (
-                 len(self.completed_interviews) < len(self.jobs_runner.total_interviews)
-                 and not stop_event.is_set()
-             ):
-                 completed_tasks = len(self.completed_interviews)
-                 total_tasks = len(self.jobs_runner.total_interviews)
-
-                 for model, task_id in task_ids:
-                     completed_tasks = len(self.completed_interview_by_model[model])
-                     progress.update(
-                         task_id,
-                         completed=completed_tasks,
-                         description=f"[cyan]Conducting interviews for {model}...",
-                     )
-
-                 layout["metrics"].update(
-                     Panel(
-                         self.generate_metrics_table(),
-                         title="Metrics",
-                         border_style="magenta",
-                         box=box.ROUNDED,
-                     )
-                 )
-                 layout["model_queues"].update(
-                     Panel(
-                         self.generate_model_queues_table(),
-                         title="Final Model Queues",
-                         border_style="yellow",
-                         box=box.ROUNDED,
-                     )
-                 )
-
-                 time.sleep(self.refresh_rate)
-
-             # Final update
-             for model, task_id in task_ids:
-                 completed_tasks = len(self.completed_interview_by_model[model])
-                 progress.update(
-                     task_id,
-                     completed=completed_tasks,
-                     description=f"[cyan]Conducting interviews for {model}...",
-                 )
-
-             layout["metrics"].update(
-                 Panel(
-                     self.generate_metrics_table(),
-                     title="Final Metrics",
-                     border_style="magenta",
-                     box=box.ROUNDED,
-                 )
-             )
-             live.update(layout)
-             time.sleep(1)  # Show final state for 1 second
-
-
- if __name__ == "__main__":
-     import doctest
-
-     doctest.testmod(optionflags=doctest.ELLIPSIS)
+ from __future__ import annotations
+
+ import time
+ from dataclasses import dataclass, asdict
+
+ from typing import List, DefaultDict, Optional, Type, Literal
+ from collections import UserDict, defaultdict
+
+ from rich.text import Text
+ from rich.box import SIMPLE
+ from rich.table import Table
+ from rich.live import Live
+ from rich.panel import Panel
+ from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn
+ from rich.layout import Layout
+ from rich.console import Group
+ from rich import box
+
+ from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
+ from edsl.jobs.tokens.TokenUsage import TokenUsage
+ from edsl.enums import get_token_pricing
+ from edsl.jobs.tasks.task_status_enum import TaskStatus
+
+ InterviewTokenUsageMapping = DefaultDict[str, InterviewTokenUsage]
+
+ from edsl.jobs.interviews.InterviewStatistic import InterviewStatistic
+ from edsl.jobs.interviews.InterviewStatisticsCollection import (
+     InterviewStatisticsCollection,
+ )
+ from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
+
+
+ @dataclass
+ class ModelInfo:
+     model_name: str
+     TPM_limit_k: float
+     RPM_limit_k: float
+     num_tasks_waiting: int
+     token_usage_info: dict
+
+
+ @dataclass
+ class ModelTokenUsageStats:
+     token_usage_type: str
+     details: List[dict]
+     cost: str
+
+
+ class Stats:
+     def elapsed_time(self):
+         InterviewStatistic("elapsed_time", value=elapsed_time, digits=1, units="sec.")
+
+
+ class JobsRunnerStatus:
+     def __init__(
+         self, jobs_runner: "JobsRunnerAsyncio", n: int, refresh_rate: float = 0.25
+     ):
+         self.jobs_runner = jobs_runner
+         self.start_time = time.time()
+         self.completed_interviews = []
+         self.refresh_rate = refresh_rate
+         self.statistics = [
+             "elapsed_time",
+             "total_interviews_requested",
+             "completed_interviews",
+             # "percent_complete",
+             "average_time_per_interview",
+             # "task_remaining",
+             "estimated_time_remaining",
+             "exceptions",
+             "unfixed_exceptions",
+             "throughput",
+         ]
+         self.num_total_interviews = n * len(self.jobs_runner.interviews)
+
+         self.distinct_models = list(
+             set(i.model.model for i in self.jobs_runner.interviews)
+         )
+
+         self.completed_interview_by_model = defaultdict(list)
+
+     def add_completed_interview(self, result):
+         self.completed_interviews.append(result.interview_hash)
+
+         relevant_model = result.model.model
+         self.completed_interview_by_model[relevant_model].append(result.interview_hash)
+
+     def _compute_statistic(self, stat_name: str):
+         completed_tasks = self.completed_interviews
+         elapsed_time = time.time() - self.start_time
+         interviews = self.jobs_runner.total_interviews
+
+         stat_definitions = {
+             "elapsed_time": lambda: InterviewStatistic(
+                 "elapsed_time", value=elapsed_time, digits=1, units="sec."
+             ),
+             "total_interviews_requested": lambda: InterviewStatistic(
+                 "total_interviews_requested", value=len(interviews), units=""
+             ),
+             "completed_interviews": lambda: InterviewStatistic(
+                 "completed_interviews", value=len(completed_tasks), units=""
+             ),
+             "percent_complete": lambda: InterviewStatistic(
+                 "percent_complete",
+                 value=(
+                     len(completed_tasks) / len(interviews) * 100
+                     if len(interviews) > 0
+                     else 0
+                 ),
+                 digits=1,
+                 units="%",
+             ),
+             "average_time_per_interview": lambda: InterviewStatistic(
+                 "average_time_per_interview",
+                 value=elapsed_time / len(completed_tasks) if completed_tasks else 0,
+                 digits=2,
+                 units="sec.",
+             ),
+             "task_remaining": lambda: InterviewStatistic(
+                 "task_remaining", value=len(interviews) - len(completed_tasks), units=""
+             ),
+             "estimated_time_remaining": lambda: InterviewStatistic(
+                 "estimated_time_remaining",
+                 value=(
+                     (len(interviews) - len(completed_tasks))
+                     * (elapsed_time / len(completed_tasks))
+                     if len(completed_tasks) > 0
+                     else 0
+                 ),
+                 digits=1,
+                 units="sec.",
+             ),
+             "exceptions": lambda: InterviewStatistic(
+                 "exceptions",
+                 value=sum(len(i.exceptions) for i in interviews),
+                 units="",
+             ),
+             "unfixed_exceptions": lambda: InterviewStatistic(
+                 "unfixed_exceptions",
+                 value=sum(i.exceptions.num_unfixed() for i in interviews),
+                 units="",
+             ),
+             "throughput": lambda: InterviewStatistic(
+                 "throughput",
+                 value=len(completed_tasks) / elapsed_time if elapsed_time > 0 else 0,
+                 digits=2,
+                 units="interviews/sec.",
+             ),
+         }
+         return stat_definitions[stat_name]()
+
+     def create_progress_bar(self):
+         return Progress(
+             TextColumn("[progress.description]{task.description}"),
+             BarColumn(),
+             TaskProgressColumn(),
+             TextColumn("{task.completed}/{task.total}"),
+         )
+
+     def generate_model_queues_table(self):
+         table = Table(show_header=False, box=box.SIMPLE)
+         table.add_column("Info", style="cyan")
+         table.add_column("Value", style="magenta")
+         # table.add_row("Bucket collection", str(self.jobs_runner.bucket_collection))
+         for model, bucket in self.jobs_runner.bucket_collection.items():
+             table.add_row(Text(model.model, style="bold blue"), "")
+             bucket_types = ["requests_bucket", "tokens_bucket"]
+             for bucket_type in bucket_types:
+                 table.add_row(Text(" " + bucket_type, style="green"), "")
+                 # table.add_row(
+                 #     f" Current level (capacity = {round(getattr(bucket, bucket_type).capacity, 3)})",
+                 #     str(round(getattr(bucket, bucket_type).tokens, 3)),
+                 # )
+                 num_requests = getattr(bucket, bucket_type).num_requests
+                 num_released = getattr(bucket, bucket_type).num_released
+                 tokens_returned = getattr(bucket, bucket_type).tokens_returned
+                 # table.add_row(
+                 #     f" Requested",
+                 #     str(num_requests),
+                 # )
+                 # table.add_row(
+                 #     f" Completed",
+                 #     str(num_released),
+                 # )
+                 table.add_row(
+                     " Completed vs. Requested", f"{num_released} vs. {num_requests}"
+                 )
+                 table.add_row(
+                     " Added tokens (from cache)",
+                     str(tokens_returned),
+                 )
+                 if bucket_type == "tokens_bucket":
+                     rate_name = "TPM"
+                 else:
+                     rate_name = "RPM"
+                 target_rate = round(getattr(bucket, bucket_type).target_rate, 1)
+                 table.add_row(
+                     f" Empirical {rate_name} (target = {target_rate})",
+                     str(round(getattr(bucket, bucket_type).get_throughput(), 0)),
+                 )
+
+         return table
+
+     def generate_layout(self):
+         progress = self.create_progress_bar()
+         task_ids = []
+         for model in self.distinct_models:
+             task_id = progress.add_task(
+                 f"[cyan]{model}...",
+                 total=int(self.num_total_interviews / len(self.distinct_models)),
+             )
+             task_ids.append((model, task_id))
+
+         progress_height = min(5, 2 + len(self.distinct_models))
+         layout = Layout()
+
+         # Create the top row with only the progress panel
+         layout.split_column(
+             Layout(
+                 Panel(
+                     progress,
+                     title="Interview Progress",
+                     border_style="cyan",
+                     box=box.ROUNDED,
+                 ),
+                 name="progress",
+                 size=progress_height,  # Adjusted size
+             ),
+             Layout(name="bottom_row"),  # Adjusted size
+         )
+
+         # Split the bottom row into two columns for metrics and model queues
+         layout["bottom_row"].split_row(
+             Layout(
+                 Panel(
+                     self.generate_metrics_table(),
+                     title="Metrics",
+                     border_style="magenta",
+                     box=box.ROUNDED,
+                 ),
+                 name="metrics",
+             ),
+             Layout(
+                 Panel(
+                     self.generate_model_queues_table(),
+                     title="Model Queues",
+                     border_style="yellow",
+                     box=box.ROUNDED,
+                 ),
+                 name="model_queues",
+             ),
+         )
+
+         return layout, progress, task_ids
+
+     def generate_metrics_table(self):
+         table = Table(show_header=True, header_style="bold magenta", box=box.SIMPLE)
+         table.add_column("Metric", style="cyan", no_wrap=True)
+         table.add_column("Value", justify="right")
+
+         for stat_name in self.statistics:
+             pretty_name, value = list(self._compute_statistic(stat_name).items())[0]
+             # breakpoint()
+             table.add_row(pretty_name, value)
+         return table
+
+     def update_progress(self, stop_event):
+         layout, progress, task_ids = self.generate_layout()
+
+         with Live(
+             layout, refresh_per_second=int(1 / self.refresh_rate), transient=True
+         ) as live:
+             while (
+                 len(self.completed_interviews) < len(self.jobs_runner.total_interviews)
+                 and not stop_event.is_set()
+             ):
+                 completed_tasks = len(self.completed_interviews)
+                 total_tasks = len(self.jobs_runner.total_interviews)
+
+                 for model, task_id in task_ids:
+                     completed_tasks = len(self.completed_interview_by_model[model])
+                     progress.update(
+                         task_id,
+                         completed=completed_tasks,
+                         description=f"[cyan]Conducting interviews for {model}...",
+                     )
+
+                 layout["metrics"].update(
+                     Panel(
+                         self.generate_metrics_table(),
+                         title="Metrics",
+                         border_style="magenta",
+                         box=box.ROUNDED,
+                     )
+                 )
+                 layout["model_queues"].update(
+                     Panel(
+                         self.generate_model_queues_table(),
+                         title="Final Model Queues",
+                         border_style="yellow",
+                         box=box.ROUNDED,
+                     )
+                 )
+
+                 time.sleep(self.refresh_rate)
+
+             # Final update
+             for model, task_id in task_ids:
+                 completed_tasks = len(self.completed_interview_by_model[model])
+                 progress.update(
+                     task_id,
+                     completed=completed_tasks,
+                     description=f"[cyan]Conducting interviews for {model}...",
+                 )
+
+             layout["metrics"].update(
+                 Panel(
+                     self.generate_metrics_table(),
+                     title="Final Metrics",
+                     border_style="magenta",
+                     box=box.ROUNDED,
+                 )
+             )
+             live.update(layout)
+             time.sleep(1)  # Show final state for 1 second
+
+
+ if __name__ == "__main__":
+     import doctest
+
+     doctest.testmod(optionflags=doctest.ELLIPSIS)
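
For orientation only, and not part of the package contents above: update_progress(stop_event) in JobsRunnerStatus is written to be driven from a background thread and stopped via a threading.Event. Below is a minimal, self-contained sketch of that threading pattern, using a hypothetical DummyStatus stand-in rather than the real class (which requires a live JobsRunnerAsyncio and its interviews, buckets, and exception objects).

import threading
import time


class DummyStatus:
    """Hypothetical stand-in that mirrors the update_progress(stop_event) contract."""

    def __init__(self, total: int):
        self.total = total
        self.completed = 0

    def add_completed_interview(self, result=None):
        # The real class records result.interview_hash per model; here we only count.
        self.completed += 1

    def update_progress(self, stop_event: threading.Event):
        # Poll until all work has been recorded or the caller signals a stop.
        while self.completed < self.total and not stop_event.is_set():
            print(f"completed {self.completed}/{self.total}")
            time.sleep(0.25)


if __name__ == "__main__":
    status = DummyStatus(total=3)
    stop_event = threading.Event()
    # The display loop runs on a background thread while work happens elsewhere.
    display_thread = threading.Thread(target=status.update_progress, args=(stop_event,))
    display_thread.start()

    for _ in range(3):  # simulate interviews finishing
        time.sleep(0.3)
        status.add_completed_interview()

    stop_event.set()  # tell the display loop to exit
    display_thread.join()

As in the code shown in the diff, the polling loop exits either when every interview has been recorded via add_completed_interview or when the event is set.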