edsl 0.1.37.dev2__py3-none-any.whl → 0.1.37.dev4__py3-none-any.whl

This diff shows the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (257)
  1. edsl/Base.py +303 -303
  2. edsl/BaseDiff.py +260 -260
  3. edsl/TemplateLoader.py +24 -24
  4. edsl/__init__.py +48 -48
  5. edsl/__version__.py +1 -1
  6. edsl/agents/Agent.py +804 -804
  7. edsl/agents/AgentList.py +345 -345
  8. edsl/agents/Invigilator.py +222 -222
  9. edsl/agents/InvigilatorBase.py +305 -305
  10. edsl/agents/PromptConstructor.py +312 -312
  11. edsl/agents/__init__.py +3 -3
  12. edsl/agents/descriptors.py +86 -86
  13. edsl/agents/prompt_helpers.py +129 -129
  14. edsl/auto/AutoStudy.py +117 -117
  15. edsl/auto/StageBase.py +230 -230
  16. edsl/auto/StageGenerateSurvey.py +178 -178
  17. edsl/auto/StageLabelQuestions.py +125 -125
  18. edsl/auto/StagePersona.py +61 -61
  19. edsl/auto/StagePersonaDimensionValueRanges.py +88 -88
  20. edsl/auto/StagePersonaDimensionValues.py +74 -74
  21. edsl/auto/StagePersonaDimensions.py +69 -69
  22. edsl/auto/StageQuestions.py +73 -73
  23. edsl/auto/SurveyCreatorPipeline.py +21 -21
  24. edsl/auto/utilities.py +224 -224
  25. edsl/base/Base.py +289 -289
  26. edsl/config.py +149 -149
  27. edsl/conjure/AgentConstructionMixin.py +152 -152
  28. edsl/conjure/Conjure.py +62 -62
  29. edsl/conjure/InputData.py +659 -659
  30. edsl/conjure/InputDataCSV.py +48 -48
  31. edsl/conjure/InputDataMixinQuestionStats.py +182 -182
  32. edsl/conjure/InputDataPyRead.py +91 -91
  33. edsl/conjure/InputDataSPSS.py +8 -8
  34. edsl/conjure/InputDataStata.py +8 -8
  35. edsl/conjure/QuestionOptionMixin.py +76 -76
  36. edsl/conjure/QuestionTypeMixin.py +23 -23
  37. edsl/conjure/RawQuestion.py +65 -65
  38. edsl/conjure/SurveyResponses.py +7 -7
  39. edsl/conjure/__init__.py +9 -9
  40. edsl/conjure/naming_utilities.py +263 -263
  41. edsl/conjure/utilities.py +201 -201
  42. edsl/conversation/Conversation.py +238 -238
  43. edsl/conversation/car_buying.py +58 -58
  44. edsl/conversation/mug_negotiation.py +81 -81
  45. edsl/conversation/next_speaker_utilities.py +93 -93
  46. edsl/coop/PriceFetcher.py +54 -54
  47. edsl/coop/__init__.py +2 -2
  48. edsl/coop/coop.py +827 -824
  49. edsl/coop/utils.py +131 -131
  50. edsl/data/Cache.py +527 -527
  51. edsl/data/CacheEntry.py +228 -228
  52. edsl/data/CacheHandler.py +149 -149
  53. edsl/data/RemoteCacheSync.py +97 -97
  54. edsl/data/SQLiteDict.py +292 -292
  55. edsl/data/__init__.py +4 -4
  56. edsl/data/orm.py +10 -10
  57. edsl/data_transfer_models.py +73 -73
  58. edsl/enums.py +173 -173
  59. edsl/exceptions/__init__.py +50 -50
  60. edsl/exceptions/agents.py +40 -40
  61. edsl/exceptions/configuration.py +16 -16
  62. edsl/exceptions/coop.py +10 -10
  63. edsl/exceptions/data.py +14 -14
  64. edsl/exceptions/general.py +34 -34
  65. edsl/exceptions/jobs.py +33 -33
  66. edsl/exceptions/language_models.py +63 -63
  67. edsl/exceptions/prompts.py +15 -15
  68. edsl/exceptions/questions.py +91 -91
  69. edsl/exceptions/results.py +26 -26
  70. edsl/exceptions/surveys.py +34 -34
  71. edsl/inference_services/AnthropicService.py +87 -87
  72. edsl/inference_services/AwsBedrock.py +120 -115
  73. edsl/inference_services/AzureAI.py +217 -217
  74. edsl/inference_services/DeepInfraService.py +18 -18
  75. edsl/inference_services/GoogleService.py +156 -156
  76. edsl/inference_services/GroqService.py +20 -20
  77. edsl/inference_services/InferenceServiceABC.py +147 -147
  78. edsl/inference_services/InferenceServicesCollection.py +74 -74
  79. edsl/inference_services/MistralAIService.py +123 -123
  80. edsl/inference_services/OllamaService.py +18 -18
  81. edsl/inference_services/OpenAIService.py +224 -224
  82. edsl/inference_services/TestService.py +89 -89
  83. edsl/inference_services/TogetherAIService.py +170 -170
  84. edsl/inference_services/models_available_cache.py +118 -118
  85. edsl/inference_services/rate_limits_cache.py +25 -25
  86. edsl/inference_services/registry.py +39 -39
  87. edsl/inference_services/write_available.py +10 -10
  88. edsl/jobs/Answers.py +56 -56
  89. edsl/jobs/Jobs.py +1135 -1112
  90. edsl/jobs/__init__.py +1 -1
  91. edsl/jobs/buckets/BucketCollection.py +63 -63
  92. edsl/jobs/buckets/ModelBuckets.py +65 -65
  93. edsl/jobs/buckets/TokenBucket.py +248 -248
  94. edsl/jobs/interviews/Interview.py +661 -661
  95. edsl/jobs/interviews/InterviewExceptionCollection.py +99 -99
  96. edsl/jobs/interviews/InterviewExceptionEntry.py +182 -182
  97. edsl/jobs/interviews/InterviewStatistic.py +63 -63
  98. edsl/jobs/interviews/InterviewStatisticsCollection.py +25 -25
  99. edsl/jobs/interviews/InterviewStatusDictionary.py +78 -78
  100. edsl/jobs/interviews/InterviewStatusLog.py +92 -92
  101. edsl/jobs/interviews/ReportErrors.py +66 -66
  102. edsl/jobs/interviews/interview_status_enum.py +9 -9
  103. edsl/jobs/runners/JobsRunnerAsyncio.py +338 -338
  104. edsl/jobs/runners/JobsRunnerStatus.py +332 -332
  105. edsl/jobs/tasks/QuestionTaskCreator.py +242 -242
  106. edsl/jobs/tasks/TaskCreators.py +64 -64
  107. edsl/jobs/tasks/TaskHistory.py +441 -441
  108. edsl/jobs/tasks/TaskStatusLog.py +23 -23
  109. edsl/jobs/tasks/task_status_enum.py +163 -163
  110. edsl/jobs/tokens/InterviewTokenUsage.py +27 -27
  111. edsl/jobs/tokens/TokenUsage.py +34 -34
  112. edsl/language_models/LanguageModel.py +718 -718
  113. edsl/language_models/ModelList.py +102 -102
  114. edsl/language_models/RegisterLanguageModelsMeta.py +184 -184
  115. edsl/language_models/__init__.py +2 -2
  116. edsl/language_models/fake_openai_call.py +15 -15
  117. edsl/language_models/fake_openai_service.py +61 -61
  118. edsl/language_models/registry.py +137 -137
  119. edsl/language_models/repair.py +156 -156
  120. edsl/language_models/unused/ReplicateBase.py +83 -83
  121. edsl/language_models/utilities.py +64 -64
  122. edsl/notebooks/Notebook.py +259 -259
  123. edsl/notebooks/__init__.py +1 -1
  124. edsl/prompts/Prompt.py +353 -353
  125. edsl/prompts/__init__.py +2 -2
  126. edsl/questions/AnswerValidatorMixin.py +289 -289
  127. edsl/questions/QuestionBase.py +616 -616
  128. edsl/questions/QuestionBaseGenMixin.py +161 -161
  129. edsl/questions/QuestionBasePromptsMixin.py +266 -266
  130. edsl/questions/QuestionBudget.py +227 -227
  131. edsl/questions/QuestionCheckBox.py +359 -359
  132. edsl/questions/QuestionExtract.py +183 -183
  133. edsl/questions/QuestionFreeText.py +114 -114
  134. edsl/questions/QuestionFunctional.py +159 -159
  135. edsl/questions/QuestionList.py +231 -231
  136. edsl/questions/QuestionMultipleChoice.py +286 -286
  137. edsl/questions/QuestionNumerical.py +153 -153
  138. edsl/questions/QuestionRank.py +324 -324
  139. edsl/questions/Quick.py +41 -41
  140. edsl/questions/RegisterQuestionsMeta.py +71 -71
  141. edsl/questions/ResponseValidatorABC.py +174 -174
  142. edsl/questions/SimpleAskMixin.py +73 -73
  143. edsl/questions/__init__.py +26 -26
  144. edsl/questions/compose_questions.py +98 -98
  145. edsl/questions/decorators.py +21 -21
  146. edsl/questions/derived/QuestionLikertFive.py +76 -76
  147. edsl/questions/derived/QuestionLinearScale.py +87 -87
  148. edsl/questions/derived/QuestionTopK.py +91 -91
  149. edsl/questions/derived/QuestionYesNo.py +82 -82
  150. edsl/questions/descriptors.py +418 -418
  151. edsl/questions/prompt_templates/question_budget.jinja +13 -13
  152. edsl/questions/prompt_templates/question_checkbox.jinja +32 -32
  153. edsl/questions/prompt_templates/question_extract.jinja +11 -11
  154. edsl/questions/prompt_templates/question_free_text.jinja +3 -3
  155. edsl/questions/prompt_templates/question_linear_scale.jinja +11 -11
  156. edsl/questions/prompt_templates/question_list.jinja +17 -17
  157. edsl/questions/prompt_templates/question_multiple_choice.jinja +33 -33
  158. edsl/questions/prompt_templates/question_numerical.jinja +36 -36
  159. edsl/questions/question_registry.py +147 -147
  160. edsl/questions/settings.py +12 -12
  161. edsl/questions/templates/budget/answering_instructions.jinja +7 -7
  162. edsl/questions/templates/budget/question_presentation.jinja +7 -7
  163. edsl/questions/templates/checkbox/answering_instructions.jinja +10 -10
  164. edsl/questions/templates/checkbox/question_presentation.jinja +22 -22
  165. edsl/questions/templates/extract/answering_instructions.jinja +7 -7
  166. edsl/questions/templates/likert_five/answering_instructions.jinja +10 -10
  167. edsl/questions/templates/likert_five/question_presentation.jinja +11 -11
  168. edsl/questions/templates/linear_scale/answering_instructions.jinja +5 -5
  169. edsl/questions/templates/linear_scale/question_presentation.jinja +5 -5
  170. edsl/questions/templates/list/answering_instructions.jinja +3 -3
  171. edsl/questions/templates/list/question_presentation.jinja +5 -5
  172. edsl/questions/templates/multiple_choice/answering_instructions.jinja +9 -9
  173. edsl/questions/templates/multiple_choice/question_presentation.jinja +11 -11
  174. edsl/questions/templates/numerical/answering_instructions.jinja +6 -6
  175. edsl/questions/templates/numerical/question_presentation.jinja +6 -6
  176. edsl/questions/templates/rank/answering_instructions.jinja +11 -11
  177. edsl/questions/templates/rank/question_presentation.jinja +15 -15
  178. edsl/questions/templates/top_k/answering_instructions.jinja +8 -8
  179. edsl/questions/templates/top_k/question_presentation.jinja +22 -22
  180. edsl/questions/templates/yes_no/answering_instructions.jinja +6 -6
  181. edsl/questions/templates/yes_no/question_presentation.jinja +11 -11
  182. edsl/results/Dataset.py +293 -293
  183. edsl/results/DatasetExportMixin.py +693 -693
  184. edsl/results/DatasetTree.py +145 -145
  185. edsl/results/Result.py +435 -435
  186. edsl/results/Results.py +1160 -1160
  187. edsl/results/ResultsDBMixin.py +238 -238
  188. edsl/results/ResultsExportMixin.py +43 -43
  189. edsl/results/ResultsFetchMixin.py +33 -33
  190. edsl/results/ResultsGGMixin.py +121 -121
  191. edsl/results/ResultsToolsMixin.py +98 -98
  192. edsl/results/Selector.py +118 -118
  193. edsl/results/__init__.py +2 -2
  194. edsl/results/tree_explore.py +115 -115
  195. edsl/scenarios/FileStore.py +458 -458
  196. edsl/scenarios/Scenario.py +510 -510
  197. edsl/scenarios/ScenarioHtmlMixin.py +59 -59
  198. edsl/scenarios/ScenarioList.py +1101 -1101
  199. edsl/scenarios/ScenarioListExportMixin.py +52 -52
  200. edsl/scenarios/ScenarioListPdfMixin.py +261 -261
  201. edsl/scenarios/__init__.py +4 -4
  202. edsl/shared.py +1 -1
  203. edsl/study/ObjectEntry.py +173 -173
  204. edsl/study/ProofOfWork.py +113 -113
  205. edsl/study/SnapShot.py +80 -80
  206. edsl/study/Study.py +528 -528
  207. edsl/study/__init__.py +4 -4
  208. edsl/surveys/DAG.py +148 -148
  209. edsl/surveys/Memory.py +31 -31
  210. edsl/surveys/MemoryPlan.py +244 -244
  211. edsl/surveys/Rule.py +324 -324
  212. edsl/surveys/RuleCollection.py +387 -387
  213. edsl/surveys/Survey.py +1772 -1772
  214. edsl/surveys/SurveyCSS.py +261 -261
  215. edsl/surveys/SurveyExportMixin.py +259 -259
  216. edsl/surveys/SurveyFlowVisualizationMixin.py +121 -121
  217. edsl/surveys/SurveyQualtricsImport.py +284 -284
  218. edsl/surveys/__init__.py +3 -3
  219. edsl/surveys/base.py +53 -53
  220. edsl/surveys/descriptors.py +56 -56
  221. edsl/surveys/instructions/ChangeInstruction.py +47 -47
  222. edsl/surveys/instructions/Instruction.py +51 -51
  223. edsl/surveys/instructions/InstructionCollection.py +77 -77
  224. edsl/templates/error_reporting/base.html +23 -23
  225. edsl/templates/error_reporting/exceptions_by_model.html +34 -34
  226. edsl/templates/error_reporting/exceptions_by_question_name.html +16 -16
  227. edsl/templates/error_reporting/exceptions_by_type.html +16 -16
  228. edsl/templates/error_reporting/interview_details.html +115 -115
  229. edsl/templates/error_reporting/interviews.html +9 -9
  230. edsl/templates/error_reporting/overview.html +4 -4
  231. edsl/templates/error_reporting/performance_plot.html +1 -1
  232. edsl/templates/error_reporting/report.css +73 -73
  233. edsl/templates/error_reporting/report.html +117 -117
  234. edsl/templates/error_reporting/report.js +25 -25
  235. edsl/tools/__init__.py +1 -1
  236. edsl/tools/clusters.py +192 -192
  237. edsl/tools/embeddings.py +27 -27
  238. edsl/tools/embeddings_plotting.py +118 -118
  239. edsl/tools/plotting.py +112 -112
  240. edsl/tools/summarize.py +18 -18
  241. edsl/utilities/SystemInfo.py +28 -28
  242. edsl/utilities/__init__.py +22 -22
  243. edsl/utilities/ast_utilities.py +25 -25
  244. edsl/utilities/data/Registry.py +6 -6
  245. edsl/utilities/data/__init__.py +1 -1
  246. edsl/utilities/data/scooter_results.json +1 -1
  247. edsl/utilities/decorators.py +77 -77
  248. edsl/utilities/gcp_bucket/cloud_storage.py +96 -96
  249. edsl/utilities/interface.py +627 -627
  250. edsl/utilities/repair_functions.py +28 -28
  251. edsl/utilities/restricted_python.py +70 -70
  252. edsl/utilities/utilities.py +391 -391
  253. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev4.dist-info}/LICENSE +21 -21
  254. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev4.dist-info}/METADATA +1 -1
  255. edsl-0.1.37.dev4.dist-info/RECORD +279 -0
  256. edsl-0.1.37.dev2.dist-info/RECORD +0 -279
  257. {edsl-0.1.37.dev2.dist-info → edsl-0.1.37.dev4.dist-info}/WHEEL +0 -0
@@ -1,332 +1,332 @@
-from __future__ import annotations
-
-import time
-from dataclasses import dataclass, asdict
-
-from typing import List, DefaultDict, Optional, Type, Literal
-from collections import UserDict, defaultdict
-
-from rich.text import Text
-from rich.box import SIMPLE
-from rich.table import Table
-from rich.live import Live
-from rich.panel import Panel
-from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn
-from rich.layout import Layout
-from rich.console import Group
-from rich import box
-
-from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
-from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
-from edsl.jobs.tokens.TokenUsage import TokenUsage
-from edsl.enums import get_token_pricing
-from edsl.jobs.tasks.task_status_enum import TaskStatus
-
-InterviewTokenUsageMapping = DefaultDict[str, InterviewTokenUsage]
-
-from edsl.jobs.interviews.InterviewStatistic import InterviewStatistic
-from edsl.jobs.interviews.InterviewStatisticsCollection import (
-    InterviewStatisticsCollection,
-)
-from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
-
-
-@dataclass
-class ModelInfo:
-    model_name: str
-    TPM_limit_k: float
-    RPM_limit_k: float
-    num_tasks_waiting: int
-    token_usage_info: dict
-
-
-@dataclass
-class ModelTokenUsageStats:
-    token_usage_type: str
-    details: List[dict]
-    cost: str
-
-
-class Stats:
-    def elapsed_time(self):
-        InterviewStatistic("elapsed_time", value=elapsed_time, digits=1, units="sec.")
-
-
-class JobsRunnerStatus:
-    def __init__(
-        self, jobs_runner: "JobsRunnerAsyncio", n: int, refresh_rate: float = 0.25
-    ):
-        self.jobs_runner = jobs_runner
-        self.start_time = time.time()
-        self.completed_interviews = []
-        self.refresh_rate = refresh_rate
-        self.statistics = [
-            "elapsed_time",
-            "total_interviews_requested",
-            "completed_interviews",
-            # "percent_complete",
-            "average_time_per_interview",
-            # "task_remaining",
-            "estimated_time_remaining",
-            "exceptions",
-            "unfixed_exceptions",
-            "throughput",
-        ]
-        self.num_total_interviews = n * len(self.jobs_runner.interviews)
-
-        self.distinct_models = list(
-            set(i.model.model for i in self.jobs_runner.interviews)
-        )
-
-        self.completed_interview_by_model = defaultdict(list)
-
-    def add_completed_interview(self, result):
-        self.completed_interviews.append(result.interview_hash)
-
-        relevant_model = result.model.model
-        self.completed_interview_by_model[relevant_model].append(result.interview_hash)
-
-    def _compute_statistic(self, stat_name: str):
-        completed_tasks = self.completed_interviews
-        elapsed_time = time.time() - self.start_time
-        interviews = self.jobs_runner.total_interviews
-
-        stat_definitions = {
-            "elapsed_time": lambda: InterviewStatistic(
-                "elapsed_time", value=elapsed_time, digits=1, units="sec."
-            ),
-            "total_interviews_requested": lambda: InterviewStatistic(
-                "total_interviews_requested", value=len(interviews), units=""
-            ),
-            "completed_interviews": lambda: InterviewStatistic(
-                "completed_interviews", value=len(completed_tasks), units=""
-            ),
-            "percent_complete": lambda: InterviewStatistic(
-                "percent_complete",
-                value=(
-                    len(completed_tasks) / len(interviews) * 100
-                    if len(interviews) > 0
-                    else 0
-                ),
-                digits=1,
-                units="%",
-            ),
-            "average_time_per_interview": lambda: InterviewStatistic(
-                "average_time_per_interview",
-                value=elapsed_time / len(completed_tasks) if completed_tasks else 0,
-                digits=2,
-                units="sec.",
-            ),
-            "task_remaining": lambda: InterviewStatistic(
-                "task_remaining", value=len(interviews) - len(completed_tasks), units=""
-            ),
-            "estimated_time_remaining": lambda: InterviewStatistic(
-                "estimated_time_remaining",
-                value=(
-                    (len(interviews) - len(completed_tasks))
-                    * (elapsed_time / len(completed_tasks))
-                    if len(completed_tasks) > 0
-                    else 0
-                ),
-                digits=1,
-                units="sec.",
-            ),
-            "exceptions": lambda: InterviewStatistic(
-                "exceptions",
-                value=sum(len(i.exceptions) for i in interviews),
-                units="",
-            ),
-            "unfixed_exceptions": lambda: InterviewStatistic(
-                "unfixed_exceptions",
-                value=sum(i.exceptions.num_unfixed() for i in interviews),
-                units="",
-            ),
-            "throughput": lambda: InterviewStatistic(
-                "throughput",
-                value=len(completed_tasks) / elapsed_time if elapsed_time > 0 else 0,
-                digits=2,
-                units="interviews/sec.",
-            ),
-        }
-        return stat_definitions[stat_name]()
-
-    def create_progress_bar(self):
-        return Progress(
-            TextColumn("[progress.description]{task.description}"),
-            BarColumn(),
-            TaskProgressColumn(),
-            TextColumn("{task.completed}/{task.total}"),
-        )
-
-    def generate_model_queues_table(self):
-        table = Table(show_header=False, box=box.SIMPLE)
-        table.add_column("Info", style="cyan")
-        table.add_column("Value", style="magenta")
-        # table.add_row("Bucket collection", str(self.jobs_runner.bucket_collection))
-        for model, bucket in self.jobs_runner.bucket_collection.items():
-            table.add_row(Text(model.model, style="bold blue"), "")
-            bucket_types = ["requests_bucket", "tokens_bucket"]
-            for bucket_type in bucket_types:
-                table.add_row(Text(" " + bucket_type, style="green"), "")
-                # table.add_row(
-                # f" Current level (capacity = {round(getattr(bucket, bucket_type).capacity, 3)})",
-                # str(round(getattr(bucket, bucket_type).tokens, 3)),
-                # )
-                num_requests = getattr(bucket, bucket_type).num_requests
-                num_released = getattr(bucket, bucket_type).num_released
-                tokens_returned = getattr(bucket, bucket_type).tokens_returned
-                # table.add_row(
-                # f" Requested",
-                # str(num_requests),
-                # )
-                # table.add_row(
-                # f" Completed",
-                # str(num_released),
-                # )
-                table.add_row(
-                    " Completed vs. Requested", f"{num_released} vs. {num_requests}"
-                )
-                table.add_row(
-                    " Added tokens (from cache)",
-                    str(tokens_returned),
-                )
-                if bucket_type == "tokens_bucket":
-                    rate_name = "TPM"
-                else:
-                    rate_name = "RPM"
-                target_rate = round(getattr(bucket, bucket_type).target_rate, 1)
-                table.add_row(
-                    f" Empirical {rate_name} (target = {target_rate})",
-                    str(round(getattr(bucket, bucket_type).get_throughput(), 0)),
-                )
-
-        return table
-
-    def generate_layout(self):
-        progress = self.create_progress_bar()
-        task_ids = []
-        for model in self.distinct_models:
-            task_id = progress.add_task(
-                f"[cyan]{model}...",
-                total=int(self.num_total_interviews / len(self.distinct_models)),
-            )
-            task_ids.append((model, task_id))
-
-        progress_height = min(5, 2 + len(self.distinct_models))
-        layout = Layout()
-
-        # Create the top row with only the progress panel
-        layout.split_column(
-            Layout(
-                Panel(
-                    progress,
-                    title="Interview Progress",
-                    border_style="cyan",
-                    box=box.ROUNDED,
-                ),
-                name="progress",
-                size=progress_height, # Adjusted size
-            ),
-            Layout(name="bottom_row"), # Adjusted size
-        )
-
-        # Split the bottom row into two columns for metrics and model queues
-        layout["bottom_row"].split_row(
-            Layout(
-                Panel(
-                    self.generate_metrics_table(),
-                    title="Metrics",
-                    border_style="magenta",
-                    box=box.ROUNDED,
-                ),
-                name="metrics",
-            ),
-            Layout(
-                Panel(
-                    self.generate_model_queues_table(),
-                    title="Model Queues",
-                    border_style="yellow",
-                    box=box.ROUNDED,
-                ),
-                name="model_queues",
-            ),
-        )
-
-        return layout, progress, task_ids
-
-    def generate_metrics_table(self):
-        table = Table(show_header=True, header_style="bold magenta", box=box.SIMPLE)
-        table.add_column("Metric", style="cyan", no_wrap=True)
-        table.add_column("Value", justify="right")
-
-        for stat_name in self.statistics:
-            pretty_name, value = list(self._compute_statistic(stat_name).items())[0]
-            # breakpoint()
-            table.add_row(pretty_name, value)
-        return table
-
-    def update_progress(self, stop_event):
-        layout, progress, task_ids = self.generate_layout()
-
-        with Live(
-            layout, refresh_per_second=int(1 / self.refresh_rate), transient=True
-        ) as live:
-            while (
-                len(self.completed_interviews) < len(self.jobs_runner.total_interviews)
-                and not stop_event.is_set()
-            ):
-                completed_tasks = len(self.completed_interviews)
-                total_tasks = len(self.jobs_runner.total_interviews)
-
-                for model, task_id in task_ids:
-                    completed_tasks = len(self.completed_interview_by_model[model])
-                    progress.update(
-                        task_id,
-                        completed=completed_tasks,
-                        description=f"[cyan]Conducting interviews for {model}...",
-                    )
-
-                layout["metrics"].update(
-                    Panel(
-                        self.generate_metrics_table(),
-                        title="Metrics",
-                        border_style="magenta",
-                        box=box.ROUNDED,
-                    )
-                )
-                layout["model_queues"].update(
-                    Panel(
-                        self.generate_model_queues_table(),
-                        title="Final Model Queues",
-                        border_style="yellow",
-                        box=box.ROUNDED,
-                    )
-                )
-
-                time.sleep(self.refresh_rate)
-
-            # Final update
-            for model, task_id in task_ids:
-                completed_tasks = len(self.completed_interview_by_model[model])
-                progress.update(
-                    task_id,
-                    completed=completed_tasks,
-                    description=f"[cyan]Conducting interviews for {model}...",
-                )
-
-            layout["metrics"].update(
-                Panel(
-                    self.generate_metrics_table(),
-                    title="Final Metrics",
-                    border_style="magenta",
-                    box=box.ROUNDED,
-                )
-            )
-            live.update(layout)
-            time.sleep(1) # Show final state for 1 second
-
-
-if __name__ == "__main__":
-    import doctest
-
-    doctest.testmod(optionflags=doctest.ELLIPSIS)
+from __future__ import annotations
+
+import time
+from dataclasses import dataclass, asdict
+
+from typing import List, DefaultDict, Optional, Type, Literal
+from collections import UserDict, defaultdict
+
+from rich.text import Text
+from rich.box import SIMPLE
+from rich.table import Table
+from rich.live import Live
+from rich.panel import Panel
+from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn
+from rich.layout import Layout
+from rich.console import Group
+from rich import box
+
+from edsl.jobs.interviews.InterviewStatusDictionary import InterviewStatusDictionary
+from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
+from edsl.jobs.tokens.TokenUsage import TokenUsage
+from edsl.enums import get_token_pricing
+from edsl.jobs.tasks.task_status_enum import TaskStatus
+
+InterviewTokenUsageMapping = DefaultDict[str, InterviewTokenUsage]
+
+from edsl.jobs.interviews.InterviewStatistic import InterviewStatistic
+from edsl.jobs.interviews.InterviewStatisticsCollection import (
+    InterviewStatisticsCollection,
+)
+from edsl.jobs.tokens.InterviewTokenUsage import InterviewTokenUsage
+
+
+@dataclass
+class ModelInfo:
+    model_name: str
+    TPM_limit_k: float
+    RPM_limit_k: float
+    num_tasks_waiting: int
+    token_usage_info: dict
+
+
+@dataclass
+class ModelTokenUsageStats:
+    token_usage_type: str
+    details: List[dict]
+    cost: str
+
+
+class Stats:
+    def elapsed_time(self):
+        InterviewStatistic("elapsed_time", value=elapsed_time, digits=1, units="sec.")
+
+
+class JobsRunnerStatus:
+    def __init__(
+        self, jobs_runner: "JobsRunnerAsyncio", n: int, refresh_rate: float = 0.25
+    ):
+        self.jobs_runner = jobs_runner
+        self.start_time = time.time()
+        self.completed_interviews = []
+        self.refresh_rate = refresh_rate
+        self.statistics = [
+            "elapsed_time",
+            "total_interviews_requested",
+            "completed_interviews",
+            # "percent_complete",
+            "average_time_per_interview",
+            # "task_remaining",
+            "estimated_time_remaining",
+            "exceptions",
+            "unfixed_exceptions",
+            "throughput",
+        ]
+        self.num_total_interviews = n * len(self.jobs_runner.interviews)
+
+        self.distinct_models = list(
+            set(i.model.model for i in self.jobs_runner.interviews)
+        )
+
+        self.completed_interview_by_model = defaultdict(list)
+
+    def add_completed_interview(self, result):
+        self.completed_interviews.append(result.interview_hash)
+
+        relevant_model = result.model.model
+        self.completed_interview_by_model[relevant_model].append(result.interview_hash)
+
+    def _compute_statistic(self, stat_name: str):
+        completed_tasks = self.completed_interviews
+        elapsed_time = time.time() - self.start_time
+        interviews = self.jobs_runner.total_interviews
+
+        stat_definitions = {
+            "elapsed_time": lambda: InterviewStatistic(
+                "elapsed_time", value=elapsed_time, digits=1, units="sec."
+            ),
+            "total_interviews_requested": lambda: InterviewStatistic(
+                "total_interviews_requested", value=len(interviews), units=""
+            ),
+            "completed_interviews": lambda: InterviewStatistic(
+                "completed_interviews", value=len(completed_tasks), units=""
+            ),
+            "percent_complete": lambda: InterviewStatistic(
+                "percent_complete",
+                value=(
+                    len(completed_tasks) / len(interviews) * 100
+                    if len(interviews) > 0
+                    else 0
+                ),
+                digits=1,
+                units="%",
+            ),
+            "average_time_per_interview": lambda: InterviewStatistic(
+                "average_time_per_interview",
+                value=elapsed_time / len(completed_tasks) if completed_tasks else 0,
+                digits=2,
+                units="sec.",
+            ),
+            "task_remaining": lambda: InterviewStatistic(
+                "task_remaining", value=len(interviews) - len(completed_tasks), units=""
+            ),
+            "estimated_time_remaining": lambda: InterviewStatistic(
+                "estimated_time_remaining",
+                value=(
+                    (len(interviews) - len(completed_tasks))
+                    * (elapsed_time / len(completed_tasks))
+                    if len(completed_tasks) > 0
+                    else 0
+                ),
+                digits=1,
+                units="sec.",
+            ),
+            "exceptions": lambda: InterviewStatistic(
+                "exceptions",
+                value=sum(len(i.exceptions) for i in interviews),
+                units="",
+            ),
+            "unfixed_exceptions": lambda: InterviewStatistic(
+                "unfixed_exceptions",
+                value=sum(i.exceptions.num_unfixed() for i in interviews),
+                units="",
+            ),
+            "throughput": lambda: InterviewStatistic(
+                "throughput",
+                value=len(completed_tasks) / elapsed_time if elapsed_time > 0 else 0,
+                digits=2,
+                units="interviews/sec.",
+            ),
+        }
+        return stat_definitions[stat_name]()
+
+    def create_progress_bar(self):
+        return Progress(
+            TextColumn("[progress.description]{task.description}"),
+            BarColumn(),
+            TaskProgressColumn(),
+            TextColumn("{task.completed}/{task.total}"),
+        )
+
+    def generate_model_queues_table(self):
+        table = Table(show_header=False, box=box.SIMPLE)
+        table.add_column("Info", style="cyan")
+        table.add_column("Value", style="magenta")
+        # table.add_row("Bucket collection", str(self.jobs_runner.bucket_collection))
+        for model, bucket in self.jobs_runner.bucket_collection.items():
+            table.add_row(Text(model.model, style="bold blue"), "")
+            bucket_types = ["requests_bucket", "tokens_bucket"]
+            for bucket_type in bucket_types:
+                table.add_row(Text(" " + bucket_type, style="green"), "")
+                # table.add_row(
+                # f" Current level (capacity = {round(getattr(bucket, bucket_type).capacity, 3)})",
+                # str(round(getattr(bucket, bucket_type).tokens, 3)),
+                # )
+                num_requests = getattr(bucket, bucket_type).num_requests
+                num_released = getattr(bucket, bucket_type).num_released
+                tokens_returned = getattr(bucket, bucket_type).tokens_returned
+                # table.add_row(
+                # f" Requested",
+                # str(num_requests),
+                # )
+                # table.add_row(
+                # f" Completed",
+                # str(num_released),
+                # )
+                table.add_row(
+                    " Completed vs. Requested", f"{num_released} vs. {num_requests}"
+                )
+                table.add_row(
+                    " Added tokens (from cache)",
+                    str(tokens_returned),
+                )
+                if bucket_type == "tokens_bucket":
+                    rate_name = "TPM"
+                else:
+                    rate_name = "RPM"
+                target_rate = round(getattr(bucket, bucket_type).target_rate, 1)
+                table.add_row(
+                    f" Empirical {rate_name} (target = {target_rate})",
+                    str(round(getattr(bucket, bucket_type).get_throughput(), 0)),
+                )
+
+        return table
+
+    def generate_layout(self):
+        progress = self.create_progress_bar()
+        task_ids = []
+        for model in self.distinct_models:
+            task_id = progress.add_task(
+                f"[cyan]{model}...",
+                total=int(self.num_total_interviews / len(self.distinct_models)),
+            )
+            task_ids.append((model, task_id))
+
+        progress_height = min(5, 2 + len(self.distinct_models))
+        layout = Layout()
+
+        # Create the top row with only the progress panel
+        layout.split_column(
+            Layout(
+                Panel(
+                    progress,
+                    title="Interview Progress",
+                    border_style="cyan",
+                    box=box.ROUNDED,
+                ),
+                name="progress",
+                size=progress_height, # Adjusted size
+            ),
+            Layout(name="bottom_row"), # Adjusted size
+        )
+
+        # Split the bottom row into two columns for metrics and model queues
+        layout["bottom_row"].split_row(
+            Layout(
+                Panel(
+                    self.generate_metrics_table(),
+                    title="Metrics",
+                    border_style="magenta",
+                    box=box.ROUNDED,
+                ),
+                name="metrics",
+            ),
+            Layout(
+                Panel(
+                    self.generate_model_queues_table(),
+                    title="Model Queues",
+                    border_style="yellow",
+                    box=box.ROUNDED,
+                ),
+                name="model_queues",
+            ),
+        )
+
+        return layout, progress, task_ids
+
+    def generate_metrics_table(self):
+        table = Table(show_header=True, header_style="bold magenta", box=box.SIMPLE)
+        table.add_column("Metric", style="cyan", no_wrap=True)
+        table.add_column("Value", justify="right")
+
+        for stat_name in self.statistics:
+            pretty_name, value = list(self._compute_statistic(stat_name).items())[0]
+            # breakpoint()
+            table.add_row(pretty_name, value)
+        return table
+
+    def update_progress(self, stop_event):
+        layout, progress, task_ids = self.generate_layout()
+
+        with Live(
+            layout, refresh_per_second=int(1 / self.refresh_rate), transient=True
+        ) as live:
+            while (
+                len(self.completed_interviews) < len(self.jobs_runner.total_interviews)
+                and not stop_event.is_set()
+            ):
+                completed_tasks = len(self.completed_interviews)
+                total_tasks = len(self.jobs_runner.total_interviews)
+
+                for model, task_id in task_ids:
+                    completed_tasks = len(self.completed_interview_by_model[model])
+                    progress.update(
+                        task_id,
+                        completed=completed_tasks,
+                        description=f"[cyan]Conducting interviews for {model}...",
+                    )
+
+                layout["metrics"].update(
+                    Panel(
+                        self.generate_metrics_table(),
+                        title="Metrics",
+                        border_style="magenta",
+                        box=box.ROUNDED,
+                    )
+                )
+                layout["model_queues"].update(
+                    Panel(
+                        self.generate_model_queues_table(),
+                        title="Final Model Queues",
+                        border_style="yellow",
+                        box=box.ROUNDED,
+                    )
+                )
+
+                time.sleep(self.refresh_rate)
+
+            # Final update
+            for model, task_id in task_ids:
+                completed_tasks = len(self.completed_interview_by_model[model])
+                progress.update(
+                    task_id,
+                    completed=completed_tasks,
+                    description=f"[cyan]Conducting interviews for {model}...",
+                )
+
+            layout["metrics"].update(
+                Panel(
+                    self.generate_metrics_table(),
+                    title="Final Metrics",
+                    border_style="magenta",
+                    box=box.ROUNDED,
+                )
+            )
+            live.update(layout)
+            time.sleep(1) # Show final state for 1 second
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod(optionflags=doctest.ELLIPSIS)