edsl 0.1.39.dev1__py3-none-any.whl → 0.1.39.dev2__py3-none-any.whl
This diff shows the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- edsl/Base.py +169 -116
- edsl/__init__.py +14 -6
- edsl/__version__.py +1 -1
- edsl/agents/Agent.py +358 -146
- edsl/agents/AgentList.py +211 -73
- edsl/agents/Invigilator.py +88 -36
- edsl/agents/InvigilatorBase.py +59 -70
- edsl/agents/PromptConstructor.py +117 -219
- edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
- edsl/agents/QuestionOptionProcessor.py +172 -0
- edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
- edsl/agents/__init__.py +0 -1
- edsl/agents/prompt_helpers.py +3 -3
- edsl/config.py +22 -2
- edsl/conversation/car_buying.py +2 -1
- edsl/coop/CoopFunctionsMixin.py +15 -0
- edsl/coop/ExpectedParrotKeyHandler.py +125 -0
- edsl/coop/PriceFetcher.py +1 -1
- edsl/coop/coop.py +104 -42
- edsl/coop/utils.py +14 -14
- edsl/data/Cache.py +21 -14
- edsl/data/CacheEntry.py +12 -15
- edsl/data/CacheHandler.py +33 -12
- edsl/data/__init__.py +4 -3
- edsl/data_transfer_models.py +2 -1
- edsl/enums.py +20 -0
- edsl/exceptions/__init__.py +50 -50
- edsl/exceptions/agents.py +12 -0
- edsl/exceptions/inference_services.py +5 -0
- edsl/exceptions/questions.py +24 -6
- edsl/exceptions/scenarios.py +7 -0
- edsl/inference_services/AnthropicService.py +0 -3
- edsl/inference_services/AvailableModelCacheHandler.py +184 -0
- edsl/inference_services/AvailableModelFetcher.py +209 -0
- edsl/inference_services/AwsBedrock.py +0 -2
- edsl/inference_services/AzureAI.py +0 -2
- edsl/inference_services/GoogleService.py +2 -11
- edsl/inference_services/InferenceServiceABC.py +18 -85
- edsl/inference_services/InferenceServicesCollection.py +105 -80
- edsl/inference_services/MistralAIService.py +0 -3
- edsl/inference_services/OpenAIService.py +1 -4
- edsl/inference_services/PerplexityService.py +0 -3
- edsl/inference_services/ServiceAvailability.py +135 -0
- edsl/inference_services/TestService.py +11 -8
- edsl/inference_services/data_structures.py +62 -0
- edsl/jobs/AnswerQuestionFunctionConstructor.py +188 -0
- edsl/jobs/Answers.py +1 -14
- edsl/jobs/FetchInvigilator.py +40 -0
- edsl/jobs/InterviewTaskManager.py +98 -0
- edsl/jobs/InterviewsConstructor.py +48 -0
- edsl/jobs/Jobs.py +102 -243
- edsl/jobs/JobsChecks.py +35 -10
- edsl/jobs/JobsComponentConstructor.py +189 -0
- edsl/jobs/JobsPrompts.py +5 -3
- edsl/jobs/JobsRemoteInferenceHandler.py +128 -80
- edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
- edsl/jobs/RequestTokenEstimator.py +30 -0
- edsl/jobs/buckets/BucketCollection.py +44 -3
- edsl/jobs/buckets/TokenBucket.py +53 -21
- edsl/jobs/buckets/TokenBucketAPI.py +211 -0
- edsl/jobs/buckets/TokenBucketClient.py +191 -0
- edsl/jobs/decorators.py +35 -0
- edsl/jobs/interviews/Interview.py +77 -380
- edsl/jobs/jobs_status_enums.py +9 -0
- edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
- edsl/jobs/runners/JobsRunnerAsyncio.py +4 -49
- edsl/jobs/tasks/QuestionTaskCreator.py +21 -19
- edsl/jobs/tasks/TaskHistory.py +14 -15
- edsl/jobs/tasks/task_status_enum.py +0 -2
- edsl/language_models/ComputeCost.py +63 -0
- edsl/language_models/LanguageModel.py +137 -234
- edsl/language_models/ModelList.py +11 -13
- edsl/language_models/PriceManager.py +127 -0
- edsl/language_models/RawResponseHandler.py +106 -0
- edsl/language_models/ServiceDataSources.py +0 -0
- edsl/language_models/__init__.py +0 -1
- edsl/language_models/key_management/KeyLookup.py +63 -0
- edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
- edsl/language_models/key_management/KeyLookupCollection.py +38 -0
- edsl/language_models/key_management/__init__.py +0 -0
- edsl/language_models/key_management/models.py +131 -0
- edsl/language_models/registry.py +49 -59
- edsl/language_models/repair.py +2 -2
- edsl/language_models/utilities.py +5 -4
- edsl/notebooks/Notebook.py +19 -14
- edsl/notebooks/NotebookToLaTeX.py +142 -0
- edsl/prompts/Prompt.py +29 -39
- edsl/questions/AnswerValidatorMixin.py +47 -2
- edsl/questions/ExceptionExplainer.py +77 -0
- edsl/questions/HTMLQuestion.py +103 -0
- edsl/questions/LoopProcessor.py +149 -0
- edsl/questions/QuestionBase.py +37 -192
- edsl/questions/QuestionBaseGenMixin.py +52 -48
- edsl/questions/QuestionBasePromptsMixin.py +7 -3
- edsl/questions/QuestionCheckBox.py +1 -1
- edsl/questions/QuestionExtract.py +1 -1
- edsl/questions/QuestionFreeText.py +1 -2
- edsl/questions/QuestionList.py +3 -5
- edsl/questions/QuestionMatrix.py +265 -0
- edsl/questions/QuestionMultipleChoice.py +66 -22
- edsl/questions/QuestionNumerical.py +1 -3
- edsl/questions/QuestionRank.py +6 -16
- edsl/questions/ResponseValidatorABC.py +37 -11
- edsl/questions/ResponseValidatorFactory.py +28 -0
- edsl/questions/SimpleAskMixin.py +4 -3
- edsl/questions/__init__.py +1 -0
- edsl/questions/derived/QuestionLinearScale.py +6 -3
- edsl/questions/derived/QuestionTopK.py +1 -1
- edsl/questions/descriptors.py +17 -3
- edsl/questions/question_registry.py +1 -1
- edsl/questions/templates/matrix/__init__.py +1 -0
- edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
- edsl/questions/templates/matrix/question_presentation.jinja +20 -0
- edsl/results/CSSParameterizer.py +1 -1
- edsl/results/Dataset.py +170 -7
- edsl/results/DatasetExportMixin.py +224 -302
- edsl/results/DatasetTree.py +28 -8
- edsl/results/MarkdownToDocx.py +122 -0
- edsl/results/MarkdownToPDF.py +111 -0
- edsl/results/Result.py +192 -206
- edsl/results/Results.py +120 -113
- edsl/results/ResultsExportMixin.py +2 -0
- edsl/results/Selector.py +23 -13
- edsl/results/TableDisplay.py +98 -171
- edsl/results/TextEditor.py +50 -0
- edsl/results/__init__.py +1 -1
- edsl/results/smart_objects.py +96 -0
- edsl/results/table_data_class.py +12 -0
- edsl/results/table_renderers.py +118 -0
- edsl/scenarios/ConstructDownloadLink.py +109 -0
- edsl/scenarios/DirectoryScanner.py +96 -0
- edsl/scenarios/DocumentChunker.py +102 -0
- edsl/scenarios/DocxScenario.py +16 -0
- edsl/scenarios/FileStore.py +118 -239
- edsl/scenarios/PdfExtractor.py +40 -0
- edsl/scenarios/Scenario.py +90 -193
- edsl/scenarios/ScenarioHtmlMixin.py +4 -3
- edsl/scenarios/ScenarioJoin.py +10 -6
- edsl/scenarios/ScenarioList.py +383 -240
- edsl/scenarios/ScenarioListExportMixin.py +0 -7
- edsl/scenarios/ScenarioListPdfMixin.py +15 -37
- edsl/scenarios/ScenarioSelector.py +156 -0
- edsl/scenarios/__init__.py +1 -2
- edsl/scenarios/file_methods.py +85 -0
- edsl/scenarios/handlers/__init__.py +13 -0
- edsl/scenarios/handlers/csv.py +38 -0
- edsl/scenarios/handlers/docx.py +76 -0
- edsl/scenarios/handlers/html.py +37 -0
- edsl/scenarios/handlers/json.py +111 -0
- edsl/scenarios/handlers/latex.py +5 -0
- edsl/scenarios/handlers/md.py +51 -0
- edsl/scenarios/handlers/pdf.py +68 -0
- edsl/scenarios/handlers/png.py +39 -0
- edsl/scenarios/handlers/pptx.py +105 -0
- edsl/scenarios/handlers/py.py +294 -0
- edsl/scenarios/handlers/sql.py +313 -0
- edsl/scenarios/handlers/sqlite.py +149 -0
- edsl/scenarios/handlers/txt.py +33 -0
- edsl/study/ObjectEntry.py +1 -1
- edsl/study/SnapShot.py +1 -1
- edsl/study/Study.py +5 -12
- edsl/surveys/ConstructDAG.py +92 -0
- edsl/surveys/EditSurvey.py +221 -0
- edsl/surveys/InstructionHandler.py +100 -0
- edsl/surveys/MemoryManagement.py +72 -0
- edsl/surveys/Rule.py +5 -4
- edsl/surveys/RuleCollection.py +25 -27
- edsl/surveys/RuleManager.py +172 -0
- edsl/surveys/Simulator.py +75 -0
- edsl/surveys/Survey.py +199 -771
- edsl/surveys/SurveyCSS.py +20 -8
- edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +11 -9
- edsl/surveys/SurveyToApp.py +141 -0
- edsl/surveys/__init__.py +4 -2
- edsl/surveys/descriptors.py +6 -2
- edsl/surveys/instructions/ChangeInstruction.py +1 -2
- edsl/surveys/instructions/Instruction.py +4 -13
- edsl/surveys/instructions/InstructionCollection.py +11 -6
- edsl/templates/error_reporting/interview_details.html +1 -1
- edsl/templates/error_reporting/report.html +1 -1
- edsl/tools/plotting.py +1 -1
- edsl/utilities/PrettyList.py +56 -0
- edsl/utilities/is_notebook.py +18 -0
- edsl/utilities/is_valid_variable_name.py +11 -0
- edsl/utilities/remove_edsl_version.py +24 -0
- edsl/utilities/utilities.py +35 -23
- {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev2.dist-info}/METADATA +12 -10
- edsl-0.1.39.dev2.dist-info/RECORD +352 -0
- edsl/language_models/KeyLookup.py +0 -30
- edsl/language_models/unused/ReplicateBase.py +0 -83
- edsl/results/ResultsDBMixin.py +0 -238
- edsl-0.1.39.dev1.dist-info/RECORD +0 -277
- {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev2.dist-info}/LICENSE +0 -0
- {edsl-0.1.39.dev1.dist-info → edsl-0.1.39.dev2.dist-info}/WHEEL +0 -0
edsl/jobs/JobsRemoteInferenceLogger.py
ADDED
@@ -0,0 +1,239 @@
+import re
+import sys
+import uuid
+from abc import ABC, abstractmethod
+from typing import Optional, Union, Literal, TYPE_CHECKING, List, Dict
+from datetime import datetime
+from dataclasses import dataclass
+from edsl.exceptions.coop import CoopServerResponseError
+
+from edsl.jobs.jobs_status_enums import JobsStatus
+
+if TYPE_CHECKING:
+    from edsl.results.Results import Results
+
+
+@dataclass
+class LogMessage:
+    text: str
+    status: str
+    timestamp: datetime
+    status: JobsStatus
+
+
+@dataclass
+class JobsInfo:
+    job_uuid: str = None
+    progress_bar_url: str = None
+    error_report_url: str = None
+    results_uuid: str = None
+    results_url: str = None
+
+    pretty_names = {
+        "job_uuid": "Job UUID",
+        "progress_bar_url": "Progress Bar URL",
+        "error_report_url": "Error Report URL",
+        "results_uuid": "Results UUID",
+        "results_url": "Results URL",
+    }
+
+
+class JobLogger(ABC):
+    def __init__(self, verbose: bool = False):
+        self.verbose = verbose
+        self.jobs_info = JobsInfo()
+
+    def add_info(
+        self,
+        information_type: Literal[
+            "job_uuid",
+            "progress_bar_url",
+            "error_report_url",
+            "results_uuid",
+            "results_url",
+        ],
+        value: str,
+    ):
+        """Add information to the logger
+
+        >>> j = StdOutJobLogger()
+        >>> j.add_info("job_uuid", "1234")
+        >>> j.jobs_info.job_uuid
+        '1234'
+        """
+        if information_type not in self.jobs_info.__annotations__:
+            raise ValueError(f"Information type {information_type} not supported")
+        setattr(self.jobs_info, information_type, value)
+
+    @abstractmethod
+    def update(self, message: str, status: str = "running"):
+        pass
+
+
+class HTMLTableJobLogger(JobLogger):
+    def __init__(self, verbose=True, **kwargs):
+        from IPython.display import display, HTML
+
+        super().__init__(verbose=verbose)
+        self.display_handle = display(HTML(""), display_id=True)
+        self.current_message = None
+        self.log_id = str(uuid.uuid4())
+        self.is_expanded = True
+        self.spinner_chars = ["◐", "◓", "◑", "◒"]  # Rotating spinner characters
+        self.spinner_idx = 0
+
+    def _get_table_row(self, key: str, value: str) -> str:
+        """Generate a table row with key-value pair"""
+        return f"""
+            <tr>
+                <td style="padding: 8px; border: 1px solid #ddd; font-weight: bold;">{key}</td>
+                <td style="padding: 8px; border: 1px solid #ddd;">{value if value else 'None'}</td>
+            </tr>
+        """
+
+    def _linkify(self, text: str) -> str:
+        """Convert URLs in text to clickable links"""
+        url_pattern = r'(https?://[^\s<>"]+|www\.[^\s<>"]+)'
+        return re.sub(
+            url_pattern,
+            r'<a href="\1" target="_blank" style="color: #3b82f6; text-decoration: underline;">\1</a>',
+            text,
+        )
+
+    def _get_spinner(self, status: JobsStatus) -> str:
+        """Get the current spinner frame if status is running"""
+        if status == JobsStatus.RUNNING:
+            spinner = self.spinner_chars[self.spinner_idx]
+            self.spinner_idx = (self.spinner_idx + 1) % len(self.spinner_chars)
+            return f'<span style="margin-right: 8px;">{spinner}</span>'
+        elif status == JobsStatus.COMPLETED:
+            return '<span style="margin-right: 8px; color: #22c55e;">✓</span>'
+        elif status == JobsStatus.FAILED:
+            return '<span style="margin-right: 8px; color: #ef4444;">✗</span>'
+        return ""
+
+    def _get_html(self, status: JobsStatus = JobsStatus.RUNNING) -> str:
+        """Generate the complete HTML display"""
+        # Generate table rows for each JobsInfo field
+        info_rows = ""
+        for field, _ in self.jobs_info.__annotations__.items():
+            if field != "pretty_names":  # Skip the pretty_names dictionary
+                value = getattr(self.jobs_info, field)
+                value = self._linkify(str(value)) if value else None
+                pretty_name = self.jobs_info.pretty_names.get(
+                    field, field.replace("_", " ").title()
+                )
+                info_rows += self._get_table_row(pretty_name, value)
+
+        # Add current message section with spinner
+        message_html = ""
+        if self.current_message:
+            spinner = self._get_spinner(status)
+            message_html = f"""
+                <div style="margin-top: 10px; padding: 8px; background-color: #f8f9fa; border: 1px solid #ddd; border-radius: 4px;">
+                    {spinner}<strong>Current Status:</strong> {self._linkify(self.current_message)}
+                </div>
+            """
+
+        display_style = "block" if self.is_expanded else "none"
+        arrow = "▼" if self.is_expanded else "▶"
+
+        return f"""
+            <div style="font-family: system-ui; max-width: 800px; margin: 10px 0;">
+                <div onclick="document.getElementById('content-{self.log_id}').style.display = document.getElementById('content-{self.log_id}').style.display === 'none' ? 'block' : 'none';
+                              document.getElementById('arrow-{self.log_id}').innerHTML = document.getElementById('content-{self.log_id}').style.display === 'none' ? '▶' : '▼';"
+                     style="padding: 10px; background: #f5f5f5; border: 1px solid #ddd; border-radius: 4px; cursor: pointer;">
+                    <span id="arrow-{self.log_id}">{arrow}</span> Job Status ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
+                </div>
+                <div id="content-{self.log_id}" style="display: {display_style};">
+                    <table style="width: 100%; border-collapse: collapse; background: white; border: 1px solid #ddd;">
+                        {info_rows}
+                    </table>
+                    {message_html}
+                </div>
+            </div>
+        """
+
+    def update(self, message: str, status: JobsStatus = JobsStatus.RUNNING):
+        """Update the display with new message and current JobsInfo state"""
+        from IPython.display import HTML
+
+        self.current_message = message
+        if self.verbose:
+            self.display_handle.update(HTML(self._get_html(status)))
+        else:
+            return None
+
+
+class StdOutJobLogger(JobLogger):
+    def __init__(self, verbose=True, **kwargs):
+        super().__init__(verbose=verbose)  # Properly call parent's __init__
+        self.messages: List[LogMessage] = []
+
+    def update(self, message: str, status: JobsStatus = JobsStatus.RUNNING):
+        log_msg = LogMessage(text=message, status=status, timestamp=datetime.now())
+        self.messages.append(log_msg)
+        if self.verbose:
+            sys.stdout.write(f"│ {message}\n")
+            sys.stdout.flush()
+        else:
+            return None
+
+
+class JupyterJobLogger(JobLogger):
+    def __init__(self, verbose=True, **kwargs):
+        from IPython.display import display, HTML
+
+        super().__init__(verbose=verbose)
+        self.messages = []
+        self.log_id = str(uuid.uuid4())
+        self.is_expanded = True
+        self.display_handle = display(HTML(""), display_id=True)
+
+    def _linkify(self, text):
+        url_pattern = r'(https?://[^\s<>"]+|www\.[^\s<>"]+)'
+        return re.sub(
+            url_pattern,
+            r'<a href="\1" target="_blank" style="color: #3b82f6; text-decoration: underline;">\1</a>',
+            text,
+        )
+
+    def _get_html(self):
+        messages_html = "\n".join(
+            [
+                f'<div style="border-left: 3px solid {msg["color"]}; padding: 5px 10px; margin: 5px 0;">{self._linkify(msg["text"])}</div>'
+                for msg in self.messages
+            ]
+        )
+
+        display_style = "block" if self.is_expanded else "none"
+        arrow = "▼" if self.is_expanded else "▶"
+
+        return f"""
+            <div style="border: 1px solid #ccc; margin: 10px 0; max-width: 800px;">
+                <div onclick="document.getElementById('content-{self.log_id}').style.display = document.getElementById('content-{self.log_id}').style.display === 'none' ? 'block' : 'none';
+                              document.getElementById('arrow-{self.log_id}').innerHTML = document.getElementById('content-{self.log_id}').style.display === 'none' ? '▶' : '▼';"
+                     style="padding: 10px; background: #f5f5f5; cursor: pointer;">
+                    <span id="arrow-{self.log_id}">{arrow}</span> Remote Job Log ({datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
+                </div>
+                <div id="content-{self.log_id}" style="padding: 10px; display: {display_style};">
+                    {messages_html}
+                </div>
+            </div>
+        """
+
+    def update(self, message, status: JobsStatus = JobsStatus.RUNNING):
+        from IPython.display import HTML
+
+        colors = {"running": "#3b82f6", "completed": "#22c55e", "failed": "#ef4444"}
+        self.messages.append({"text": message, "color": colors.get(status, "#666")})
+        if self.verbose:
+            self.display_handle.update(HTML(self._get_html()))
+        else:
+            return None
+
+
+if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
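For reference, a minimal usage sketch of the new loggers (not part of the diff). It relies only on what the file above defines, plus the JobsStatus members (RUNNING, COMPLETED, FAILED) that the spinner code already references; the UUID and URL values are illustrative.

from edsl.jobs.JobsRemoteInferenceLogger import StdOutJobLogger
from edsl.jobs.jobs_status_enums import JobsStatus

logger = StdOutJobLogger(verbose=True)

# add_info only accepts the field names declared on JobsInfo
logger.add_info("job_uuid", "1234")
logger.add_info("progress_bar_url", "https://example.com/progress")  # illustrative URL

# update() records a LogMessage and, because verbose=True, echoes it to stdout
logger.update("Job submitted", status=JobsStatus.RUNNING)
logger.update("Job finished", status=JobsStatus.COMPLETED)

print(logger.jobs_info.job_uuid)  # -> '1234', as in the doctest above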
edsl/jobs/RequestTokenEstimator.py
ADDED
@@ -0,0 +1,30 @@
+from edsl.jobs.FetchInvigilator import FetchInvigilator
+
+
+class RequestTokenEstimator:
+    """Estimate the number of tokens that will be required to run the focal task."""
+
+    def __init__(self, interview):
+        self.interview = interview
+
+    def __call__(self, question) -> float:
+        """Estimate the number of tokens that will be required to run the focal task."""
+        from edsl.scenarios.FileStore import FileStore
+
+        invigilator = FetchInvigilator(self.interview)(question=question)
+
+        # TODO: There should be a way to get a more accurate estimate.
+        combined_text = ""
+        file_tokens = 0
+        for prompt in invigilator.get_prompts().values():
+            if hasattr(prompt, "text"):
+                combined_text += prompt.text
+            elif isinstance(prompt, str):
+                combined_text += prompt
+            elif isinstance(prompt, list):
+                for file in prompt:
+                    if isinstance(file, FileStore):
+                        file_tokens += file.size * 0.25
+            else:
+                raise ValueError(f"Prompt is of type {type(prompt)}")
+        return len(combined_text) / 4.0 + file_tokens
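The estimate above is a character-count heuristic rather than a tokenizer call: roughly one token per four characters of prompt text, plus 0.25 times each attached FileStore's size attribute. A standalone restatement of that arithmetic with hypothetical numbers (the helper name is illustrative, not from the package):

# Heuristic from the diff: tokens ≈ len(text) / 4 + 0.25 * sum(file sizes)
def rough_token_estimate(prompt_text, file_sizes):
    return len(prompt_text) / 4.0 + sum(size * 0.25 for size in file_sizes)

# e.g. a 2,000-character prompt with one attachment of size 4,000
print(rough_token_estimate("x" * 2000, [4000]))  # 500 + 1000 = 1500.0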
edsl/jobs/buckets/BucketCollection.py
CHANGED
@@ -1,8 +1,15 @@
+from typing import Optional
 from collections import UserDict
 from edsl.jobs.buckets.TokenBucket import TokenBucket
 from edsl.jobs.buckets.ModelBuckets import ModelBuckets

+# from functools import wraps
+from threading import RLock

+from edsl.jobs.decorators import synchronized_class
+
+
+@synchronized_class
 class BucketCollection(UserDict):
     """A Jobs object will have a whole collection of model buckets, as multiple models could be used.

@@ -10,11 +17,43 @@
     Models themselves are hashable, so this works.
     """

-    def __init__(self, infinity_buckets=False):
+    def __init__(self, infinity_buckets: bool = False):
+        """Create a new BucketCollection.
+        An infinity bucket is a bucket that never runs out of tokens or requests.
+        """
         super().__init__()
         self.infinity_buckets = infinity_buckets
         self.models_to_services = {}
         self.services_to_buckets = {}
+        self._lock = RLock()
+
+        from edsl.config import CONFIG
+        import os
+
+        url = os.environ.get("EDSL_REMOTE_TOKEN_BUCKET_URL", None)
+
+        if url == "None" or url is None:
+            self.remote_url = None
+            # print(f"Using remote token bucket URL: {url}")
+        else:
+            self.remote_url = url
+
+    @classmethod
+    def from_models(
+        cls, models_list: list, infinity_buckets: bool = False
+    ) -> "BucketCollection":
+        """Create a BucketCollection from a list of models."""
+        bucket_collection = cls(infinity_buckets=infinity_buckets)
+        for model in models_list:
+            bucket_collection.add_model(model)
+        return bucket_collection
+
+    def get_tokens(
+        self, model: "LanguageModel", bucket_type: str, num_tokens: int
+    ) -> int:
+        """Get the number of tokens remaining in the bucket."""
+        relevant_bucket = getattr(self[model], bucket_type)
+        return relevant_bucket.get_tokens(num_tokens)

     def __repr__(self):
         return f"BucketCollection({self.data})"
@@ -26,8 +65,8 @@ class BucketCollection(UserDict):

         # compute the TPS and RPS from the model
         if not self.infinity_buckets:
-            TPS = model.
-            RPS = model.
+            TPS = model.tpm / 60.0
+            RPS = model.rpm / 60.0
         else:
             TPS = float("inf")
             RPS = float("inf")
@@ -40,12 +79,14 @@
             bucket_type="requests",
             capacity=RPS,
             refill_rate=RPS,
+            remote_url=self.remote_url,
         )
         tokens_bucket = TokenBucket(
             bucket_name=service,
             bucket_type="tokens",
             capacity=TPS,
             refill_rate=TPS,
+            remote_url=self.remote_url,
         )
         self.services_to_buckets[service] = ModelBuckets(
             requests_bucket, tokens_bucket
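A sketch of the new construction path (not from the diff): from_models builds a collection by calling add_model for each model, and __init__ reads EDSL_REMOTE_TOKEN_BUCKET_URL to decide whether buckets are local or remote. It assumes edsl's top-level Model factory and that Model objects expose the tpm/rpm attributes add_model now reads; the model names are illustrative.

import os

# From the hunk above: anything other than None/"None" makes every bucket remote.
os.environ["EDSL_REMOTE_TOKEN_BUCKET_URL"] = "None"  # keep buckets local for this sketch

from edsl import Model  # assumed top-level factory
from edsl.jobs.buckets.BucketCollection import BucketCollection

models = [Model("gpt-4o"), Model("gpt-4o-mini")]  # hypothetical model names
collection = BucketCollection.from_models(models)  # calls add_model() per model
print(collection)  # BucketCollection({...}) via __repr__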
edsl/jobs/buckets/TokenBucket.py
CHANGED
@@ -1,10 +1,55 @@
 from typing import Union, List, Any, Optional
 import asyncio
 import time
+from threading import RLock
+from edsl.jobs.decorators import synchronized_class

+from typing import Union, List, Any, Optional
+import asyncio
+import time
+from threading import RLock
+from edsl.jobs.decorators import synchronized_class

+
+@synchronized_class
 class TokenBucket:
-    """This is a token bucket used to respect rate limits to services.
+    """This is a token bucket used to respect rate limits to services.
+    It can operate either locally or remotely via a REST API based on initialization parameters.
+    """
+
+    def __new__(
+        cls,
+        *,
+        bucket_name: str,
+        bucket_type: str,
+        capacity: Union[int, float],
+        refill_rate: Union[int, float],
+        remote_url: Optional[str] = None,
+    ):
+        """Factory method to create either a local or remote token bucket.
+
+        Args:
+            bucket_name: Name of the bucket
+            bucket_type: Type of the bucket
+            capacity: Maximum number of tokens
+            refill_rate: Rate at which tokens are refilled
+            remote_url: If provided, creates a remote token bucket client
+        """
+        if remote_url is not None:
+            # Import here to avoid circular imports
+            from edsl.jobs.buckets.TokenBucketClient import TokenBucketClient
+
+            return TokenBucketClient(
+                bucket_name=bucket_name,
+                bucket_type=bucket_type,
+                capacity=capacity,
+                refill_rate=refill_rate,
+                api_base_url=remote_url,
+            )
+
+        # Create a local token bucket
+        instance = super(TokenBucket, cls).__new__(cls)
+        return instance

     def __init__(
         self,
@@ -13,11 +58,17 @@ class TokenBucket:
         bucket_type: str,
         capacity: Union[int, float],
         refill_rate: Union[int, float],
+        remote_url: Optional[str] = None,
     ):
+        # Skip initialization if this is a remote bucket
+        if remote_url is not None:
+            return
+
         self.bucket_name = bucket_name
         self.bucket_type = bucket_type
-        self.capacity = capacity
+        self.capacity = capacity
         self.added_tokens = 0
+        self._lock = RLock()

         self.target_rate = (
             capacity * 60
@@ -225,25 +276,6 @@ class TokenBucket:

         return (self.num_released / elapsed_time) * 60

-        # # Filter log entries within the time window
-        # relevant_log = [(t, tokens) for t, tokens in self.log if t >= start_time]
-
-        # if len(relevant_log) < 2:
-        #     return 0  # Not enough data points to calculate throughput
-
-        # # Calculate total tokens used
-        # initial_tokens = relevant_log[0][1]
-        # final_tokens = relevant_log[-1][1]
-        # tokens_used = self.num_released - (final_tokens - initial_tokens)
-
-        # # Calculate actual time elapsed
-        # actual_time_elapsed = relevant_log[-1][0] - relevant_log[0][0]
-
-        # # Calculate throughput in tokens per minute
-        # throughput = (tokens_used / actual_time_elapsed) * 60
-
-        # return throughput
-

 if __name__ == "__main__":
     import doctest
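With the new __new__ factory, the same keyword-only constructor either builds a local bucket or returns a TokenBucketClient pointed at a REST endpoint. A sketch (the bucket values and URL are illustrative, and the remote case assumes the FastAPI service from TokenBucketAPI.py below is running on port 8001):

from edsl.jobs.buckets.TokenBucket import TokenBucket

# Local bucket: remote_url omitted, so __new__ falls through to a normal instance
local = TokenBucket(
    bucket_name="openai", bucket_type="requests", capacity=10, refill_rate=10
)

# Remote bucket: __new__ short-circuits and returns a TokenBucketClient instead
remote = TokenBucket(
    bucket_name="openai",
    bucket_type="requests",
    capacity=10,
    refill_rate=10,
    remote_url="http://localhost:8001",  # illustrative; must point at the running server
)
print(type(local).__name__, type(remote).__name__)  # TokenBucket, TokenBucketClient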
edsl/jobs/buckets/TokenBucketAPI.py
ADDED
@@ -0,0 +1,211 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from typing import Union, Dict
+from typing import Union, List, Any, Optional
+from threading import RLock
+from edsl.jobs.buckets.TokenBucket import TokenBucket  # Original implementation
+
+
+def safe_float_for_json(value: float) -> Union[float, str]:
+    """Convert float('inf') to 'infinity' for JSON serialization.
+
+    Args:
+        value: The float value to convert
+
+    Returns:
+        Either the original float or the string 'infinity' if the value is infinite
+    """
+    if value == float("inf"):
+        return "infinity"
+    return value
+
+
+app = FastAPI()
+
+# In-memory storage for TokenBucket instances
+buckets: Dict[str, TokenBucket] = {}
+
+
+class TokenBucketCreate(BaseModel):
+    bucket_name: str
+    bucket_type: str
+    capacity: Union[int, float]
+    refill_rate: Union[int, float]
+
+
+@app.get("/buckets")
+async def list_buckets(
+    bucket_type: Optional[str] = None,
+    bucket_name: Optional[str] = None,
+    include_logs: bool = False,
+):
+    """List all buckets and their current status.
+
+    Args:
+        bucket_type: Optional filter by bucket type
+        bucket_name: Optional filter by bucket name
+        include_logs: Whether to include the full logs in the response
+    """
+    result = {}
+
+    for bucket_id, bucket in buckets.items():
+        # Apply filters if specified
+        if bucket_type and bucket.bucket_type != bucket_type:
+            continue
+        if bucket_name and bucket.bucket_name != bucket_name:
+            continue
+
+        # Get basic bucket info
+        bucket_info = {
+            "bucket_name": bucket.bucket_name,
+            "bucket_type": bucket.bucket_type,
+            "tokens": bucket.tokens,
+            "capacity": bucket.capacity,
+            "refill_rate": bucket.refill_rate,
+            "turbo_mode": bucket.turbo_mode,
+            "num_requests": bucket.num_requests,
+            "num_released": bucket.num_released,
+            "tokens_returned": bucket.tokens_returned,
+        }
+        for k, v in bucket_info.items():
+            if isinstance(v, float):
+                bucket_info[k] = safe_float_for_json(v)
+
+        # Only include logs if requested
+        if include_logs:
+            bucket_info["log"] = bucket.log
+
+        result[bucket_id] = bucket_info
+
+    return result
+
+
+@app.post("/bucket/{bucket_id}/add_tokens")
+async def add_tokens(bucket_id: str, amount: float):
+    """Add tokens to an existing bucket."""
+    if bucket_id not in buckets:
+        raise HTTPException(status_code=404, detail="Bucket not found")
+
+    if not isinstance(amount, (int, float)) or amount != amount:  # Check for NaN
+        raise HTTPException(status_code=400, detail="Invalid amount specified")
+
+    if amount == float("inf") or amount == float("-inf"):
+        raise HTTPException(status_code=400, detail="Amount cannot be infinite")
+
+    bucket = buckets[bucket_id]
+    bucket.add_tokens(amount)
+
+    # Ensure we return a JSON-serializable float
+    current_tokens = float(bucket.tokens)
+    if not -1e308 <= current_tokens <= 1e308:  # Check if within JSON float bounds
+        current_tokens = 0.0  # or some other reasonable default
+
+    return {"status": "success", "current_tokens": safe_float_for_json(current_tokens)}
+
+
+# @app.post("/bucket")
+# async def create_bucket(bucket: TokenBucketCreate):
+#     bucket_id = f"{bucket.bucket_name}_{bucket.bucket_type}"
+#     if bucket_id in buckets:
+#         raise HTTPException(status_code=400, detail="Bucket already exists")
+
+#     # Create an actual TokenBucket instance
+#     buckets[bucket_id] = TokenBucket(
+#         bucket_name=bucket.bucket_name,
+#         bucket_type=bucket.bucket_type,
+#         capacity=bucket.capacity,
+#         refill_rate=bucket.refill_rate,
+#     )
+#     return {"status": "created"}
+
+
+@app.post("/bucket")
+async def create_bucket(bucket: TokenBucketCreate):
+    if (
+        not isinstance(bucket.capacity, (int, float))
+        or bucket.capacity != bucket.capacity
+    ):  # Check for NaN
+        raise HTTPException(status_code=400, detail="Invalid capacity value")
+    if (
+        not isinstance(bucket.refill_rate, (int, float))
+        or bucket.refill_rate != bucket.refill_rate
+    ):  # Check for NaN
+        raise HTTPException(status_code=400, detail="Invalid refill rate value")
+    if bucket.capacity == float("inf") or bucket.refill_rate == float("inf"):
+        raise HTTPException(status_code=400, detail="Values cannot be infinite")
+    bucket_id = f"{bucket.bucket_name}_{bucket.bucket_type}"
+    if bucket_id in buckets:
+        # Instead of error, return success with "existing" status
+        return {
+            "status": "existing",
+            "bucket": {
+                "capacity": safe_float_for_json(buckets[bucket_id].capacity),
+                "refill_rate": safe_float_for_json(buckets[bucket_id].refill_rate),
+            },
+        }
+
+    # Create a new bucket
+    buckets[bucket_id] = TokenBucket(
+        bucket_name=bucket.bucket_name,
+        bucket_type=bucket.bucket_type,
+        capacity=bucket.capacity,
+        refill_rate=bucket.refill_rate,
+    )
+    return {"status": "created"}
+
+
+@app.post("/bucket/{bucket_id}/get_tokens")
+async def get_tokens(bucket_id: str, amount: float, cheat_bucket_capacity: bool = True):
+    if bucket_id not in buckets:
+        raise HTTPException(status_code=404, detail="Bucket not found")
+
+    bucket = buckets[bucket_id]
+    await bucket.get_tokens(amount, cheat_bucket_capacity)
+    return {"status": "success"}
+
+
+@app.post("/bucket/{bucket_id}/turbo_mode/{state}")
+async def set_turbo_mode(bucket_id: str, state: bool):
+    if bucket_id not in buckets:
+        raise HTTPException(status_code=404, detail="Bucket not found")
+
+    bucket = buckets[bucket_id]
+    if state:
+        bucket.turbo_mode_on()
+    else:
+        bucket.turbo_mode_off()
+    return {"status": "success"}
+
+
+@app.get("/bucket/{bucket_id}/status")
+async def get_bucket_status(bucket_id: str):
+    if bucket_id not in buckets:
+        raise HTTPException(status_code=404, detail="Bucket not found")
+
+    bucket = buckets[bucket_id]
+    status = {
+        "tokens": bucket.tokens,
+        "capacity": bucket.capacity,
+        "refill_rate": bucket.refill_rate,
+        "turbo_mode": bucket.turbo_mode,
+        "num_requests": bucket.num_requests,
+        "num_released": bucket.num_released,
+        "tokens_returned": bucket.tokens_returned,
+        "log": bucket.log,
+    }
+    for k, v in status.items():
+        if isinstance(v, float):
+            status[k] = safe_float_for_json(v)
+
+    for index, entry in enumerate(status["log"]):
+        ts, value = entry
+        status["log"][index] = (ts, safe_float_for_json(value))
+
+    # print(status)
+    return status
+
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(app, host="0.0.0.0", port=8001)
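Run the module directly and it serves on port 8001 (per the __main__ block); the endpoints can then be exercised with any HTTP client. A sketch using the third-party requests library; the bucket names and values are illustrative, and bucket ids follow the f"{bucket_name}_{bucket_type}" convention used server-side:

import requests

BASE = "http://localhost:8001"  # uvicorn.run(..., port=8001) above

# POST /bucket takes a TokenBucketCreate JSON body; re-posting an existing id returns "existing"
payload = {
    "bucket_name": "openai",
    "bucket_type": "requests",
    "capacity": 10,
    "refill_rate": 10,
}
print(requests.post(f"{BASE}/bucket", json=payload).json())  # {"status": "created"}

# amount is a scalar parameter, so FastAPI reads it from the query string
bucket_id = "openai_requests"  # f"{bucket_name}_{bucket_type}"
requests.post(f"{BASE}/bucket/{bucket_id}/add_tokens", params={"amount": 5})

print(requests.get(f"{BASE}/bucket/{bucket_id}/status").json())
print(requests.get(f"{BASE}/buckets", params={"bucket_type": "requests"}).json())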