edsl 0.1.38.dev4__py3-none-any.whl → 0.1.39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212)
  1. edsl/Base.py +197 -116
  2. edsl/__init__.py +15 -7
  3. edsl/__version__.py +1 -1
  4. edsl/agents/Agent.py +351 -147
  5. edsl/agents/AgentList.py +211 -73
  6. edsl/agents/Invigilator.py +101 -50
  7. edsl/agents/InvigilatorBase.py +62 -70
  8. edsl/agents/PromptConstructor.py +143 -225
  9. edsl/agents/QuestionInstructionPromptBuilder.py +128 -0
  10. edsl/agents/QuestionTemplateReplacementsBuilder.py +137 -0
  11. edsl/agents/__init__.py +0 -1
  12. edsl/agents/prompt_helpers.py +3 -3
  13. edsl/agents/question_option_processor.py +172 -0
  14. edsl/auto/AutoStudy.py +18 -5
  15. edsl/auto/StageBase.py +53 -40
  16. edsl/auto/StageQuestions.py +2 -1
  17. edsl/auto/utilities.py +0 -6
  18. edsl/config.py +22 -2
  19. edsl/conversation/car_buying.py +2 -1
  20. edsl/coop/CoopFunctionsMixin.py +15 -0
  21. edsl/coop/ExpectedParrotKeyHandler.py +125 -0
  22. edsl/coop/PriceFetcher.py +1 -1
  23. edsl/coop/coop.py +125 -47
  24. edsl/coop/utils.py +14 -14
  25. edsl/data/Cache.py +45 -27
  26. edsl/data/CacheEntry.py +12 -15
  27. edsl/data/CacheHandler.py +31 -12
  28. edsl/data/RemoteCacheSync.py +154 -46
  29. edsl/data/__init__.py +4 -3
  30. edsl/data_transfer_models.py +2 -1
  31. edsl/enums.py +27 -0
  32. edsl/exceptions/__init__.py +50 -50
  33. edsl/exceptions/agents.py +12 -0
  34. edsl/exceptions/inference_services.py +5 -0
  35. edsl/exceptions/questions.py +24 -6
  36. edsl/exceptions/scenarios.py +7 -0
  37. edsl/inference_services/AnthropicService.py +38 -19
  38. edsl/inference_services/AvailableModelCacheHandler.py +184 -0
  39. edsl/inference_services/AvailableModelFetcher.py +215 -0
  40. edsl/inference_services/AwsBedrock.py +0 -2
  41. edsl/inference_services/AzureAI.py +0 -2
  42. edsl/inference_services/GoogleService.py +7 -12
  43. edsl/inference_services/InferenceServiceABC.py +18 -85
  44. edsl/inference_services/InferenceServicesCollection.py +120 -79
  45. edsl/inference_services/MistralAIService.py +0 -3
  46. edsl/inference_services/OpenAIService.py +47 -35
  47. edsl/inference_services/PerplexityService.py +0 -3
  48. edsl/inference_services/ServiceAvailability.py +135 -0
  49. edsl/inference_services/TestService.py +11 -10
  50. edsl/inference_services/TogetherAIService.py +5 -3
  51. edsl/inference_services/data_structures.py +134 -0
  52. edsl/jobs/AnswerQuestionFunctionConstructor.py +223 -0
  53. edsl/jobs/Answers.py +1 -14
  54. edsl/jobs/FetchInvigilator.py +47 -0
  55. edsl/jobs/InterviewTaskManager.py +98 -0
  56. edsl/jobs/InterviewsConstructor.py +50 -0
  57. edsl/jobs/Jobs.py +356 -431
  58. edsl/jobs/JobsChecks.py +35 -10
  59. edsl/jobs/JobsComponentConstructor.py +189 -0
  60. edsl/jobs/JobsPrompts.py +6 -4
  61. edsl/jobs/JobsRemoteInferenceHandler.py +205 -133
  62. edsl/jobs/JobsRemoteInferenceLogger.py +239 -0
  63. edsl/jobs/RequestTokenEstimator.py +30 -0
  64. edsl/jobs/async_interview_runner.py +138 -0
  65. edsl/jobs/buckets/BucketCollection.py +44 -3
  66. edsl/jobs/buckets/TokenBucket.py +53 -21
  67. edsl/jobs/buckets/TokenBucketAPI.py +211 -0
  68. edsl/jobs/buckets/TokenBucketClient.py +191 -0
  69. edsl/jobs/check_survey_scenario_compatibility.py +85 -0
  70. edsl/jobs/data_structures.py +120 -0
  71. edsl/jobs/decorators.py +35 -0
  72. edsl/jobs/interviews/Interview.py +143 -408
  73. edsl/jobs/jobs_status_enums.py +9 -0
  74. edsl/jobs/loggers/HTMLTableJobLogger.py +304 -0
  75. edsl/jobs/results_exceptions_handler.py +98 -0
  76. edsl/jobs/runners/JobsRunnerAsyncio.py +88 -403
  77. edsl/jobs/runners/JobsRunnerStatus.py +133 -165
  78. edsl/jobs/tasks/QuestionTaskCreator.py +21 -19
  79. edsl/jobs/tasks/TaskHistory.py +38 -18
  80. edsl/jobs/tasks/task_status_enum.py +0 -2
  81. edsl/language_models/ComputeCost.py +63 -0
  82. edsl/language_models/LanguageModel.py +194 -236
  83. edsl/language_models/ModelList.py +28 -19
  84. edsl/language_models/PriceManager.py +127 -0
  85. edsl/language_models/RawResponseHandler.py +106 -0
  86. edsl/language_models/ServiceDataSources.py +0 -0
  87. edsl/language_models/__init__.py +1 -2
  88. edsl/language_models/key_management/KeyLookup.py +63 -0
  89. edsl/language_models/key_management/KeyLookupBuilder.py +273 -0
  90. edsl/language_models/key_management/KeyLookupCollection.py +38 -0
  91. edsl/language_models/key_management/__init__.py +0 -0
  92. edsl/language_models/key_management/models.py +131 -0
  93. edsl/language_models/model.py +256 -0
  94. edsl/language_models/repair.py +2 -2
  95. edsl/language_models/utilities.py +5 -4
  96. edsl/notebooks/Notebook.py +19 -14
  97. edsl/notebooks/NotebookToLaTeX.py +142 -0
  98. edsl/prompts/Prompt.py +29 -39
  99. edsl/questions/ExceptionExplainer.py +77 -0
  100. edsl/questions/HTMLQuestion.py +103 -0
  101. edsl/questions/QuestionBase.py +68 -214
  102. edsl/questions/QuestionBasePromptsMixin.py +7 -3
  103. edsl/questions/QuestionBudget.py +1 -1
  104. edsl/questions/QuestionCheckBox.py +3 -3
  105. edsl/questions/QuestionExtract.py +5 -7
  106. edsl/questions/QuestionFreeText.py +2 -3
  107. edsl/questions/QuestionList.py +10 -18
  108. edsl/questions/QuestionMatrix.py +265 -0
  109. edsl/questions/QuestionMultipleChoice.py +67 -23
  110. edsl/questions/QuestionNumerical.py +2 -4
  111. edsl/questions/QuestionRank.py +7 -17
  112. edsl/questions/SimpleAskMixin.py +4 -3
  113. edsl/questions/__init__.py +2 -1
  114. edsl/questions/{AnswerValidatorMixin.py → answer_validator_mixin.py} +47 -2
  115. edsl/questions/data_structures.py +20 -0
  116. edsl/questions/derived/QuestionLinearScale.py +6 -3
  117. edsl/questions/derived/QuestionTopK.py +1 -1
  118. edsl/questions/descriptors.py +17 -3
  119. edsl/questions/loop_processor.py +149 -0
  120. edsl/questions/{QuestionBaseGenMixin.py → question_base_gen_mixin.py} +57 -50
  121. edsl/questions/question_registry.py +1 -1
  122. edsl/questions/{ResponseValidatorABC.py → response_validator_abc.py} +40 -26
  123. edsl/questions/response_validator_factory.py +34 -0
  124. edsl/questions/templates/matrix/__init__.py +1 -0
  125. edsl/questions/templates/matrix/answering_instructions.jinja +5 -0
  126. edsl/questions/templates/matrix/question_presentation.jinja +20 -0
  127. edsl/results/CSSParameterizer.py +1 -1
  128. edsl/results/Dataset.py +170 -7
  129. edsl/results/DatasetExportMixin.py +168 -305
  130. edsl/results/DatasetTree.py +28 -8
  131. edsl/results/MarkdownToDocx.py +122 -0
  132. edsl/results/MarkdownToPDF.py +111 -0
  133. edsl/results/Result.py +298 -206
  134. edsl/results/Results.py +149 -131
  135. edsl/results/ResultsExportMixin.py +2 -0
  136. edsl/results/TableDisplay.py +98 -171
  137. edsl/results/TextEditor.py +50 -0
  138. edsl/results/__init__.py +1 -1
  139. edsl/results/file_exports.py +252 -0
  140. edsl/results/{Selector.py → results_selector.py} +23 -13
  141. edsl/results/smart_objects.py +96 -0
  142. edsl/results/table_data_class.py +12 -0
  143. edsl/results/table_renderers.py +118 -0
  144. edsl/scenarios/ConstructDownloadLink.py +109 -0
  145. edsl/scenarios/DocumentChunker.py +102 -0
  146. edsl/scenarios/DocxScenario.py +16 -0
  147. edsl/scenarios/FileStore.py +150 -239
  148. edsl/scenarios/PdfExtractor.py +40 -0
  149. edsl/scenarios/Scenario.py +90 -193
  150. edsl/scenarios/ScenarioHtmlMixin.py +4 -3
  151. edsl/scenarios/ScenarioList.py +415 -244
  152. edsl/scenarios/ScenarioListExportMixin.py +0 -7
  153. edsl/scenarios/ScenarioListPdfMixin.py +15 -37
  154. edsl/scenarios/__init__.py +1 -2
  155. edsl/scenarios/directory_scanner.py +96 -0
  156. edsl/scenarios/file_methods.py +85 -0
  157. edsl/scenarios/handlers/__init__.py +13 -0
  158. edsl/scenarios/handlers/csv.py +49 -0
  159. edsl/scenarios/handlers/docx.py +76 -0
  160. edsl/scenarios/handlers/html.py +37 -0
  161. edsl/scenarios/handlers/json.py +111 -0
  162. edsl/scenarios/handlers/latex.py +5 -0
  163. edsl/scenarios/handlers/md.py +51 -0
  164. edsl/scenarios/handlers/pdf.py +68 -0
  165. edsl/scenarios/handlers/png.py +39 -0
  166. edsl/scenarios/handlers/pptx.py +105 -0
  167. edsl/scenarios/handlers/py.py +294 -0
  168. edsl/scenarios/handlers/sql.py +313 -0
  169. edsl/scenarios/handlers/sqlite.py +149 -0
  170. edsl/scenarios/handlers/txt.py +33 -0
  171. edsl/scenarios/{ScenarioJoin.py → scenario_join.py} +10 -6
  172. edsl/scenarios/scenario_selector.py +156 -0
  173. edsl/study/ObjectEntry.py +1 -1
  174. edsl/study/SnapShot.py +1 -1
  175. edsl/study/Study.py +5 -12
  176. edsl/surveys/ConstructDAG.py +92 -0
  177. edsl/surveys/EditSurvey.py +221 -0
  178. edsl/surveys/InstructionHandler.py +100 -0
  179. edsl/surveys/MemoryManagement.py +72 -0
  180. edsl/surveys/Rule.py +5 -4
  181. edsl/surveys/RuleCollection.py +25 -27
  182. edsl/surveys/RuleManager.py +172 -0
  183. edsl/surveys/Simulator.py +75 -0
  184. edsl/surveys/Survey.py +270 -791
  185. edsl/surveys/SurveyCSS.py +20 -8
  186. edsl/surveys/{SurveyFlowVisualizationMixin.py → SurveyFlowVisualization.py} +11 -9
  187. edsl/surveys/SurveyToApp.py +141 -0
  188. edsl/surveys/__init__.py +4 -2
  189. edsl/surveys/descriptors.py +6 -2
  190. edsl/surveys/instructions/ChangeInstruction.py +1 -2
  191. edsl/surveys/instructions/Instruction.py +4 -13
  192. edsl/surveys/instructions/InstructionCollection.py +11 -6
  193. edsl/templates/error_reporting/interview_details.html +1 -1
  194. edsl/templates/error_reporting/report.html +1 -1
  195. edsl/tools/plotting.py +1 -1
  196. edsl/utilities/PrettyList.py +56 -0
  197. edsl/utilities/is_notebook.py +18 -0
  198. edsl/utilities/is_valid_variable_name.py +11 -0
  199. edsl/utilities/remove_edsl_version.py +24 -0
  200. edsl/utilities/utilities.py +35 -23
  201. {edsl-0.1.38.dev4.dist-info → edsl-0.1.39.dist-info}/METADATA +12 -10
  202. edsl-0.1.39.dist-info/RECORD +358 -0
  203. {edsl-0.1.38.dev4.dist-info → edsl-0.1.39.dist-info}/WHEEL +1 -1
  204. edsl/language_models/KeyLookup.py +0 -30
  205. edsl/language_models/registry.py +0 -190
  206. edsl/language_models/unused/ReplicateBase.py +0 -83
  207. edsl/results/ResultsDBMixin.py +0 -238
  208. edsl-0.1.38.dev4.dist-info/RECORD +0 -277
  209. /edsl/questions/{RegisterQuestionsMeta.py → register_questions_meta.py} +0 -0
  210. /edsl/results/{ResultsFetchMixin.py → results_fetch_mixin.py} +0 -0
  211. /edsl/results/{ResultsToolsMixin.py → results_tools_mixin.py} +0 -0
  212. {edsl-0.1.38.dev4.dist-info → edsl-0.1.39.dist-info}/LICENSE +0 -0
edsl/jobs/RequestTokenEstimator.py
@@ -0,0 +1,30 @@
+ from edsl.jobs.FetchInvigilator import FetchInvigilator
+
+
+ class RequestTokenEstimator:
+     """Estimate the number of tokens that will be required to run the focal task."""
+
+     def __init__(self, interview):
+         self.interview = interview
+
+     def __call__(self, question) -> float:
+         """Estimate the number of tokens that will be required to run the focal task."""
+         from edsl.scenarios.FileStore import FileStore
+
+         invigilator = FetchInvigilator(self.interview)(question=question)
+
+         # TODO: There should be a way to get a more accurate estimate.
+         combined_text = ""
+         file_tokens = 0
+         for prompt in invigilator.get_prompts().values():
+             if hasattr(prompt, "text"):
+                 combined_text += prompt.text
+             elif isinstance(prompt, str):
+                 combined_text += prompt
+             elif isinstance(prompt, list):
+                 for file in prompt:
+                     if isinstance(file, FileStore):
+                         file_tokens += file.size * 0.25
+             else:
+                 raise ValueError(f"Prompt is of type {type(prompt)}")
+         return len(combined_text) / 4.0 + file_tokens
edsl/jobs/async_interview_runner.py
@@ -0,0 +1,138 @@
+ from collections.abc import AsyncGenerator
+ from typing import List, TypeVar, Generator, Tuple, TYPE_CHECKING
+ from dataclasses import dataclass
+ import asyncio
+ from contextlib import asynccontextmanager
+ from edsl.data_transfer_models import EDSLResultObjectInput
+
+ from edsl.results.Result import Result
+ from edsl.jobs.interviews.Interview import Interview
+
+ if TYPE_CHECKING:
+     from edsl.jobs.Jobs import Jobs
+
+
+ @dataclass
+ class InterviewResult:
+     result: Result
+     interview: Interview
+     order: int
+
+
+ from edsl.jobs.data_structures import RunConfig
+
+
+ class AsyncInterviewRunner:
+     MAX_CONCURRENT = 5
+
+     def __init__(self, jobs: "Jobs", run_config: RunConfig):
+         self.jobs = jobs
+         self.run_config = run_config
+         self._initialized = asyncio.Event()
+
+     def _expand_interviews(self) -> Generator["Interview", None, None]:
+         """Populates self.total_interviews with n copies of each interview.
+
+         It also has to set the cache for each interview.
+
+         :param n: how many times to run each interview.
+         """
+         for interview in self.jobs.generate_interviews():
+             for iteration in range(self.run_config.parameters.n):
+                 if iteration > 0:
+                     yield interview.duplicate(
+                         iteration=iteration, cache=self.run_config.environment.cache
+                     )
+                 else:
+                     interview.cache = self.run_config.environment.cache
+                     yield interview
+
+     async def _conduct_interview(
+         self, interview: "Interview"
+     ) -> Tuple["Result", "Interview"]:
+         """Conducts an interview and returns the result object, along with the associated interview.
+
+         We return the interview because it is not populated with exceptions, if any.
+
+         :param interview: the interview to conduct
+         :return: the result of the interview
+
+         'extracted_answers' is a dictionary of the answers to the questions in the interview.
+         This is not the same as the generated_tokens---it can include substantial cleaning and processing / validation.
+         """
+         # the model buckets are used to track usage rates
+         # model_buckets = self.bucket_collection[interview.model]
+         # model_buckets = self.run_config.environment.bucket_collection[interview.model]
+
+         # get the results of the interview e.g., {'how_are_you':"Good" 'how_are_you_generated_tokens': "Good"}
+         extracted_answers: dict[str, str]
+         model_response_objects: List[EDSLResultObjectInput]
+
+         extracted_answers, model_response_objects = (
+             await interview.async_conduct_interview(self.run_config)
+         )
+         result = Result.from_interview(
+             interview=interview,
+             extracted_answers=extracted_answers,
+             model_response_objects=model_response_objects,
+         )
+         return result, interview
+
+     async def run(
+         self,
+     ) -> AsyncGenerator[tuple[Result, Interview], None]:
+         """Creates and processes tasks asynchronously, yielding results as they complete.
+
+         Uses TaskGroup for structured concurrency and automated cleanup.
+         Results are yielded as they become available while maintaining controlled concurrency.
+         """
+         interviews = list(self._expand_interviews())
+         self._initialized.set()
+
+         async def _process_single_interview(
+             interview: Interview, idx: int
+         ) -> InterviewResult:
+             try:
+                 result, interview = await self._conduct_interview(interview)
+                 self.run_config.environment.jobs_runner_status.add_completed_interview(
+                     result
+                 )
+                 result.order = idx
+                 return InterviewResult(result, interview, idx)
+             except Exception as e:
+                 # breakpoint()
+                 if self.run_config.parameters.stop_on_exception:
+                     raise
+                 # logger.error(f"Task failed with error: {e}")
+                 return None
+
+         # Process interviews in chunks
+         for i in range(0, len(interviews), self.MAX_CONCURRENT):
+             chunk = interviews[i : i + self.MAX_CONCURRENT]
+             tasks = [
+                 asyncio.create_task(_process_single_interview(interview, idx))
+                 for idx, interview in enumerate(chunk, start=i)
+             ]
+
+             try:
+                 # Wait for all tasks in the chunk to complete
+                 results = await asyncio.gather(
+                     *tasks,
+                     return_exceptions=not self.run_config.parameters.stop_on_exception
+                 )
+
+                 # Process successful results
+                 for result in (r for r in results if r is not None):
+                     yield result.result, result.interview
+
+             except Exception as e:
+                 if self.run_config.parameters.stop_on_exception:
+                     raise
+                 # logger.error(f"Chunk processing failed with error: {e}")
+                 continue
+
+             finally:
+                 # Clean up any remaining tasks
+                 for task in tasks:
+                     if not task.done():
+                         task.cancel()
edsl/jobs/buckets/BucketCollection.py
@@ -1,8 +1,15 @@
+ from typing import Optional
  from collections import UserDict
  from edsl.jobs.buckets.TokenBucket import TokenBucket
  from edsl.jobs.buckets.ModelBuckets import ModelBuckets

+ # from functools import wraps
+ from threading import RLock

+ from edsl.jobs.decorators import synchronized_class
+
+
+ @synchronized_class
  class BucketCollection(UserDict):
      """A Jobs object will have a whole collection of model buckets, as multiple models could be used.

@@ -10,11 +17,43 @@ class BucketCollection(UserDict):
      Models themselves are hashable, so this works.
      """

-     def __init__(self, infinity_buckets=False):
+     def __init__(self, infinity_buckets: bool = False):
+         """Create a new BucketCollection.
+         An infinity bucket is a bucket that never runs out of tokens or requests.
+         """
          super().__init__()
          self.infinity_buckets = infinity_buckets
          self.models_to_services = {}
          self.services_to_buckets = {}
+         self._lock = RLock()
+
+         from edsl.config import CONFIG
+         import os
+
+         url = os.environ.get("EDSL_REMOTE_TOKEN_BUCKET_URL", None)
+
+         if url == "None" or url is None:
+             self.remote_url = None
+             # print(f"Using remote token bucket URL: {url}")
+         else:
+             self.remote_url = url
+
+     @classmethod
+     def from_models(
+         cls, models_list: list, infinity_buckets: bool = False
+     ) -> "BucketCollection":
+         """Create a BucketCollection from a list of models."""
+         bucket_collection = cls(infinity_buckets=infinity_buckets)
+         for model in models_list:
+             bucket_collection.add_model(model)
+         return bucket_collection
+
+     def get_tokens(
+         self, model: "LanguageModel", bucket_type: str, num_tokens: int
+     ) -> int:
+         """Get the number of tokens remaining in the bucket."""
+         relevant_bucket = getattr(self[model], bucket_type)
+         return relevant_bucket.get_tokens(num_tokens)

      def __repr__(self):
          return f"BucketCollection({self.data})"
@@ -26,8 +65,8 @@ class BucketCollection(UserDict):

          # compute the TPS and RPS from the model
          if not self.infinity_buckets:
-             TPS = model.TPM / 60.0
-             RPS = model.RPM / 60.0
+             TPS = model.tpm / 60.0
+             RPS = model.rpm / 60.0
          else:
              TPS = float("inf")
              RPS = float("inf")
@@ -40,12 +79,14 @@ class BucketCollection(UserDict):
                      bucket_type="requests",
                      capacity=RPS,
                      refill_rate=RPS,
+                     remote_url=self.remote_url,
                  )
                  tokens_bucket = TokenBucket(
                      bucket_name=service,
                      bucket_type="tokens",
                      capacity=TPS,
                      refill_rate=TPS,
+                     remote_url=self.remote_url,
                  )
                  self.services_to_buckets[service] = ModelBuckets(
                      requests_bucket, tokens_bucket
edsl/jobs/buckets/TokenBucket.py
@@ -1,10 +1,55 @@
  from typing import Union, List, Any, Optional
  import asyncio
  import time
+ from threading import RLock
+ from edsl.jobs.decorators import synchronized_class

+ from typing import Union, List, Any, Optional
+ import asyncio
+ import time
+ from threading import RLock
+ from edsl.jobs.decorators import synchronized_class

+
+ @synchronized_class
  class TokenBucket:
-     """This is a token bucket used to respect rate limits to services."""
+     """This is a token bucket used to respect rate limits to services.
+     It can operate either locally or remotely via a REST API based on initialization parameters.
+     """
+
+     def __new__(
+         cls,
+         *,
+         bucket_name: str,
+         bucket_type: str,
+         capacity: Union[int, float],
+         refill_rate: Union[int, float],
+         remote_url: Optional[str] = None,
+     ):
+         """Factory method to create either a local or remote token bucket.
+
+         Args:
+             bucket_name: Name of the bucket
+             bucket_type: Type of the bucket
+             capacity: Maximum number of tokens
+             refill_rate: Rate at which tokens are refilled
+             remote_url: If provided, creates a remote token bucket client
+         """
+         if remote_url is not None:
+             # Import here to avoid circular imports
+             from edsl.jobs.buckets.TokenBucketClient import TokenBucketClient
+
+             return TokenBucketClient(
+                 bucket_name=bucket_name,
+                 bucket_type=bucket_type,
+                 capacity=capacity,
+                 refill_rate=refill_rate,
+                 api_base_url=remote_url,
+             )
+
+         # Create a local token bucket
+         instance = super(TokenBucket, cls).__new__(cls)
+         return instance

      def __init__(
          self,
@@ -13,11 +58,17 @@ class TokenBucket:
          bucket_type: str,
          capacity: Union[int, float],
          refill_rate: Union[int, float],
+         remote_url: Optional[str] = None,
      ):
+         # Skip initialization if this is a remote bucket
+         if remote_url is not None:
+             return
+
          self.bucket_name = bucket_name
          self.bucket_type = bucket_type
-         self.capacity = capacity  # Maximum number of tokens
+         self.capacity = capacity
          self.added_tokens = 0
+         self._lock = RLock()

          self.target_rate = (
              capacity * 60
@@ -225,25 +276,6 @@ class TokenBucket:

          return (self.num_released / elapsed_time) * 60

-         # # Filter log entries within the time window
-         # relevant_log = [(t, tokens) for t, tokens in self.log if t >= start_time]
-
-         # if len(relevant_log) < 2:
-         #     return 0  # Not enough data points to calculate throughput
-
-         # # Calculate total tokens used
-         # initial_tokens = relevant_log[0][1]
-         # final_tokens = relevant_log[-1][1]
-         # tokens_used = self.num_released - (final_tokens - initial_tokens)
-
-         # # Calculate actual time elapsed
-         # actual_time_elapsed = relevant_log[-1][0] - relevant_log[0][0]
-
-         # # Calculate throughput in tokens per minute
-         # throughput = (tokens_used / actual_time_elapsed) * 60
-
-         # return throughput
-

  if __name__ == "__main__":
      import doctest
edsl/jobs/buckets/TokenBucketAPI.py
@@ -0,0 +1,211 @@
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from typing import Union, Dict
+ from typing import Union, List, Any, Optional
+ from threading import RLock
+ from edsl.jobs.buckets.TokenBucket import TokenBucket  # Original implementation
+
+
+ def safe_float_for_json(value: float) -> Union[float, str]:
+     """Convert float('inf') to 'infinity' for JSON serialization.
+
+     Args:
+         value: The float value to convert
+
+     Returns:
+         Either the original float or the string 'infinity' if the value is infinite
+     """
+     if value == float("inf"):
+         return "infinity"
+     return value
+
+
+ app = FastAPI()
+
+ # In-memory storage for TokenBucket instances
+ buckets: Dict[str, TokenBucket] = {}
+
+
+ class TokenBucketCreate(BaseModel):
+     bucket_name: str
+     bucket_type: str
+     capacity: Union[int, float]
+     refill_rate: Union[int, float]
+
+
+ @app.get("/buckets")
+ async def list_buckets(
+     bucket_type: Optional[str] = None,
+     bucket_name: Optional[str] = None,
+     include_logs: bool = False,
+ ):
+     """List all buckets and their current status.
+
+     Args:
+         bucket_type: Optional filter by bucket type
+         bucket_name: Optional filter by bucket name
+         include_logs: Whether to include the full logs in the response
+     """
+     result = {}
+
+     for bucket_id, bucket in buckets.items():
+         # Apply filters if specified
+         if bucket_type and bucket.bucket_type != bucket_type:
+             continue
+         if bucket_name and bucket.bucket_name != bucket_name:
+             continue
+
+         # Get basic bucket info
+         bucket_info = {
+             "bucket_name": bucket.bucket_name,
+             "bucket_type": bucket.bucket_type,
+             "tokens": bucket.tokens,
+             "capacity": bucket.capacity,
+             "refill_rate": bucket.refill_rate,
+             "turbo_mode": bucket.turbo_mode,
+             "num_requests": bucket.num_requests,
+             "num_released": bucket.num_released,
+             "tokens_returned": bucket.tokens_returned,
+         }
+         for k, v in bucket_info.items():
+             if isinstance(v, float):
+                 bucket_info[k] = safe_float_for_json(v)
+
+         # Only include logs if requested
+         if include_logs:
+             bucket_info["log"] = bucket.log
+
+         result[bucket_id] = bucket_info
+
+     return result
+
+
+ @app.post("/bucket/{bucket_id}/add_tokens")
+ async def add_tokens(bucket_id: str, amount: float):
+     """Add tokens to an existing bucket."""
+     if bucket_id not in buckets:
+         raise HTTPException(status_code=404, detail="Bucket not found")
+
+     if not isinstance(amount, (int, float)) or amount != amount:  # Check for NaN
+         raise HTTPException(status_code=400, detail="Invalid amount specified")
+
+     if amount == float("inf") or amount == float("-inf"):
+         raise HTTPException(status_code=400, detail="Amount cannot be infinite")
+
+     bucket = buckets[bucket_id]
+     bucket.add_tokens(amount)
+
+     # Ensure we return a JSON-serializable float
+     current_tokens = float(bucket.tokens)
+     if not -1e308 <= current_tokens <= 1e308:  # Check if within JSON float bounds
+         current_tokens = 0.0  # or some other reasonable default
+
+     return {"status": "success", "current_tokens": safe_float_for_json(current_tokens)}
+
+
+ # @app.post("/bucket")
+ # async def create_bucket(bucket: TokenBucketCreate):
+ #     bucket_id = f"{bucket.bucket_name}_{bucket.bucket_type}"
+ #     if bucket_id in buckets:
+ #         raise HTTPException(status_code=400, detail="Bucket already exists")
+
+ #     # Create an actual TokenBucket instance
+ #     buckets[bucket_id] = TokenBucket(
+ #         bucket_name=bucket.bucket_name,
+ #         bucket_type=bucket.bucket_type,
+ #         capacity=bucket.capacity,
+ #         refill_rate=bucket.refill_rate,
+ #     )
+ #     return {"status": "created"}
+
+
+ @app.post("/bucket")
+ async def create_bucket(bucket: TokenBucketCreate):
+     if (
+         not isinstance(bucket.capacity, (int, float))
+         or bucket.capacity != bucket.capacity
+     ):  # Check for NaN
+         raise HTTPException(status_code=400, detail="Invalid capacity value")
+     if (
+         not isinstance(bucket.refill_rate, (int, float))
+         or bucket.refill_rate != bucket.refill_rate
+     ):  # Check for NaN
+         raise HTTPException(status_code=400, detail="Invalid refill rate value")
+     if bucket.capacity == float("inf") or bucket.refill_rate == float("inf"):
+         raise HTTPException(status_code=400, detail="Values cannot be infinite")
+     bucket_id = f"{bucket.bucket_name}_{bucket.bucket_type}"
+     if bucket_id in buckets:
+         # Instead of error, return success with "existing" status
+         return {
+             "status": "existing",
+             "bucket": {
+                 "capacity": safe_float_for_json(buckets[bucket_id].capacity),
+                 "refill_rate": safe_float_for_json(buckets[bucket_id].refill_rate),
+             },
+         }
+
+     # Create a new bucket
+     buckets[bucket_id] = TokenBucket(
+         bucket_name=bucket.bucket_name,
+         bucket_type=bucket.bucket_type,
+         capacity=bucket.capacity,
+         refill_rate=bucket.refill_rate,
+     )
+     return {"status": "created"}
+
+
+ @app.post("/bucket/{bucket_id}/get_tokens")
+ async def get_tokens(bucket_id: str, amount: float, cheat_bucket_capacity: bool = True):
+     if bucket_id not in buckets:
+         raise HTTPException(status_code=404, detail="Bucket not found")
+
+     bucket = buckets[bucket_id]
+     await bucket.get_tokens(amount, cheat_bucket_capacity)
+     return {"status": "success"}
+
+
+ @app.post("/bucket/{bucket_id}/turbo_mode/{state}")
+ async def set_turbo_mode(bucket_id: str, state: bool):
+     if bucket_id not in buckets:
+         raise HTTPException(status_code=404, detail="Bucket not found")
+
+     bucket = buckets[bucket_id]
+     if state:
+         bucket.turbo_mode_on()
+     else:
+         bucket.turbo_mode_off()
+     return {"status": "success"}
+
+
+ @app.get("/bucket/{bucket_id}/status")
+ async def get_bucket_status(bucket_id: str):
+     if bucket_id not in buckets:
+         raise HTTPException(status_code=404, detail="Bucket not found")
+
+     bucket = buckets[bucket_id]
+     status = {
+         "tokens": bucket.tokens,
+         "capacity": bucket.capacity,
+         "refill_rate": bucket.refill_rate,
+         "turbo_mode": bucket.turbo_mode,
+         "num_requests": bucket.num_requests,
+         "num_released": bucket.num_released,
+         "tokens_returned": bucket.tokens_returned,
+         "log": bucket.log,
+     }
+     for k, v in status.items():
+         if isinstance(v, float):
+             status[k] = safe_float_for_json(v)
+
+     for index, entry in enumerate(status["log"]):
+         ts, value = entry
+         status["log"][index] = (ts, safe_float_for_json(value))
+
+     # print(status)
+     return status
+
+
+ if __name__ == "__main__":
+     import uvicorn
+
+     uvicorn.run(app, host="0.0.0.0", port=8001)
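For orientation, here is a minimal, hypothetical client session against the endpoints added in edsl/jobs/buckets/TokenBucketAPI.py above. It assumes the API is running as in the file's __main__ block (uvicorn on localhost:8001); the requests library, the example bucket names, and the values shown are illustrative and are not part of the package.

# Illustrative sketch only; not part of the edsl package.
import requests

BASE = "http://localhost:8001"

# Create a bucket; the JSON body mirrors the TokenBucketCreate model.
# Re-creating an existing bucket returns {"status": "existing", ...}.
requests.post(
    f"{BASE}/bucket",
    json={
        "bucket_name": "openai",
        "bucket_type": "tokens",
        "capacity": 100_000,
        "refill_rate": 100_000,
    },
)

# Draw tokens; the bucket id is f"{bucket_name}_{bucket_type}".
requests.post(f"{BASE}/bucket/openai_tokens/get_tokens", params={"amount": 500})

# Inspect current state (tokens, capacity, refill_rate, log, ...).
print(requests.get(f"{BASE}/bucket/openai_tokens/status").json())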