lionagi 0.0.111__py3-none-any.whl → 0.0.113__py3-none-any.whl
- lionagi/__init__.py +7 -2
- lionagi/bridge/__init__.py +7 -0
- lionagi/bridge/langchain.py +131 -0
- lionagi/bridge/llama_index.py +157 -0
- lionagi/configs/__init__.py +7 -0
- lionagi/configs/oai_configs.py +49 -0
- lionagi/configs/openrouter_config.py +49 -0
- lionagi/core/__init__.py +15 -0
- lionagi/{session/conversation.py → core/conversations.py} +10 -17
- lionagi/core/flows.py +1 -0
- lionagi/core/instruction_sets.py +1 -0
- lionagi/{session/message.py → core/messages.py} +5 -5
- lionagi/core/sessions.py +262 -0
- lionagi/datastore/__init__.py +1 -0
- lionagi/datastore/chroma.py +1 -0
- lionagi/datastore/deeplake.py +1 -0
- lionagi/datastore/elasticsearch.py +1 -0
- lionagi/datastore/lantern.py +1 -0
- lionagi/datastore/pinecone.py +1 -0
- lionagi/datastore/postgres.py +1 -0
- lionagi/datastore/qdrant.py +1 -0
- lionagi/loader/__init__.py +12 -0
- lionagi/loader/chunker.py +157 -0
- lionagi/loader/reader.py +124 -0
- lionagi/objs/__init__.py +7 -0
- lionagi/objs/messenger.py +163 -0
- lionagi/objs/tool_registry.py +247 -0
- lionagi/schema/__init__.py +11 -0
- lionagi/schema/base_condition.py +1 -0
- lionagi/schema/base_schema.py +239 -0
- lionagi/schema/base_tool.py +9 -0
- lionagi/schema/data_logger.py +94 -0
- lionagi/services/__init__.py +14 -0
- lionagi/services/anthropic.py +1 -0
- lionagi/services/anyscale.py +0 -0
- lionagi/services/azure.py +1 -0
- lionagi/{api/oai_service.py → services/base_api_service.py} +74 -148
- lionagi/services/bedrock.py +0 -0
- lionagi/services/chatcompletion.py +48 -0
- lionagi/services/everlyai.py +0 -0
- lionagi/services/gemini.py +0 -0
- lionagi/services/gpt4all.py +0 -0
- lionagi/services/huggingface.py +0 -0
- lionagi/services/litellm.py +1 -0
- lionagi/services/localai.py +0 -0
- lionagi/services/mistralai.py +0 -0
- lionagi/services/oai.py +34 -0
- lionagi/services/ollama.py +1 -0
- lionagi/services/openllm.py +0 -0
- lionagi/services/openrouter.py +32 -0
- lionagi/services/perplexity.py +0 -0
- lionagi/services/predibase.py +0 -0
- lionagi/services/rungpt.py +0 -0
- lionagi/services/service_objs.py +282 -0
- lionagi/services/vllm.py +0 -0
- lionagi/services/xinference.py +0 -0
- lionagi/structure/__init__.py +7 -0
- lionagi/structure/relationship.py +128 -0
- lionagi/structure/structure.py +160 -0
- lionagi/tests/__init__.py +0 -0
- lionagi/tests/test_flatten_util.py +426 -0
- lionagi/tools/__init__.py +0 -0
- lionagi/tools/coder.py +1 -0
- lionagi/tools/planner.py +1 -0
- lionagi/tools/prompter.py +1 -0
- lionagi/tools/sandbox.py +1 -0
- lionagi/tools/scorer.py +1 -0
- lionagi/tools/summarizer.py +1 -0
- lionagi/tools/validator.py +1 -0
- lionagi/utils/__init__.py +46 -8
- lionagi/utils/api_util.py +63 -416
- lionagi/utils/call_util.py +347 -0
- lionagi/utils/flat_util.py +540 -0
- lionagi/utils/io_util.py +102 -0
- lionagi/utils/load_utils.py +190 -0
- lionagi/utils/sys_util.py +85 -660
- lionagi/utils/tool_util.py +82 -199
- lionagi/utils/type_util.py +81 -0
- lionagi/version.py +1 -1
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/METADATA +44 -15
- lionagi-0.0.113.dist-info/RECORD +84 -0
- lionagi/api/__init__.py +0 -8
- lionagi/api/oai_config.py +0 -16
- lionagi/session/__init__.py +0 -7
- lionagi/session/session.py +0 -380
- lionagi/utils/doc_util.py +0 -331
- lionagi/utils/log_util.py +0 -86
- lionagi-0.0.111.dist-info/RECORD +0 -20
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/LICENSE +0 -0
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/WHEEL +0 -0
- {lionagi-0.0.111.dist-info → lionagi-0.0.113.dist-info}/top_level.txt +0 -0
lionagi/utils/api_util.py
CHANGED
@@ -1,439 +1,86 @@
-import asyncio
 import logging
 import re
-from .sys_util import append_to_jsonl
-
-
-@dataclass
-class StatusTracker:
-    """
-    Class for keeping track of various task statuses.
-
-    This class serves as a simple way to monitor different types of task
-    outcomes and errors within a system. It uses dataclasses for easy
-    creation and management of state.
-
-    Attributes:
-        num_tasks_started:
-            The number of tasks that have been initiated.
-        num_tasks_in_progress:
-            The number of tasks currently being processed.
-        num_tasks_succeeded:
-            The number of tasks that have completed successfully.
-        num_tasks_failed:
-            The number of tasks that have failed.
-        num_rate_limit_errors:
-            The number of tasks that failed due to rate limiting.
-        num_api_errors:
-            The number of tasks that failed due to API errors.
-        num_other_errors:
-            The number of tasks that failed due to other errors.
+from typing import Callable
+
+
+def api_method(http_session, method: str = "post") -> Callable:
     """
-    num_tasks_in_progress: int = 0
-    num_tasks_succeeded: int = 0
-    num_tasks_failed: int = 0
-    num_rate_limit_errors: int = 0
-    num_api_errors: int = 0
-    num_other_errors: int = 0
-
+    Retrieves the appropriate HTTP method from an HTTP session object.

+    Parameters:
+        http_session: The HTTP session object from which to retrieve the method.
+        method (str): The HTTP method to retrieve. Defaults to 'post'.

-    concurrent task processing in an orderly and controlled manner.
+    Returns:
+        Callable: The HTTP method function from the session object.

-            A queue to hold items for asynchronous processing.
-        _stop_event (asyncio.Event):
-            An event to signal when the queue should stop processing.
+    Raises:
+        ValueError: If the provided method is not one of ['post', 'delete', 'head', 'options', 'patch'].

-            Add an item to the queue for processing.
-        dequeue():
-            Remove and return an item from the queue.
-        join():
-            Wait until all items in the queue have been processed.
-        stop():
-            Signal to stop processing new items in the queue.
-        stopped():
-            Check if the queue has been signaled to stop.
-        process_requests(func):
-            Process items using a provided function.
+    Examples:
+        api_method = api_methods(session, "post") # Retrieves the 'post' method from the session
     """
-        Example:
-            >>> async_queue = AsyncQueue()
-            >>> asyncio.run(async_queue.enqueue('Task 1'))
-        """
-        await self.queue.put(item)
-
-    async def dequeue(self) -> Any:
-        """
-        Asynchronously remove and return an item from the queue.
-
-        If the queue is empty, this method will wait until an item is available.
-
-        Returns:
-            Any: The next item from the queue.
-
-        Example:
-            >>> async_queue = AsyncQueue()
-            >>> asyncio.run(async_queue.enqueue('Task 1'))
-            >>> asyncio.run(async_queue.dequeue())
-            'Task 1'
-        """
-        return await self.queue.get()
-
-    async def join(self) -> None:
-        """
-        Asynchronously wait until all items in the queue have been processed.
-
-        This method blocks until every item that has been enqueued is processed,
-        ensuring that all tasks are completed.
-
-        Example:
-            >>> async_queue = AsyncQueue()
-            >>> asyncio.run(async_queue.enqueue('Task 1'))
-            >>> asyncio.run(async_queue.join()) # This will block until 'Task 1' is processed.
-        """
-        await self.queue.join()
-
-    async def stop(self) -> None:
-        """
-        Signal the queue to stop processing new items.
-
-        Once called, the queue will not process any new items after the current ones
-        are completed, allowing for a graceful shutdown.
-
-        Example:
-            >>> async_queue = AsyncQueue()
-            >>> asyncio.run(async_queue.stop()) # This signals the queue to stop processing.
-        """
-        self._stop_event.set()
-
-    def stopped(self) -> bool:
-        """
-        Check if the queue has been signaled to stop processing.
-
-        Returns:
-            bool: True if a stop has been signaled, False otherwise.
-
-        Example:
-            >>> async_queue = AsyncQueue()
-            >>> asyncio.run(async_queue.stop())
-            >>> async_queue.stopped()
-            True
-        """
-        return self._stop_event.is_set()
-
-    async def process_requests(self, func: Callable[[Any], Any]) -> None:
-        """
-        Asynchronously process items from the queue using the provided function.
-
-        Continuously dequeues items and applies the given function to each.
-        The processing stops when the queue is signaled to stop or a sentinel value (`None`) is dequeued.
-
-        Parameters:
-            func (Callable[[Any], Any]): A coroutine function to process items from the queue.
-
-        Example:
-            >>> async def sample_processing(task):
-            ...     print("Processing:", task)
-            >>> async_queue = AsyncQueue()
-            >>> asyncio.run(async_queue.enqueue('Task 1'))
-            >>> asyncio.run(async_queue.process_requests(sample_processing))
-            Processing: Task 1
-        """
-        while not self.stopped():
-            item = await self.dequeue()
-            if item is None: # Using `None` as a sentinel value to cease processing.
-                await self.stop()
-                break
-            await func(item)
-
-
-class RateLimiter(ABC):
+    if method not in ["post", "delete", "head", "options", "patch"]:
+        raise ValueError("Invalid request, method must be in ['post', 'delete', 'head', 'options', 'patch']")
+    elif method == "post":
+        return http_session.post
+    elif method == "delete":
+        return http_session.delete
+    elif method == "head":
+        return http_session.head
+    elif method == "options":
+        return http_session.options
+    elif method == "patch":
+        return http_session.patch
+
+def api_endpoint_from_url(request_url: str) -> str:
     """
+    Extracts the API endpoint from a given URL.

+    Parameters:
+        request_url (str): The URL from which to extract the API endpoint.

-            Maximum number of requests permitted per minute.
-        max_tokens_per_minute (int):
-            Maximum number of tokens that can accumulate per minute.
-        available_request_capacity (int):
-            Current number of available request slots.
-        available_token_capacity (int):
-            Current number of available tokens.
+    Returns:
+        str: The extracted API endpoint, or an empty string if no match is found.

-        calculate_num_token:
-            Method to calculate required tokens for a request.
+    Examples:
+        endpoint = api_endpoint_from_url("https://api.example.com/v1/users")
+        # endpoint will be 'users'
     """
+    match = re.search(r"^https://[^/]+/v\d+/(.+)$", request_url)
+    return match.group(1) if match else ""

-        """
-        Initializes the RateLimiter with specified maximum request and token limits.
-
-        Parameters:
-            max_requests_per_minute (int): Maximum requests allowed per minute.
-
-            max_tokens_per_minute (int): Maximum tokens allowed to accumulate per minute.
-
-        Example:
-            >>> class MyRateLimiter(RateLimiter):
-            ...     async def rate_limit_replenisher(self) -> NoReturn:
-            ...         # Implementation for rate replenishment.
-            ...     def calculate_num_token(self, payload: Dict[str, Any], api_endpoint: str) -> int:
-            ...         # Implementation for token calculation.
-            ...
-            >>> limiter = MyRateLimiter(100, 200)
-        """
-        self.max_requests_per_minute = max_requests_per_minute
-        self.max_tokens_per_minute = max_tokens_per_minute
-        self.available_request_capacity = max_requests_per_minute
-        self.available_token_capacity = max_tokens_per_minute
-
-    @abstractmethod
-    async def rate_limit_replenisher(self) -> NoReturn:
-        """
-        Asynchronously replenishes rate limit capacities.
-
-        This coroutine should be implemented to periodically restore `available_request_capacity`
-        and `available_token_capacity` according to specific rules defined in subclasses.
-
-        Example:
-            >>> class MyRateLimiter(RateLimiter):
-            ...     async def rate_limit_replenisher(self) -> NoReturn:
-            ...         while True:
-            ...             # Replenishment logic here
-            ...
-            >>> limiter = MyRateLimiter(100, 200)
-        """
-
-        ...
-
-    @abstractmethod
-    def calculate_num_token(self, payload: Dict[str, Any], api_endpoint: str) -> int:
-        """
-        Calculates required tokens for a request.
-
-        Subclasses should implement this method to determine the number of tokens needed based
-        on the request payload and target endpoint.
-
-        Parameters:
-            payload (Dict[str, Any]): Payload of the request.
-
-            api_endpoint (str): Target API endpoint for the request.
-
-        Returns:
-            int: Calculated number of tokens required for the request.
-
-        Example:
-            >>> class MyRateLimiter(RateLimiter):
-            ...     def calculate_num_token(self, payload: Dict[str, Any], api_endpoint: str) -> int:
-            ...         return len(payload.get('data', '')) // 10
-            ...
-            >>> limiter = MyRateLimiter(100, 200)
-            >>> limiter.calculate_num_token({'data': '12345'}, 'api/send')
-            0
-        """
-
-        ...
-
-
-class BaseAPIService(ABC):
+def api_error(response_json: dict) -> bool:
     """
+    Logs a warning and returns True if an error is found in the API response.

-    subclassed for concrete implementations of specific API service interactions.
+    Parameters:
+        response_json (dict): The JSON response from the API call.

-            The API key used for authenticating with the API service.
-        token_encoding_name (str):
-            The encoding for the API token.
-        max_attempts (int):
-            The maximum number of retry attempts for API calls.
-        status_tracker (StatusTracker):
-            Tracker for API call statuses.
-        queue (AsyncQueue):
-            Queue for managing API call tasks.
-        rate_limiter (RateLimiter):
-            Limiter to control the rate of API calls.
-        append_to_jsonl (callable):
-            Callable for appending data to a file in JSONL format.
+    Returns:
+        bool: True if an error is present in the response, False otherwise.

-        handle_error:
-            Handle errors by logging and saving details to a JSONL file.
-        api_endpoint_from_url:
-            Extract the API endpoint from a URL.
-        task_id_generator_function:
-            Generate a sequence of unique task IDs.
+    Examples:
+        if api_error(response):
+            # Handle the error
     """
+    if "error" in response_json:
+        logging.warning(f"API call failed with error: {response_json['error']}")
+        return True
+    return False

-        token_encoding_name: str,
-        max_attempts: int,
-        max_requests_per_minute: int,
-        max_tokens_per_minute: int,
-        ratelimiter,
-        status_tracker: Optional[StatusTracker] = None,
-        queue: Optional[AsyncQueue] = None,
-    ) -> None:
-        """
-        Initializes the BaseAPIService with necessary configuration.
-
-        Parameters:
-            api_key (str): The API key for authentication.
-
-            token_encoding_name (str): Encoding name for the API token.
-
-            max_attempts (int): Maximum number of attempts for an API call.
-
-            status_tracker (Optional[StatusTracker]): Tracker for API call statuses.
-
-            ratelimiter: Limiter for API call rates.
-
-            queue (Optional[AsyncQueue]): Queue for managing API tasks.
-
-        Example:
-            >>> class MyAPIService(BaseAPIService):
-            ...     # Implementation details here
-            ...
-            >>> service = MyAPIService(api_key="12345", token_encoding_name="utf-8",
-            ...                        max_attempts=3, status_tracker=None,
-            ...                        rate_limiter=ratelimiter, queue=None)
-        """
-        self.api_key = api_key
-        self.token_encoding_name = token_encoding_name
-        self.max_attempts = max_attempts
-        self.status_tracker = status_tracker or StatusTracker()
-        self.queue = queue or AsyncQueue()
-        self.rate_limiter = ratelimiter(max_requests_per_minute, max_tokens_per_minute)
-        self.append_to_jsonl = append_to_jsonl
-
-    @abstractmethod
-    async def call_api(self) -> Any:
-        """
-        Abstract method to be implemented for making specific API calls.
-
-        This method should define the logic for interacting with an API endpoint
-        and must be implemented in subclasses.
-
-        Example:
-            >>> class MyAPIService(BaseAPIService):
-            ...     async def call_api(self):
-            ...         # Implementation details here
-            ...
-        """
-
-        ...
-
-    def handle_error(
-        self,
-        error: Exception,
-        payload: Any,
-        metadata: Any,
-        save_filepath: str
-    ) -> None:
-        """
-        Handles exceptions that occur during the API call process.
-
-        Updates the status tracker to indicate the error and saves details to a JSONL file.
-
-        Parameters:
-            error (Exception): The exception that was raised during the API call.
-
-            payload (Any): The data payload that was used for the API call.
-
-            metadata (Any): Additional metadata related to the API call.
-
-            save_filepath (str): The file path where error details should be saved.
-        """
-        self.status_tracker.num_tasks_in_progress -= 1
-        self.status_tracker.num_tasks_failed += 1
-        data = (
-            [payload, [str(error)], metadata]
-            if metadata
-            else [payload, [str(error)]]
-        )
-        self.append_to_jsonl(data, save_filepath)
-        logging.error(f"Request failed after all attempts. Saving errors: {data}")
-
-    @staticmethod
-    def api_endpoint_from_url(request_url: str) -> str:
-        """
-        Extracts the endpoint from an API request URL.
-
-        Parameters:
-            request_url (str): The URL from which to extract the API endpoint.
-
-        Returns:
-            str: The extracted API endpoint.
+def api_rate_limit_error(response_json: dict) -> bool:
+    """
+    Checks if the API response indicates a rate limit error.

-            'test_endpoint'
-        """
-        match = re.search(r"^https://[^/]+/v\d+/(.+)$", request_url)
-        if match:
-            return match.group(1)
-        else:
-            return ""
+    Parameters:
+        response_json (dict): The JSON response from the API call.

-        """
-        Generates a continuous sequence of integers for task IDs.
-
-        Yields:
-            int: The next task ID in the sequence (0, 1, 2, ...).
+    Returns:
+        bool: True if the response contains a rate limit error message, False otherwise.

-        """
-        task_id = 0
-        while True:
-            yield task_id
-            task_id += 1
+    Examples:
+        if rate_limit_error(response):
+            # Handle the rate limit error
+    """
+    return "Rate limit" in response_json["error"].get("message", "")