kailash 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +31 -0
- kailash/__main__.py +11 -0
- kailash/cli/__init__.py +5 -0
- kailash/cli/commands.py +563 -0
- kailash/manifest.py +778 -0
- kailash/nodes/__init__.py +23 -0
- kailash/nodes/ai/__init__.py +26 -0
- kailash/nodes/ai/agents.py +417 -0
- kailash/nodes/ai/models.py +488 -0
- kailash/nodes/api/__init__.py +52 -0
- kailash/nodes/api/auth.py +567 -0
- kailash/nodes/api/graphql.py +480 -0
- kailash/nodes/api/http.py +598 -0
- kailash/nodes/api/rate_limiting.py +572 -0
- kailash/nodes/api/rest.py +665 -0
- kailash/nodes/base.py +1032 -0
- kailash/nodes/base_async.py +128 -0
- kailash/nodes/code/__init__.py +32 -0
- kailash/nodes/code/python.py +1021 -0
- kailash/nodes/data/__init__.py +125 -0
- kailash/nodes/data/readers.py +496 -0
- kailash/nodes/data/sharepoint_graph.py +623 -0
- kailash/nodes/data/sql.py +380 -0
- kailash/nodes/data/streaming.py +1168 -0
- kailash/nodes/data/vector_db.py +964 -0
- kailash/nodes/data/writers.py +529 -0
- kailash/nodes/logic/__init__.py +6 -0
- kailash/nodes/logic/async_operations.py +702 -0
- kailash/nodes/logic/operations.py +551 -0
- kailash/nodes/transform/__init__.py +5 -0
- kailash/nodes/transform/processors.py +379 -0
- kailash/runtime/__init__.py +6 -0
- kailash/runtime/async_local.py +356 -0
- kailash/runtime/docker.py +697 -0
- kailash/runtime/local.py +434 -0
- kailash/runtime/parallel.py +557 -0
- kailash/runtime/runner.py +110 -0
- kailash/runtime/testing.py +347 -0
- kailash/sdk_exceptions.py +307 -0
- kailash/tracking/__init__.py +7 -0
- kailash/tracking/manager.py +885 -0
- kailash/tracking/metrics_collector.py +342 -0
- kailash/tracking/models.py +535 -0
- kailash/tracking/storage/__init__.py +0 -0
- kailash/tracking/storage/base.py +113 -0
- kailash/tracking/storage/database.py +619 -0
- kailash/tracking/storage/filesystem.py +543 -0
- kailash/utils/__init__.py +0 -0
- kailash/utils/export.py +924 -0
- kailash/utils/templates.py +680 -0
- kailash/visualization/__init__.py +62 -0
- kailash/visualization/api.py +732 -0
- kailash/visualization/dashboard.py +951 -0
- kailash/visualization/performance.py +808 -0
- kailash/visualization/reports.py +1471 -0
- kailash/workflow/__init__.py +15 -0
- kailash/workflow/builder.py +245 -0
- kailash/workflow/graph.py +827 -0
- kailash/workflow/mermaid_visualizer.py +628 -0
- kailash/workflow/mock_registry.py +63 -0
- kailash/workflow/runner.py +302 -0
- kailash/workflow/state.py +238 -0
- kailash/workflow/visualization.py +588 -0
- kailash-0.1.0.dist-info/METADATA +710 -0
- kailash-0.1.0.dist-info/RECORD +69 -0
- kailash-0.1.0.dist-info/WHEEL +5 -0
- kailash-0.1.0.dist-info/entry_points.txt +2 -0
- kailash-0.1.0.dist-info/licenses/LICENSE +21 -0
- kailash-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,572 @@
|
|
1
|
+
"""Rate limiting and throttling utilities for API requests in the Kailash SDK.
|
2
|
+
|
3
|
+
This module provides rate limiting capabilities to prevent API abuse and respect
|
4
|
+
service limits. It supports various rate limiting strategies including token bucket,
|
5
|
+
sliding window, and fixed window approaches.
|
6
|
+
|
7
|
+
Key Components:
|
8
|
+
- RateLimiter: Base rate limiter interface
|
9
|
+
- TokenBucketRateLimiter: Token bucket algorithm implementation
|
10
|
+
- SlidingWindowRateLimiter: Sliding window algorithm implementation
|
11
|
+
- RateLimitedAPINode: Wrapper node that adds rate limiting to any API node
|
12
|
+
"""
|
13
|
+
|
14
|
+
import asyncio
|
15
|
+
import threading
|
16
|
+
import time
|
17
|
+
from abc import ABC, abstractmethod
|
18
|
+
from collections import deque
|
19
|
+
from dataclasses import dataclass
|
20
|
+
from typing import Any, Dict, Optional
|
21
|
+
|
22
|
+
from kailash.nodes.base import Node, NodeParameter, register_node
|
23
|
+
from kailash.nodes.base_async import AsyncNode
|
24
|
+
from kailash.sdk_exceptions import NodeExecutionError
|
25
|
+
|
26
|
+
|
27
|
+
@dataclass
class RateLimitConfig:
    """Settings that control how aggressively API requests are throttled.

    Attributes:
        max_requests: Number of requests permitted per ``time_window``.
        time_window: Length of the limiting window, in seconds.
        strategy: Which limiter implementation to use
            (``"token_bucket"`` or ``"sliding_window"``).
        burst_limit: Optional cap on burst size for the token bucket;
            when None the bucket capacity falls back to ``max_requests``.
        backoff_factor: Multiplier applied to the wait time on each
            successive rate-limited retry.
        max_backoff: Upper bound, in seconds, on any single backoff wait.
    """

    max_requests: int = 100
    time_window: float = 60.0
    strategy: str = "token_bucket"
    burst_limit: Optional[int] = None
    backoff_factor: float = 1.0
    max_backoff: float = 300.0
|
40
|
+
|
41
|
+
|
42
|
+
class RateLimiter(ABC):
    """Common interface shared by every rate-limiter implementation.

    Concrete subclasses decide *how* request capacity is tracked; this
    base class only fixes the contract and supplies a lock that
    implementations use to keep their state thread-safe.
    """

    def __init__(self, config: RateLimitConfig):
        """Store the configuration and prepare the shared lock.

        Args:
            config: Rate limiting configuration for this limiter.
        """
        self.config = config
        # Guards mutable limiter state against concurrent callers.
        self._lock = threading.Lock()

    @abstractmethod
    def can_proceed(self) -> bool:
        """Report whether a request could run right now without limiting.

        Returns:
            True when a request may proceed, False when it would be
            rate limited.
        """

    @abstractmethod
    def wait_time(self) -> float:
        """Return how long to wait before the next request may run.

        Returns:
            Wait time in seconds (0 if a request can proceed immediately).
        """

    @abstractmethod
    def consume(self) -> bool:
        """Atomically claim capacity for a single request.

        Returns:
            True if capacity was claimed, False if rate limited.
        """

    @abstractmethod
    def reset(self) -> None:
        """Discard all state and return the limiter to its initial condition."""
|
88
|
+
|
89
|
+
|
90
|
+
class TokenBucketRateLimiter(RateLimiter):
    """Token bucket rate limiter implementation.

    A bucket holds up to ``burst_limit`` (or ``max_requests``) tokens and
    is topped up continuously at ``max_requests / time_window`` tokens per
    second. Each admitted request removes one token, so short bursts are
    allowed up to the bucket capacity while the long-run rate stays bounded.

    Design Purpose:
    - Allow burst requests when quota is available
    - Smooth out request rate over time
    - Simple and efficient implementation
    """

    def __init__(self, config: RateLimitConfig):
        """Start with a completely full bucket.

        Args:
            config: Rate limiting configuration.
        """
        super().__init__(config)
        self.bucket_size = config.burst_limit or config.max_requests
        self.tokens = float(self.bucket_size)
        self.refill_rate = config.max_requests / config.time_window
        self.last_refill = time.time()

    def _refill_tokens(self) -> None:
        """Credit tokens earned since the previous refill."""
        now = time.time()
        earned = (now - self.last_refill) * self.refill_rate
        # The bucket never grows beyond its capacity, however long we idled.
        self.tokens = min(self.bucket_size, self.tokens + earned)
        self.last_refill = now

    def can_proceed(self) -> bool:
        """Check whether at least one whole token is currently available.

        Returns:
            True if a request can proceed, False if rate limited.
        """
        with self._lock:
            self._refill_tokens()
            return self.tokens >= 1.0

    def wait_time(self) -> float:
        """Compute how long until one full token will be available.

        Returns:
            Wait time in seconds (0 if a request can proceed immediately).
        """
        with self._lock:
            self._refill_tokens()
            if self.tokens >= 1.0:
                return 0.0
            # Time for the refill stream to supply the missing fraction.
            return (1.0 - self.tokens) / self.refill_rate

    def consume(self) -> bool:
        """Take one token if one is available.

        Returns:
            True if a token was consumed, False if rate limited.
        """
        with self._lock:
            self._refill_tokens()
            if self.tokens < 1.0:
                return False
            self.tokens -= 1.0
            return True

    def reset(self) -> None:
        """Refill the bucket completely and restart the refill clock."""
        with self._lock:
            self.tokens = float(self.bucket_size)
            self.last_refill = time.time()
|
167
|
+
|
168
|
+
|
169
|
+
class SlidingWindowRateLimiter(RateLimiter):
    """Sliding window rate limiter implementation.

    This rate limiter tracks request timestamps in a sliding window and
    enforces the rate limit based on the number of requests in the current
    window.

    Design Purpose:
    - More accurate rate limiting than fixed windows
    - Prevents sudden bursts at window boundaries
    - Memory usage grows with request rate
    """

    def __init__(self, config: RateLimitConfig):
        """Initialize the sliding window rate limiter.

        Args:
            config: Rate limiting configuration
        """
        super().__init__(config)
        # Timestamps of admitted requests, oldest first.
        self.requests: deque = deque()

    def _cleanup_old_requests(self) -> None:
        """Drop timestamps that have aged out of the current time window."""
        cutoff = time.time() - self.config.time_window
        while self.requests and self.requests[0] < cutoff:
            self.requests.popleft()

    def can_proceed(self) -> bool:
        """Check if a request can proceed without hitting rate limits.

        Returns:
            True if request can proceed, False if rate limited
        """
        with self._lock:
            self._cleanup_old_requests()
            return len(self.requests) < self.config.max_requests

    def wait_time(self) -> float:
        """Get the time to wait before the next request can proceed.

        Returns:
            Wait time in seconds (0 if can proceed immediately)
        """
        with self._lock:
            self._cleanup_old_requests()
            if len(self.requests) < self.config.max_requests:
                return 0.0

            # Wait until the oldest request falls outside the window.
            # Clamp at zero: the oldest entry may expire between the
            # cleanup above and this time.time() call, which previously
            # could yield a small negative wait.
            oldest_request = self.requests[0]
            return max(0.0, oldest_request + self.config.time_window - time.time())

    def consume(self) -> bool:
        """Consume a rate limit slot for a request.

        Returns:
            True if the request was recorded, False if rate limited
        """
        with self._lock:
            self._cleanup_old_requests()
            if len(self.requests) < self.config.max_requests:
                self.requests.append(time.time())
                return True
            return False

    def reset(self) -> None:
        """Reset the rate limiter state by forgetting all recorded requests."""
        with self._lock:
            self.requests.clear()
|
240
|
+
|
241
|
+
|
242
|
+
def create_rate_limiter(config: RateLimitConfig) -> RateLimiter:
    """Factory function to create rate limiters.

    Args:
        config: Rate limiting configuration; ``config.strategy`` selects
            the implementation.

    Returns:
        Configured rate limiter instance.

    Raises:
        ValueError: If strategy is not supported.
    """
    # Dispatch table keeps strategy registration in one place.
    strategies = {
        "token_bucket": TokenBucketRateLimiter,
        "sliding_window": SlidingWindowRateLimiter,
    }
    limiter_cls = strategies.get(config.strategy)
    if limiter_cls is None:
        raise ValueError(f"Unsupported rate limiting strategy: {config.strategy}")
    return limiter_cls(config)
|
260
|
+
|
261
|
+
|
262
|
+
@register_node(alias="RateLimitedAPI")
class RateLimitedAPINode(Node):
    """Wrapper node that adds rate limiting to any API node.

    This node wraps any other API node and applies rate limiting to its execution.
    It can be used to ensure API calls respect service limits and prevent abuse.

    Design Purpose:
    - Add rate limiting to existing API nodes without modification
    - Support multiple rate limiting strategies
    - Provide automatic backoff and retry with rate limiting
    - Enable configuration-driven rate limiting policies

    Upstream Usage:
    - Workflow: Creates and configures with rate limiting policies
    - API integration workflows: Wraps other API nodes

    Downstream Consumers:
    - Same as the wrapped node
    """

    def __init__(
        self, wrapped_node: Node, rate_limit_config: RateLimitConfig, **kwargs
    ):
        """Initialize the rate limited API node.

        Args:
            wrapped_node: The node to wrap with rate limiting
            rate_limit_config: Rate limiting configuration
            **kwargs: Additional parameters passed to base Node
        """
        super().__init__(**kwargs)
        self.wrapped_node = wrapped_node
        self.rate_limiter = create_rate_limiter(rate_limit_config)
        # NOTE(review): this rebinds ``self.config`` after the base Node
        # __init__ has run — confirm the base class does not rely on that
        # attribute for its own configuration.
        self.config = rate_limit_config

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Define the parameters this node accepts.

        Returns:
            Dictionary of parameter definitions: the wrapped node's
            parameters plus the rate-limiting control flags.
        """
        params = self.wrapped_node.get_parameters().copy()

        # Add rate limiting specific parameters on top of the wrapped set.
        params.update(
            {
                "respect_rate_limits": NodeParameter(
                    name="respect_rate_limits",
                    type=bool,
                    required=False,
                    default=True,
                    description="Whether to respect rate limits (if False, acts as passthrough)",
                ),
                "wait_on_rate_limit": NodeParameter(
                    name="wait_on_rate_limit",
                    type=bool,
                    required=False,
                    default=True,
                    description="Whether to wait when rate limited (if False, raises exception)",
                ),
            }
        )

        return params

    def get_output_schema(self) -> Dict[str, NodeParameter]:
        """Define the output schema for this node.

        Returns:
            Dictionary of output parameter definitions: the wrapped node's
            schema plus a ``rate_limit_metadata`` entry.
        """
        schema = self.wrapped_node.get_output_schema().copy()

        schema["rate_limit_metadata"] = NodeParameter(
            name="rate_limit_metadata",
            type=dict,
            required=False,
            description="Rate limiting metadata and statistics",
        )

        return schema

    def run(self, **kwargs) -> Dict[str, Any]:
        """Execute the wrapped node with rate limiting.

        Args:
            respect_rate_limits (bool, optional): Whether to respect rate limits
            wait_on_rate_limit (bool, optional): Whether to wait when rate limited
            **kwargs: Parameters passed to the wrapped node

        Returns:
            Dictionary containing the wrapped node's output plus rate limiting metadata

        Raises:
            NodeExecutionError: If rate limited and ``wait_on_rate_limit`` is
                False, or if all retry attempts are exhausted
        """
        respect_rate_limits = kwargs.pop("respect_rate_limits", True)
        wait_on_rate_limit = kwargs.pop("wait_on_rate_limit", True)

        # If rate limiting is disabled, just pass through.
        if not respect_rate_limits:
            result = self.wrapped_node.run(**kwargs)
            result["rate_limit_metadata"] = {"rate_limiting_active": False}
            return result

        attempts = 0
        max_attempts = 5
        total_wait_time = 0.0

        while attempts < max_attempts:
            # consume() checks and claims capacity under a single lock; the
            # previous can_proceed()-then-consume() pattern added a second
            # lock round-trip and a check-then-act race between threads.
            if self.rate_limiter.consume():
                # NOTE: if the wrapped node raises, the consumed token is
                # not refunded — the failed call still counts toward the
                # rate limit.
                start_time = time.time()
                result = self.wrapped_node.run(**kwargs)
                execution_time = time.time() - start_time

                # Attach rate limiting metadata to the wrapped result.
                result["rate_limit_metadata"] = {
                    "rate_limiting_active": True,
                    "attempts": attempts + 1,
                    "total_wait_time": total_wait_time,
                    "execution_time": execution_time,
                    "rate_limit_strategy": self.config.strategy,
                }

                return result

            # Rate limited - decide what to do.
            if not wait_on_rate_limit:
                raise NodeExecutionError(
                    f"Rate limited: too many requests. "
                    f"Max {self.config.max_requests} requests per {self.config.time_window}s"
                )

            # Exponential backoff on repeated attempts, capped at max_backoff.
            wait_time = self.rate_limiter.wait_time()
            if attempts > 0:
                wait_time *= self.config.backoff_factor**attempts
            wait_time = min(wait_time, self.config.max_backoff)

            self.logger.info(
                f"Rate limited, waiting {wait_time:.2f}s before retry (attempt {attempts + 1})"
            )

            time.sleep(wait_time)
            total_wait_time += wait_time
            attempts += 1

        # All attempts exhausted without ever obtaining capacity.
        raise NodeExecutionError(
            f"Rate limited after {max_attempts} attempts. "
            f"Total wait time: {total_wait_time:.2f}s"
        )
|
428
|
+
|
429
|
+
|
430
|
+
@register_node(alias="AsyncRateLimitedAPI")
class AsyncRateLimitedAPINode(AsyncNode):
    """Asynchronous wrapper node that adds rate limiting to any async API node.

    This node provides the same functionality as RateLimitedAPINode but for
    asynchronous execution. It uses async/await for non-blocking operation.

    Design Purpose:
    - Add rate limiting to async API nodes without blocking the event loop
    - Support high-concurrency scenarios with rate limiting
    - Provide the same interface as RateLimitedAPINode but with async execution

    Upstream Usage:
    - AsyncLocalRuntime: Executes workflow with async support
    - Async API integration workflows: Wraps other async API nodes

    Downstream Consumers:
    - Same as the wrapped async node
    """

    def __init__(
        self, wrapped_node: AsyncNode, rate_limit_config: RateLimitConfig, **kwargs
    ):
        """Initialize the async rate limited API node.

        Args:
            wrapped_node: The async node to wrap with rate limiting
            rate_limit_config: Rate limiting configuration
            **kwargs: Additional parameters passed to base AsyncNode
        """
        super().__init__(**kwargs)
        self.wrapped_node = wrapped_node
        self.rate_limiter = create_rate_limiter(rate_limit_config)
        self.config = rate_limit_config
        # NOTE(review): the sync fallback node constructs its own rate
        # limiter, so synchronous and asynchronous calls are throttled
        # independently of each other — confirm this is intended.
        self.sync_node = RateLimitedAPINode(wrapped_node, rate_limit_config, **kwargs)

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Define the parameters this node accepts.

        Returns:
            Dictionary of parameter definitions (delegated to the sync wrapper)
        """
        return self.sync_node.get_parameters()

    def get_output_schema(self) -> Dict[str, NodeParameter]:
        """Define the output schema for this node.

        Returns:
            Dictionary of output parameter definitions (delegated to the sync wrapper)
        """
        return self.sync_node.get_output_schema()

    def run(self, **kwargs) -> Dict[str, Any]:
        """Synchronous version for compatibility.

        Args:
            **kwargs: Parameters for the wrapped node

        Returns:
            Same as RateLimitedAPINode.run()
        """
        return self.sync_node.run(**kwargs)

    async def async_run(self, **kwargs) -> Dict[str, Any]:
        """Execute the wrapped async node with rate limiting.

        Args:
            respect_rate_limits (bool, optional): Whether to respect rate limits
            wait_on_rate_limit (bool, optional): Whether to wait when rate limited
            **kwargs: Parameters passed to the wrapped node

        Returns:
            Dictionary containing the wrapped node's output plus rate limiting metadata

        Raises:
            NodeExecutionError: If rate limited and ``wait_on_rate_limit`` is
                False, or if all retry attempts are exhausted
        """
        respect_rate_limits = kwargs.pop("respect_rate_limits", True)
        wait_on_rate_limit = kwargs.pop("wait_on_rate_limit", True)

        # If rate limiting is disabled, just pass through.
        if not respect_rate_limits:
            result = await self.wrapped_node.async_run(**kwargs)
            result["rate_limit_metadata"] = {"rate_limiting_active": False}
            return result

        attempts = 0
        max_attempts = 5
        total_wait_time = 0.0

        while attempts < max_attempts:
            # consume() checks and claims capacity under a single lock; the
            # previous can_proceed()-then-consume() pattern added a second
            # lock round-trip and a check-then-act race between tasks.
            if self.rate_limiter.consume():
                # NOTE: if the wrapped node raises, the consumed token is
                # not refunded — the failed call still counts toward the
                # rate limit.
                start_time = time.time()
                result = await self.wrapped_node.async_run(**kwargs)
                execution_time = time.time() - start_time

                # Attach rate limiting metadata to the wrapped result.
                result["rate_limit_metadata"] = {
                    "rate_limiting_active": True,
                    "attempts": attempts + 1,
                    "total_wait_time": total_wait_time,
                    "execution_time": execution_time,
                    "rate_limit_strategy": self.config.strategy,
                }

                return result

            # Rate limited - decide what to do.
            if not wait_on_rate_limit:
                raise NodeExecutionError(
                    f"Rate limited: too many requests. "
                    f"Max {self.config.max_requests} requests per {self.config.time_window}s"
                )

            # Exponential backoff on repeated attempts, capped at max_backoff.
            wait_time = self.rate_limiter.wait_time()
            if attempts > 0:
                wait_time *= self.config.backoff_factor**attempts
            wait_time = min(wait_time, self.config.max_backoff)

            self.logger.info(
                f"Rate limited, waiting {wait_time:.2f}s before retry (attempt {attempts + 1})"
            )

            # Non-blocking wait so other event-loop tasks keep running.
            await asyncio.sleep(wait_time)
            total_wait_time += wait_time
            attempts += 1

        # All attempts exhausted without ever obtaining capacity.
        raise NodeExecutionError(
            f"Rate limited after {max_attempts} attempts. "
            f"Total wait time: {total_wait_time:.2f}s"
        )
|