lionagi 0.14.3__py3-none-any.whl → 0.14.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/adapters/async_postgres_adapter.py +172 -0
- lionagi/libs/concurrency/__init__.py +25 -1
- lionagi/libs/concurrency/patterns.py +145 -138
- lionagi/libs/concurrency/primitives.py +145 -97
- lionagi/libs/concurrency/resource_tracker.py +182 -0
- lionagi/libs/concurrency/task.py +4 -2
- lionagi/protocols/generic/pile.py +16 -38
- lionagi/protocols/generic/processor.py +53 -26
- lionagi/service/rate_limited_processor.py +53 -35
- lionagi/session/branch.py +0 -5
- lionagi/utils.py +56 -174
- lionagi/version.py +1 -1
- {lionagi-0.14.3.dist-info → lionagi-0.14.5.dist-info}/METADATA +7 -3
- {lionagi-0.14.3.dist-info → lionagi-0.14.5.dist-info}/RECORD +16 -15
- {lionagi-0.14.3.dist-info → lionagi-0.14.5.dist-info}/WHEEL +0 -0
- {lionagi-0.14.3.dist-info → lionagi-0.14.5.dist-info}/licenses/LICENSE +0 -0
lionagi/protocols/generic/pile.py
CHANGED
@@ -5,7 +5,6 @@
 from __future__ import annotations
 
 import asyncio
-import threading
 from collections import deque
 from collections.abc import (
     AsyncIterator,
@@ -25,6 +24,7 @@ from pydapter import Adaptable, AsyncAdaptable
 from typing_extensions import Self, override
 
 from lionagi._errors import ItemExistsError, ItemNotFoundError
+from lionagi.libs.concurrency import Lock as ConcurrencyLock
 from lionagi.utils import UNDEFINED, is_same_dtype, to_list
 
 from .._concepts import Observable
@@ -38,19 +38,10 @@ T = TypeVar("T", bound=E)
 __all__ = ("Pile",)
 
 
-def synchronized(func: Callable):
-    @wraps(func)
-    def wrapper(self: Pile, *args, **kwargs):
-        with self.lock:
-            return func(self, *args, **kwargs)
-
-    return wrapper
-
-
 def async_synchronized(func: Callable):
     @wraps(func)
     async def wrapper(self: Pile, *args, **kwargs):
-        async with self.async_lock:
+        async with self.lock:
             return await func(self, *args, **kwargs)
 
     return wrapper
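Both decorator paths now funnel through a single `self.lock`. The internals of `lionagi.libs.concurrency.Lock` are not part of this diff; the following is a minimal sketch, with a hypothetical `DualLock` class, of how one object can serve both `with` and `async with` call sites the way the unified decorator requires:

import asyncio
import threading


class DualLock:
    # Hypothetical sketch only; not lionagi's actual Lock implementation.
    def __init__(self) -> None:
        self._inner = threading.Lock()

    def __enter__(self):
        self._inner.acquire()
        return self

    def __exit__(self, *exc) -> None:
        self._inner.release()

    async def __aenter__(self):
        # Offload the blocking acquire so the event loop keeps running.
        await asyncio.to_thread(self._inner.acquire)
        return self

    async def __aexit__(self, *exc) -> None:
        self._inner.release()

A single object with both protocols removes the failure mode the old split design invited: a sync caller holding `_lock` while an async caller held `_async_lock`, each guarding the same underlying data.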
@@ -91,8 +82,7 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
 
     def __pydantic_extra__(self) -> dict[str, FieldInfo]:
         return {
-            "_lock": Field(default_factory=threading.Lock),
-            "_async": Field(default_factory=asyncio.Lock),
+            "_lock": Field(default_factory=ConcurrencyLock),
         }
 
     def __pydantic_private__(self) -> dict[str, FieldInfo]:
@@ -172,7 +162,6 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
         """
         self._setitem(key, item)
 
-    @synchronized
     def pop(
         self,
         key: ID.Ref | ID.RefSeq | int | slice,
@@ -235,7 +224,6 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
         """
         self._exclude(item)
 
-    @synchronized
     def clear(self) -> None:
         """Remove all items."""
         self._clear()
@@ -255,7 +243,6 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
         """
         self._update(other)
 
-    @synchronized
     def insert(self, index: int, item: T, /) -> None:
         """Insert item at position.
 
@@ -269,7 +256,6 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
         """
         self._insert(index, item)
 
-    @synchronized
     def append(self, item: T, /) -> None:
         """Append item to end (alias for include).
 
@@ -281,7 +267,6 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
         """
         self.update(item)
 
-    @synchronized
     def get(
         self,
         key: ID.Ref | ID.RefSeq | int | slice,
@@ -321,11 +306,12 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
 
     def __iter__(self) -> Iterator[T]:
         """Iterate over items safely."""
-        with self.lock:
-            current_order = list(self.progression)
+        # Take a snapshot of the current order to avoid holding lock during iteration
+        current_order = list(self.progression)
 
         for key in current_order:
-            yield self.collections[key]
+            if key in self.collections:
+                yield self.collections[key]
 
     def __next__(self) -> T:
         """Get next item."""
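The new `__iter__` trades strict locking for a snapshot plus a membership check, so items removed mid-iteration are skipped rather than raising. A minimal sketch of the idiom, with plain `list`/`dict` stand-ins for the progression and collections:

order = ["a", "b", "c"]
items = {"a": 1, "b": 2, "c": 3}


def iter_snapshot():
    for key in list(order):  # snapshot: later reorders don't affect us
        if key in items:  # tolerate removal between snapshot and yield
            yield items[key]


items.pop("b")  # mutate before the generator is consumed
assert list(iter_snapshot()) == [1, 3]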
@@ -478,29 +464,20 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
         """Prepare for pickling."""
         state = self.__dict__.copy()
         state["_lock"] = None
-        state["_async_lock"] = None
         return state
 
     def __setstate__(self, state):
         """Restore after unpickling."""
         self.__dict__.update(state)
-        self._lock = threading.Lock()
-        self._async_lock = asyncio.Lock()
+        self._lock = ConcurrencyLock()
 
     @property
     def lock(self):
-        """Sync lock."""
+        """Unified concurrency lock for both sync and async operations."""
         if not hasattr(self, "_lock") or self._lock is None:
-            self._lock = threading.Lock()
+            self._lock = ConcurrencyLock()
         return self._lock
 
-    @property
-    def async_lock(self):
-        """Async lock."""
-        if not hasattr(self, "_async_lock") or self._async_lock is None:
-            self._async_lock = asyncio.Lock()
-        return self._async_lock
-
     # Async Interface methods
     @async_synchronized
     async def asetitem(
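Lock objects cannot be pickled, which is why `__getstate__` nulls the lock and `__setstate__` builds a fresh one. A minimal sketch of the pattern, using a toy `HasLock` class with `threading.Lock` standing in for `ConcurrencyLock`:

import pickle
import threading


class HasLock:
    def __init__(self) -> None:
        self._lock = threading.Lock()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_lock"] = None  # a held lock has no meaning in another process
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._lock = threading.Lock()  # fresh, unlocked instance


restored = pickle.loads(pickle.dumps(HasLock()))
assert restored._lock is not None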
@@ -577,12 +554,13 @@ class Pile(Element, Collective[E], Generic[E], Adaptable, AsyncAdaptable):
 
     async def __aiter__(self) -> AsyncIterator[T]:
         """Async iterate over items."""
-        async with self.async_lock:
+        async with self.lock:
             current_order = list(self.progression)
 
         for key in current_order:
-            yield self.collections[key]
-            await asyncio.sleep(0)
+            if key in self.collections:
+                yield self.collections[key]
+            await asyncio.sleep(0)  # Yield control to the event loop
 
     async def __anext__(self) -> T:
         """Async get next item."""
@@ -915,7 +893,7 @@
 
     async def __aenter__(self) -> Self:
         """Enter async context."""
-        await self.async_lock.acquire()
+        await self.lock.__aenter__()
         return self
 
     async def __aexit__(
@@ -925,7 +903,7 @@
         exc_tb: Any,
     ) -> None:
         """Exit async context."""
-        self.async_lock.release()
+        await self.lock.__aexit__(exc_type, exc_val, exc_tb)
 
     def is_homogenous(self) -> bool:
         """Check if all items are same type."""
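With `__aenter__`/`__aexit__` delegating to the unified lock, a `Pile` can be held across a multi-step critical section. A hypothetical usage sketch (`get` and `ainclude` are methods that appear elsewhere in this diff; `move_item` is illustrative only):

async def move_item(src, dst, ref):
    async with src:  # holds src.lock for the whole block
        item = src.get(ref)
    await dst.ainclude(item)  # takes dst's own lock internally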
lionagi/protocols/generic/processor.py
CHANGED
@@ -5,6 +5,9 @@
 import asyncio
 from typing import Any, ClassVar
 
+from lionagi.libs.concurrency import Event as ConcurrencyEvent
+from lionagi.libs.concurrency import Semaphore, create_task_group
+
 from .._concepts import Observer
 from .element import ID
 from .event import Event, EventStatus
@@ -56,9 +59,9 @@
         self.queue = asyncio.Queue()
         self._available_capacity = queue_capacity
         self._execution_mode = False
-        self._stop_event = asyncio.Event()
+        self._stop_event = ConcurrencyEvent()
         if concurrency_limit:
-            self._concurrency_sem = asyncio.Semaphore(concurrency_limit)
+            self._concurrency_sem = Semaphore(concurrency_limit)
         else:
             self._concurrency_sem = None
 
@@ -106,7 +109,9 @@
 
     async def start(self) -> None:
         """Clears the stop signal, allowing event processing to resume."""
-        self._stop_event.clear()
+        # Create a new event since ConcurrencyEvent doesn't have clear()
+        if self._stop_event.is_set():
+            self._stop_event = ConcurrencyEvent()
 
     def is_stopped(self) -> bool:
         """Checks whether the processor is in a stopped state.
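Recreating the event rather than clearing it is the standard idiom for anyio-style events, which deliberately omit `clear()` to avoid set/reset races. A runnable sketch with a toy `Stoppable` class, assuming `ConcurrencyEvent` behaves like `anyio.Event`:

import anyio


class Stoppable:
    def __init__(self) -> None:
        self._stop_event = anyio.Event()

    def stop(self) -> None:
        self._stop_event.set()

    def start(self) -> None:
        # anyio.Event has no clear(); reset by swapping in a new instance.
        if self._stop_event.is_set():
            self._stop_event = anyio.Event()


async def main() -> None:
    s = Stoppable()
    s.stop()
    s.start()
    assert not s._stop_event.is_set()


anyio.run(main)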
@@ -136,30 +141,52 @@
         for tasks to complete. Resets capacity afterward if any events
         were processed.
         """
-        tasks = set()
         prev_event: Event | None = None
-
-        while self.available_capacity > 0 and self.queue.qsize() > 0:
-            next_event = None
-
-            if prev_event and prev_event.status == EventStatus.PENDING:
-                await asyncio.sleep(self.capacity_refresh_time)
-                next_event = prev_event
-            else:
-                next_event = await self.dequeue()
-
-            if await self.request_permission(**next_event.request):
-                if next_event.streaming:
-                    task = asyncio.create_task(next_event.stream())
+        events_processed = 0
+
+        async with create_task_group() as tg:
+            while self.available_capacity > 0 and not self.queue.empty():
+                next_event = None
+                if prev_event and prev_event.status == EventStatus.PENDING:
+                    # Wait if previous event is still pending
+                    await asyncio.sleep(self.capacity_refresh_time)
+                    next_event = prev_event
                 else:
-                    task = asyncio.create_task(next_event.invoke())
-                tasks.add(task)
+                    next_event = await self.dequeue()
+
+                if await self.request_permission(**next_event.request):
+                    if next_event.streaming:
+                        # For streaming, we need to consume the async generator
+                        async def consume_stream(event):
+                            async for _ in event.stream():
+                                pass
+
+                        if self._concurrency_sem:
+
+                            async def stream_with_sem(event):
+                                async with self._concurrency_sem:
+                                    await consume_stream(event)
+
+                            await tg.start_soon(stream_with_sem, next_event)
+                        else:
+                            await tg.start_soon(consume_stream, next_event)
+                    else:
+                        # For non-streaming, just invoke
+                        if self._concurrency_sem:
+
+                            async def invoke_with_sem(event):
+                                async with self._concurrency_sem:
+                                    await event.invoke()
+
+                            await tg.start_soon(invoke_with_sem, next_event)
+                        else:
+                            await tg.start_soon(next_event.invoke)
+                    events_processed += 1
 
-            prev_event = next_event
-            self._available_capacity -= 1
+                prev_event = next_event
+                self._available_capacity -= 1
 
-        if tasks:
-            await asyncio.wait(tasks)
+        if events_processed > 0:
             self.available_capacity = self.queue_capacity
 
     async def request_permission(self, **kwargs: Any) -> bool:
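The rewrite replaces the manual `tasks = set()` / `asyncio.wait(tasks)` bookkeeping with structured concurrency: the task group's `async with` block cannot exit until every task spawned on it has finished, and a failure in one task cancels the rest. A minimal sketch with anyio (note that plain `anyio` task groups have a synchronous `start_soon`, whereas the lionagi wrapper above is awaited; `worker`/`process_all` are illustrative names):

import anyio


async def worker(n: int) -> None:
    await anyio.sleep(0.01)
    print(f"event {n} done")


async def process_all() -> None:
    # The block below only exits once all five workers have completed.
    async with anyio.create_task_group() as tg:
        for n in range(5):
            tg.start_soon(worker, n)


anyio.run(process_all)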
@@ -270,9 +297,9 @@ class Executor(Observer):
         Args:
             event (Event): The event to add.
         """
-        async with self.pile:
-            self.pile.include(event)
-            self.pending.include(event)
+        # Use async methods to avoid deadlock between sync/async locks
+        await self.pile.ainclude(event)
+        self.pending.include(event)
 
     @property
     def completed_events(self) -> Pile[Event]:
lionagi/service/rate_limited_processor.py
CHANGED
@@ -4,9 +4,11 @@
 
 import asyncio
 import logging
+from typing import Any
 
 from typing_extensions import Self, override
 
+from lionagi.libs.concurrency import CapacityLimiter, Lock, move_on_after
 from lionagi.protocols.types import Executor, Processor
 
 from .connections.api_calling import APICalling
@@ -40,24 +42,40 @@ class RateLimitedAPIProcessor(Processor):
         self.available_request = self.limit_requests
         self.available_token = self.limit_tokens
         self._rate_limit_replenisher_task: asyncio.Task | None = None
-        self._lock: asyncio.Lock = asyncio.Lock()
-
-
-
+        self._lock = Lock()
+
+        # Use CapacityLimiter for better token management
+        if self.limit_tokens:
+            self._token_limiter = CapacityLimiter(self.limit_tokens)
+        else:
+            self._token_limiter = None
+
+        if self.limit_requests:
+            self._request_limiter = CapacityLimiter(self.limit_requests)
+        else:
+            self._request_limiter = None
 
     async def start_replenishing(self):
         """Start replenishing rate limit capacities at regular intervals."""
         await self.start()
         try:
             while not self.is_stopped():
-                await asyncio.sleep(self.interval)
-                async with self._lock:
-                    if self.limit_requests:
-                        self.available_request = (
-                            self.limit_requests
                         )
-                    if self.limit_tokens:
-                        self.available_token = self.limit_tokens
+                await asyncio.sleep(self.interval)
+
+                # Reset capacity limiters to their original values
+                if self._request_limiter and self.limit_requests:
+                    # Adjust total tokens to reset capacity
+                    current_borrowed = self._request_limiter.borrowed_tokens
+                    if current_borrowed < self.limit_requests:
+                        self._request_limiter.total_tokens = (
+                            self.limit_requests
+                        )
+
+                if self._token_limiter and self.limit_tokens:
+                    # Reset token limiter capacity
+                    current_borrowed = self._token_limiter.borrowed_tokens
+                    if current_borrowed < self.limit_tokens:
+                        self._token_limiter.total_tokens = self.limit_tokens
 
         except asyncio.CancelledError:
             logging.info("Rate limit replenisher task cancelled.")
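The replenisher resets capacity by writing `total_tokens` instead of recreating the limiter. Assuming lionagi's `CapacityLimiter` mirrors `anyio.CapacityLimiter`, the relevant surface looks like this:

import anyio


async def main() -> None:
    limiter = anyio.CapacityLimiter(2)  # at most 2 concurrent borrowers

    async with limiter:  # borrow one token for this block
        print(limiter.borrowed_tokens)  # -> 1
        print(limiter.available_tokens)  # -> 1

    # total_tokens is writable at runtime, which is what the
    # replenisher loop above relies on to restore capacity.
    limiter.total_tokens = 4


anyio.run(main)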
@@ -98,31 +116,31 @@ class RateLimitedAPIProcessor(Processor):
 
     @override
     async def request_permission(
-        self, required_tokens: int = None, **kwargs
+        self, required_tokens: int = None, **kwargs: Any
     ) -> bool:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            if self.
-                self.
-
-
-
+        # No limits configured, just check queue capacity
+        if self._request_limiter is None and self._token_limiter is None:
+            return self.queue.qsize() < self.queue_capacity
+
+        # Check request limit
+        if self._request_limiter:
+            # Try to acquire with timeout
+            with move_on_after(0.1) as scope:
+                await self._request_limiter.acquire()
+            if scope.cancelled_caught:
+                return False
+
+        # Check token limit if required
+        if self._token_limiter and required_tokens:
+            # For token-based limiting, we need to acquire multiple tokens
+            # This is a simplified approach - in production you might want
+            # a more sophisticated token bucket algorithm
+            if self._token_limiter.available_tokens < required_tokens:
+                if self._request_limiter:
+                    self._request_limiter.release()
+                return False
+
+        return True
 
 
 class RateLimitedAPIExecutor(Executor):
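`move_on_after` turns the permission check into a bounded wait: if the limiter cannot be acquired within 0.1 s, the cancel scope trips, `cancelled_caught` becomes true, and the request is refused instead of blocking the queue. A runnable sketch of the same pattern with anyio (`try_acquire` is an illustrative name; `acquire_on_behalf_of` is anyio's API for counting each call as a distinct borrower):

import anyio


async def try_acquire(limiter: anyio.CapacityLimiter, timeout: float) -> bool:
    with anyio.move_on_after(timeout) as scope:
        await limiter.acquire_on_behalf_of(object())
    return not scope.cancelled_caught


async def main() -> None:
    limiter = anyio.CapacityLimiter(1)
    print(await try_acquire(limiter, 0.1))  # True: token available
    print(await try_acquire(limiter, 0.1))  # False: capacity exhausted


anyio.run(main)

One sharp edge the diff's own comments acknowledge: the token check reads `available_tokens` without acquiring them, so it is advisory rather than a true token-bucket reservation.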
lionagi/session/branch.py
CHANGED
@@ -1186,7 +1186,6 @@ class Branch(Element, Communicatable, Relational):
         backoff_factor: float = 1,
         retry_default: Any = UNDEFINED,
         retry_timeout: float | None = None,
-        retry_timing: bool = False,
         max_concurrent: int | None = None,
         throttle_period: float | None = None,
         flatten: bool = True,
@@ -1223,8 +1222,6 @@ class Branch(Element, Communicatable, Relational):
                 Fallback value if all retries fail (if suppressing errors).
             retry_timeout (float|None):
                 Overall timeout for all attempts (None = no limit).
-            retry_timing (bool):
-                If True, track time used for retries.
             max_concurrent (int|None):
                 Maximum concurrent tasks (if batching).
             throttle_period (float|None):
@@ -1261,7 +1258,6 @@ class Branch(Element, Communicatable, Relational):
             backoff_factor=backoff_factor,
             retry_default=retry_default,
             retry_timeout=retry_timeout,
-            retry_timing=retry_timing,
             max_concurrent=max_concurrent,
             throttle_period=throttle_period,
             flatten=flatten,
@@ -1290,7 +1286,6 @@ class Branch(Element, Communicatable, Relational):
             backoff_factor=backoff_factor,
             retry_default=retry_default,
             retry_timeout=retry_timeout,
-            retry_timing=retry_timing,
             throttle_period=throttle_period,
             flatten=flatten,
             dropna=dropna,
|