lionagi 0.14.4__py3-none-any.whl → 0.14.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,9 +4,11 @@
 
 import asyncio
 import logging
+from typing import Any
 
 from typing_extensions import Self, override
 
+from lionagi.libs.concurrency import CapacityLimiter, Lock, move_on_after
 from lionagi.protocols.types import Executor, Processor
 
 from .connections.api_calling import APICalling
@@ -40,24 +42,40 @@ class RateLimitedAPIProcessor(Processor):
         self.available_request = self.limit_requests
         self.available_token = self.limit_tokens
         self._rate_limit_replenisher_task: asyncio.Task | None = None
-        self._lock: asyncio.Lock = asyncio.Lock()
-        self._concurrency_sem = asyncio.Semaphore(
-            concurrency_limit or queue_capacity
-        )
+        self._lock = Lock()
+
+        # Use CapacityLimiter for better token management
+        if self.limit_tokens:
+            self._token_limiter = CapacityLimiter(self.limit_tokens)
+        else:
+            self._token_limiter = None
+
+        if self.limit_requests:
+            self._request_limiter = CapacityLimiter(self.limit_requests)
+        else:
+            self._request_limiter = None
 
     async def start_replenishing(self):
         """Start replenishing rate limit capacities at regular intervals."""
         await self.start()
         try:
             while not self.is_stopped():
-                await asyncio.sleep(delay=self.interval)
-                async with self._lock:
-                    if self.limit_requests is not None:
-                        self.available_request = (
-                            self.limit_requests - self.queue.qsize()
+                await asyncio.sleep(self.interval)
+
+                # Reset capacity limiters to their original values
+                if self._request_limiter and self.limit_requests:
+                    # Adjust total tokens to reset capacity
+                    current_borrowed = self._request_limiter.borrowed_tokens
+                    if current_borrowed < self.limit_requests:
+                        self._request_limiter.total_tokens = (
+                            self.limit_requests
                         )
-                    if self.limit_tokens is not None:
-                        self.available_token = self.limit_tokens
+
+                if self._token_limiter and self.limit_tokens:
+                    # Reset token limiter capacity
+                    current_borrowed = self._token_limiter.borrowed_tokens
+                    if current_borrowed < self.limit_tokens:
+                        self._token_limiter.total_tokens = self.limit_tokens
 
         except asyncio.CancelledError:
             logging.info("Rate limit replenisher task cancelled.")
@@ -98,31 +116,31 @@ class RateLimitedAPIProcessor(Processor):
 
     @override
     async def request_permission(
-        self, required_tokens: int = None, **kwargs
+        self, required_tokens: int = None, **kwargs: Any
     ) -> bool:
-        async with self._lock:
-            if self.limit_requests is None and self.limit_tokens is None:
-                if self.queue.qsize() < self.queue_capacity:
-                    return True
-
-            if self.limit_requests is not None:
-                if self.available_request > 0:
-                    self.available_request -= 1
-                    if required_tokens is None:
-                        return True
-                    else:
-                        if self.limit_tokens >= required_tokens:
-                            self.limit_tokens -= required_tokens
-                            return True
-
-            if self.limit_tokens is not None:
-                if required_tokens is None:
-                    return True
-                if self.limit_tokens >= required_tokens:
-                    self.limit_tokens -= required_tokens
-                    return True
-
-            return False
+        # No limits configured, just check queue capacity
+        if self._request_limiter is None and self._token_limiter is None:
+            return self.queue.qsize() < self.queue_capacity
+
+        # Check request limit
+        if self._request_limiter:
+            # Try to acquire with timeout
+            with move_on_after(0.1) as scope:
+                await self._request_limiter.acquire()
+            if scope.cancelled_caught:
+                return False
+
+        # Check token limit if required
+        if self._token_limiter and required_tokens:
+            # For token-based limiting, we need to acquire multiple tokens
+            # This is a simplified approach - in production you might want
+            # a more sophisticated token bucket algorithm
+            if self._token_limiter.available_tokens < required_tokens:
+                if self._request_limiter:
+                    self._request_limiter.release()
+                return False
+
+        return True
 
 
 class RateLimitedAPIExecutor(Executor):
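
The rewritten request_permission hinges on pairing move_on_after with acquire(): if no slot frees up within 0.1s, the scope's cancelled_caught flag reports the timeout. A standalone sketch of that pattern in plain anyio, assuming lionagi's wrappers keep anyio's semantics:

    import anyio
    from anyio import CapacityLimiter, move_on_after

    async def try_acquire(limiter: CapacityLimiter, timeout: float = 0.1) -> bool:
        # Return True only if a slot was acquired before the timeout.
        with move_on_after(timeout) as scope:
            await limiter.acquire()
        return not scope.cancelled_caught

    async def demo() -> None:
        limiter = CapacityLimiter(1)
        other = object()  # stand-in for another task holding the only slot
        await limiter.acquire_on_behalf_of(other)
        print(await try_acquire(limiter))  # False: acquire() timed out
        limiter.release_on_behalf_of(other)
        print(await try_acquire(limiter))  # True: a slot was free
        limiter.release()

    anyio.run(demo)

Note that a permission granted this way borrows a request token that must eventually be released, which is why the token-limit branch in the diff releases the request limiter before returning False.
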
lionagi/session/branch.py CHANGED
@@ -917,9 +917,7 @@ class Branch(Element, Communicatable, Relational):
         actions: bool = False,
         reason: bool = False,
         action_kwargs: dict = None,
-        action_strategy: Literal[
-            "sequential", "concurrent", "batch"
-        ] = "concurrent",
+        action_strategy: Literal["sequential", "concurrent"] = "concurrent",
         verbose_action: bool = False,
         field_models: list[FieldModel] = None,
         exclude_fields: list | dict | None = None,
@@ -987,7 +985,7 @@ class Branch(Element, Communicatable, Relational):
                 If `True`, signals that the LLM should provide chain-of-thought or reasoning (where applicable).
             action_kwargs (dict | None, optional):
                 Additional parameters for the `branch.act()` call if tools are invoked.
-            action_strategy (Literal["sequential","concurrent","batch"], optional):
+            action_strategy (Literal["sequential","concurrent"], optional):
                 The strategy for invoking tools (default: "concurrent").
             verbose_action (bool, optional):
                 If `True`, logs detailed information about tool invocation.
@@ -1174,9 +1172,8 @@ class Branch(Element, Communicatable, Relational):
         self,
         action_request: list | ActionRequest | BaseModel | dict,
         *,
-        strategy: Literal["concurrent", "sequential", "batch"] = "concurrent",
+        strategy: Literal["concurrent", "sequential"] = "concurrent",
         verbose_action: bool = False,
-        batch_size: int = None,
         suppress_errors: bool = True,
         sanitize_input: bool = False,
         unique_input: bool = False,
@@ -1186,7 +1183,6 @@
         backoff_factor: float = 1,
         retry_default: Any = UNDEFINED,
         retry_timeout: float | None = None,
-        retry_timing: bool = False,
         max_concurrent: int | None = None,
         throttle_period: float | None = None,
         flatten: bool = True,
@@ -1223,10 +1219,8 @@
                 Fallback value if all retries fail (if suppressing errors).
             retry_timeout (float|None):
                 Overall timeout for all attempts (None = no limit).
-            retry_timing (bool):
-                If True, track time used for retries.
             max_concurrent (int|None):
-                Maximum concurrent tasks (if batching).
+                Maximum concurrent tasks.
             throttle_period (float|None):
                 Minimum spacing (in seconds) between requests.
             flatten (bool):
@@ -1242,11 +1236,6 @@
             Any:
                 The result or results from the invoked tool(s).
         """
-        if batch_size and not strategy == "batch":
-            raise ValueError(
-                "Batch size is only applicable for 'batch' strategy."
-            )
-
         match strategy:
             case "concurrent":
                 return await self._concurrent_act(
@@ -1261,7 +1250,6 @@
                     backoff_factor=backoff_factor,
                     retry_default=retry_default,
                     retry_timeout=retry_timeout,
-                    retry_timing=retry_timing,
                     max_concurrent=max_concurrent,
                     throttle_period=throttle_period,
                     flatten=flatten,
@@ -1275,28 +1263,8 @@
                     verbose_action=verbose_action,
                     suppress_errors=suppress_errors,
                 )
-            case "batch":
-                return await self._batch_act(
-                    action_request,
-                    verbose_action=verbose_action,
-                    batch_size=batch_size or 1,
-                    max_concurrent=max_concurrent,
-                    suppress_errors=suppress_errors,
-                    sanitize_input=sanitize_input,
-                    unique_input=unique_input,
-                    num_retries=num_retries,
-                    initial_delay=initial_delay,
-                    retry_delay=retry_delay,
-                    backoff_factor=backoff_factor,
-                    retry_default=retry_default,
-                    retry_timeout=retry_timeout,
-                    retry_timing=retry_timing,
-                    throttle_period=throttle_period,
-                    flatten=flatten,
-                    dropna=dropna,
-                    unique_output=unique_output,
-                    flatten_tuple_set=flatten_tuple_set,
-                )
+            case _:
+                raise
 
     async def _concurrent_act(
         self,
@@ -1327,19 +1295,6 @@
                 )
         return results
 
-    async def _batch_act(
-        self,
-        action_request: list[ActionRequest | BaseModel | dict],
-        batch_size: int = None,
-        **kwargs,
-    ) -> list:
-        result = []
-        async for i in bcall(
-            action_request, self._act, batch_size=batch_size, **kwargs
-        ):
-            result.extend(i)
-        return result
-
     async def translate(
         self,
         text: str,
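
With "batch" gone, only the two remaining strategies are accepted; anything else now falls through to the bare raise in the match arm above. A hypothetical usage sketch (the add tool, the tools keyword, and the dict-shaped request are illustrative assumptions, not taken from this diff):

    import anyio
    from lionagi import Branch

    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    async def demo() -> None:
        branch = Branch(tools=[add])  # assumes Branch accepts a tools list
        results = await branch.act(
            [{"function": "add", "arguments": {"a": 1, "b": 2}}],
            strategy="concurrent",  # or "sequential"; "batch" is removed
        )
        print(results)

    anyio.run(demo)
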
@@ -2,6 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import contextlib
 from collections.abc import Callable
 from typing import Any
 
@@ -46,7 +47,7 @@ class Session(Node, Communicatable, Relational):
         mail_manager (MailManager | None): Manages mail operations.
     """
 
-    branches: Pile[Any] = Field(
+    branches: Pile[Branch] = Field(
        default_factory=lambda: Pile(item_type={Branch}, strict_type=False)
    )
    default_branch: Any = Field(default=None, exclude=True)
@@ -57,21 +58,38 @@
     name: str = Field(default="Session")
 
     @model_validator(mode="after")
-    def _initialize_default_branch(self) -> Self:
+    def _add_mail_sources(self) -> Self:
         if self.default_branch is None:
-            from .branch import Branch
-
             self.default_branch = Branch()
-        return self
-
-    @model_validator(mode="after")
-    def _add_mail_sources(self) -> Self:
         if self.default_branch not in self.branches:
             self.branches.include(self.default_branch)
         if self.branches:
             self.mail_manager.add_sources(self.branches)
         return self
 
+    def _lookup_branch_by_name(self, name: str) -> Branch | None:
+        for branch in self.branches:
+            if branch.name == name:
+                return branch
+        return None
+
+    def get_branch(
+        self, branch: ID.Ref | str, default: Any = ..., /
+    ) -> Branch:
+        """Get a branch by its ID or name."""
+
+        with contextlib.suppress(ItemNotFoundError, ValueError):
+            id = ID.get_id(branch)
+            return self.branches[id]
+
+        if isinstance(branch, str):
+            if b := self._lookup_branch_by_name(branch):
+                return b
+
+        if default is ...:
+            raise ItemNotFoundError(f"Branch '{branch}' not found.")
+        return default
+
     def new_branch(
         self,
         system: System | JsonValue = None,
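
A usage sketch of the new lookup helper: ID resolution is tried first, and only a string that fails to parse as an ID falls back to the name scan (the top-level imports and the branch name below are assumptions for illustration):

    from lionagi import Branch, Session

    session = Session()
    session.branches.include(Branch(name="research"))

    by_name = session.get_branch("research")     # ID parse fails, name matches
    by_id = session.get_branch(by_name.id)       # resolved via ID.get_id
    fallback = session.get_branch("nope", None)  # default suppresses the error
    assert fallback is None
    # session.get_branch("nope") with no default raises ItemNotFoundError
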
lionagi/utils.py CHANGED
@@ -43,9 +43,12 @@ from typing import (
     overload,
 )
 
+import anyio
 from pydantic import BaseModel, model_validator
 from pydantic_core import PydanticUndefinedType
 
+from .libs.concurrency import Lock as ConcurrencyLock
+from .libs.concurrency import Semaphore, create_task_group
 from .settings import Settings
 
 R = TypeVar("R")
@@ -612,32 +615,6 @@ class CallParams(Params):
     )
 
 
-class LCallParams(CallParams):
-    func: Any = None
-    sanitize_input: bool = False
-    unique_input: bool = False
-    flatten: bool = False
-    dropna: bool = False
-    unique_output: bool = False
-    flatten_tuple_set: bool = False
-
-    def __call__(self, input_: Any, func=None):
-        if self.func is None and func is None:
-            raise ValueError("a sync func must be provided")
-        return lcall(
-            input_,
-            func or self.func,
-            *self.args,
-            sanitize_input=self.sanitize_input,
-            unique_input=self.unique_input,
-            flatten=self.flatten,
-            dropna=self.dropna,
-            unique_output=self.unique_output,
-            flatten_tuple_set=self.flatten_tuple_set,
-            **self.kwargs,
-        )
-
-
 async def alcall(
     input_: list[Any],
     func: Callable[..., T],
@@ -651,7 +628,6 @@ async def alcall(
     backoff_factor: float = 1,
     retry_default: Any = UNDEFINED,
     retry_timeout: float | None = None,
-    retry_timing: bool = False,
     max_concurrent: int | None = None,
     throttle_period: float | None = None,
     flatten: bool = False,
@@ -659,7 +635,7 @@
     unique_output: bool = False,
     flatten_tuple_set: bool = False,
     **kwargs: Any,
-) -> list[T] | list[tuple[T, float]]:
+) -> list[T]:
     """
     Asynchronously apply a function to each element of a list, with optional input sanitization,
     retries, timeout, and output processing.
@@ -675,7 +651,6 @@
         backoff_factor (float): Multiplier for delay after each retry.
         retry_default (Any): Default value if all retries fail.
         retry_timeout (float | None): Timeout for each function call.
-        retry_timing (bool): If True, return (result, duration) tuples.
         max_concurrent (int | None): Maximum number of concurrent operations.
         throttle_period (float | None): Delay after each completed operation.
         flatten (bool): Flatten the final result if True.
@@ -685,7 +660,7 @@
         **kwargs: Additional arguments passed to func.
 
     Returns:
-        list[T] or list[tuple[T, float]]: The processed results, or results with timing if retry_timing is True.
+        list[T]: The processed results.
 
     Raises:
         asyncio.TimeoutError: If a call times out and no default is provided.
@@ -734,9 +709,9 @@
 
     # Optional initial delay before processing
     if initial_delay:
-        await asyncio.sleep(initial_delay)
+        await anyio.sleep(initial_delay)
 
-    semaphore = asyncio.Semaphore(max_concurrent) if max_concurrent else None
+    semaphore = Semaphore(max_concurrent) if max_concurrent else None
     throttle_delay = throttle_period or 0
     coro_func = is_coro_func(func)
 
@@ -744,137 +719,92 @@
         if coro_func:
             # Async function
             if retry_timeout is not None:
-                return await asyncio.wait_for(
-                    func(item, **kwargs), timeout=retry_timeout
-                )
+                with anyio.move_on_after(retry_timeout) as cancel_scope:
+                    result = await func(item, **kwargs)
+                if cancel_scope.cancelled_caught:
+                    raise asyncio.TimeoutError(
+                        f"Function call timed out after {retry_timeout}s"
+                    )
+                return result
             else:
                 return await func(item, **kwargs)
         else:
             # Sync function
             if retry_timeout is not None:
-                return await asyncio.wait_for(
-                    asyncio.to_thread(func, item, **kwargs),
-                    timeout=retry_timeout,
-                )
+                with anyio.move_on_after(retry_timeout) as cancel_scope:
+                    result = await anyio.to_thread.run_sync(
+                        func, item, **kwargs
+                    )
+                if cancel_scope.cancelled_caught:
+                    raise asyncio.TimeoutError(
+                        f"Function call timed out after {retry_timeout}s"
+                    )
+                return result
             else:
-                return func(item, **kwargs)
+                return await anyio.to_thread.run_sync(func, item, **kwargs)
 
     async def execute_task(i: Any, index: int) -> Any:
-        start_time = asyncio.get_running_loop().time()
        attempts = 0
        current_delay = retry_delay
        while True:
            try:
                result = await call_func(i)
-                if retry_timing:
-                    end_time = asyncio.get_running_loop().time()
-                    return index, result, end_time - start_time
-                else:
-                    return index, result
-            except asyncio.CancelledError as e:
-                raise e
+                return index, result
+            except anyio.get_cancelled_exc_class():
+                raise
 
            except Exception:
                attempts += 1
                if attempts <= num_retries:
                    if current_delay:
-                        await asyncio.sleep(current_delay)
+                        await anyio.sleep(current_delay)
                        current_delay *= backoff_factor
                    # Retry loop continues
                else:
                    # Exhausted retries
                    if retry_default is not UNDEFINED:
-                        # Return default if provided
-                        if retry_timing:
-                            end_time = asyncio.get_running_loop().time()
-                            duration = end_time - (start_time or end_time)
-                            return index, retry_default, duration
-                        else:
-                            return index, retry_default
+                        return index, retry_default
                    # No default, re-raise
                    raise
 
     async def task_wrapper(item: Any, idx: int) -> Any:
         if semaphore:
             async with semaphore:
-                return await execute_task(item, idx)
+                result = await execute_task(item, idx)
         else:
-            return await execute_task(item, idx)
+            result = await execute_task(item, idx)
 
-    # Create tasks
-    tasks = [task_wrapper(item, idx) for idx, item in enumerate(input_)]
+        return result
 
-    # Collect results as they complete
+    # Use task group for structured concurrency
     results = []
-    for coro in asyncio.as_completed(tasks):
-        res = await coro
-        results.append(res)
-        if throttle_delay:
-            await asyncio.sleep(throttle_delay)
+    results_lock = ConcurrencyLock()  # Protect results list
 
-    # Sort by original index
-    results.sort(key=lambda x: x[0])
+    async def run_and_store(item: Any, idx: int):
+        result = await task_wrapper(item, idx)
+        async with results_lock:
+            results.append(result)
 
-    if retry_timing:
-        # (index, result, duration)
-        filtered = [
-            (r[1], r[2]) for r in results if not dropna or r[1] is not None
-        ]
-        return filtered
-    else:
-        # (index, result)
-        output_list = [r[1] for r in results]
-        return to_list(
-            output_list,
-            flatten=flatten,
-            dropna=dropna,
-            unique=unique_output,
-            flatten_tuple_set=flatten_tuple_set,
-        )
+    # Execute all tasks using task group
+    async with create_task_group() as tg:
+        for idx, item in enumerate(input_):
+            await tg.start_soon(run_and_store, item, idx)
+            # Apply throttle delay between starting tasks
+            if throttle_delay and idx < len(input_) - 1:
+                await anyio.sleep(throttle_delay)
 
+    # Sort by original index
+    results.sort(key=lambda x: x[0])
 
-class ALCallParams(CallParams):
-    func: Any = None
-    sanitize_input: bool = False
-    unique_input: bool = False
-    num_retries: int = 0
-    initial_delay: float = 0
-    retry_delay: float = 0
-    backoff_factor: float = 1
-    retry_default: Any = UNDEFINED
-    retry_timeout: float | None = None
-    retry_timing: bool = False
-    max_concurrent: int | None = None
-    throttle_period: float | None = None
-    flatten: bool = False
-    dropna: bool = False
-    unique_output: bool = False
-    flatten_tuple_set: bool = False
-
-    async def __call__(self, input_: Any, func=None):
-        if self.func is None and func is None:
-            raise ValueError("a sync/async func must be provided")
-        return await alcall(
-            input_,
-            func or self.func,
-            *self.args,
-            sanitize_input=self.sanitize_input,
-            unique_input=self.unique_input,
-            num_retries=self.num_retries,
-            initial_delay=self.initial_delay,
-            retry_delay=self.retry_delay,
-            backoff_factor=self.backoff_factor,
-            retry_default=self.retry_default,
-            retry_timeout=self.retry_timeout,
-            retry_timing=self.retry_timing,
-            max_concurrent=self.max_concurrent,
-            throttle_period=self.throttle_period,
-            flatten=self.flatten,
-            dropna=self.dropna,
-            unique_output=self.unique_output,
-            flatten_tuple_set=self.flatten_tuple_set,
-            **self.kwargs,
-        )
+    # (index, result)
+    output_list = [r[1] for r in results]
+    return to_list(
+        output_list,
+        flatten=flatten,
+        dropna=dropna,
+        unique=unique_output,
+        flatten_tuple_set=flatten_tuple_set,
+    )
 
 
 async def bcall(
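
The replacement for asyncio.as_completed is the classic structured-concurrency gather: start every task in a group, append (index, result) pairs under a lock, then sort to restore input order. Note that anyio's own TaskGroup.start_soon is synchronous, so the `await tg.start_soon(...)` in the diff implies lionagi's create_task_group wrapper exposes an awaitable variant. A distilled version of the pattern in plain anyio:

    import anyio

    async def gather_in_order(funcs: list) -> list:
        # Run zero-argument async callables concurrently; return results
        # in input order regardless of completion order.
        results: list[tuple[int, object]] = []
        lock = anyio.Lock()

        async def run_and_store(fn, idx: int) -> None:
            result = await fn()
            async with lock:  # mirrors the diff; guards the shared list
                results.append((idx, result))

        async with anyio.create_task_group() as tg:
            for idx, fn in enumerate(funcs):
                tg.start_soon(run_and_store, fn, idx)

        results.sort(key=lambda pair: pair[0])
        return [result for _, result in results]

    async def demo() -> None:
        def make(i: int):
            async def fn() -> int:
                await anyio.sleep(0.01 * (3 - i))  # later items finish first
                return i
            return fn

        print(await gather_in_order([make(i) for i in range(3)]))  # [0, 1, 2]

    anyio.run(demo)

Strictly speaking the lock is redundant here, since list.append cannot be interrupted between awaits on a single event loop, but it keeps the shape of the diff and stays correct if the storage step ever grows an await point.
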
@@ -891,7 +821,6 @@ async def bcall(
     backoff_factor: float = 1,
     retry_default: Any = UNDEFINED,
     retry_timeout: float | None = None,
-    retry_timing: bool = False,
     max_concurrent: int | None = None,
     throttle_period: float | None = None,
     flatten: bool = False,
@@ -915,7 +844,6 @@
            backoff_factor=backoff_factor,
            retry_default=retry_default,
            retry_timeout=retry_timeout,
-            retry_timing=retry_timing,
            max_concurrent=max_concurrent,
            throttle_period=throttle_period,
            flatten=flatten,
@@ -926,52 +854,6 @@
     )
 
 
-class BCallParams(CallParams):
-    func: Any = None
-    batch_size: int
-    sanitize_input: bool = False
-    unique_input: bool = False
-    num_retries: int = 0
-    initial_delay: float = 0
-    retry_delay: float = 0
-    backoff_factor: float = 1
-    retry_default: Any = UNDEFINED
-    retry_timeout: float | None = None
-    retry_timing: bool = False
-    max_concurrent: int | None = None
-    throttle_period: float | None = None
-    flatten: bool = False
-    dropna: bool = False
-    unique_output: bool = False
-    flatten_tuple_set: bool = False
-
-    async def __call__(self, input_, func=None):
-        if self.func is None and func is None:
-            raise ValueError("a sync/async func must be provided")
-        return await bcall(
-            input_,
-            func or self.func,
-            *self.args,
-            batch_size=self.batch_size,
-            sanitize_input=self.sanitize_input,
-            unique_input=self.unique_input,
-            num_retries=self.num_retries,
-            initial_delay=self.initial_delay,
-            retry_delay=self.retry_delay,
-            backoff_factor=self.backoff_factor,
-            retry_default=self.retry_default,
-            retry_timeout=self.retry_timeout,
-            retry_timing=self.retry_timing,
-            max_concurrent=self.max_concurrent,
-            throttle_period=self.throttle_period,
-            flatten=self.flatten,
-            dropna=self.dropna,
-            unique_output=self.unique_output,
-            flatten_tuple_set=self.flatten_tuple_set,
-            **self.kwargs,
-        )
-
-
 def create_path(
     directory: Path | str,
     filename: str,
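
Taken together, the utils.py changes leave alcall anyio-backed and without the retry_timing flag: callers now always get plain results, ordered by input position. A usage sketch (timings and values are illustrative):

    import anyio
    from lionagi.utils import alcall

    async def double(x: int) -> int:
        await anyio.sleep(0.01 * x)  # later inputs take longer
        return x * 2

    async def demo() -> None:
        out = await alcall(
            [3, 1, 2],
            double,
            num_retries=2,      # retry each failing item up to twice
            retry_timeout=1.0,  # per-call cap, enforced via move_on_after
            max_concurrent=2,   # bounded by the concurrency Semaphore
        )
        print(out)  # [6, 2, 4]: output follows input order, not finish order

    anyio.run(demo)
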
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.14.4"
+__version__ = "0.14.6"