lionagi 0.8.6__py3-none-any.whl → 0.8.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lionagi/operations/ReAct/ReAct.py CHANGED
@@ -28,8 +28,8 @@ async def ReAct(
     tools: Any = None,
     tool_schemas: Any = None,
     response_format: type[BaseModel] | BaseModel = None,
-    extension_allowed: bool = False,
-    max_extensions: int | None = None,
+    extension_allowed: bool = True,
+    max_extensions: int | None = 3,
     response_kwargs: dict | None = None,
     return_analysis: bool = False,
    analysis_model: iModel | None = None,
@@ -54,6 +54,8 @@ async def ReAct(
             sample_writing=interpret_sample,
             **(interpret_kwargs or {}),
         )
+        if verbose_analysis:
+            print(f"Interpreted instruction: {instruction_str}")
 
     # Convert Instruct to dict if necessary
     instruct_dict = (
@@ -92,9 +94,11 @@ async def ReAct(
     )
 
     # Validate and clamp max_extensions if needed
-    if max_extensions and max_extensions > 5:
-        logging.warning("max_extensions should not exceed 5; defaulting to 5.")
-        max_extensions = 5
+    if max_extensions and max_extensions > 100:
+        logging.warning(
+            "max_extensions should not exceed 100; defaulting to 100."
+        )
+        max_extensions = 100
 
     # Step 2: Possibly loop through expansions if extension_needed
     extensions = max_extensions
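The hunks above change ReAct's default posture: `extension_allowed` now defaults to `True`, `max_extensions` defaults to `3`, and the clamp ceiling moves from 5 to 100. A minimal sketch of how a caller sees the new behavior, written for an async context as in the README examples; the provider, model, and instruction text are illustrative, not part of the diff:

```python
# Sketch: relying on the 0.8.8 ReAct defaults (values illustrative).
from lionagi import Branch, iModel

branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o"))

# extension_allowed=True and max_extensions=3 are now implied,
# so a bare call may already expand up to 3 extra reasoning rounds.
result = await branch.ReAct(instruct={"instruction": "Outline a 3-step plan."})

# Values above the new ceiling are clamped with a warning (was 5, now 100).
result = await branch.ReAct(
    instruct={"instruction": "Run a long investigation."},
    max_extensions=250,  # logs a warning and is treated as 100
)
```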
lionagi/protocols/generic/event.py CHANGED
@@ -86,6 +86,7 @@ class Event(Element):
     """
 
     execution: Execution = Field(default_factory=Execution)
+    streaming: bool = False
 
     @field_serializer("execution")
     def _serialize_execution(self, val: Execution) -> dict:
@@ -160,6 +161,15 @@ class Event(Element):
         """
         raise NotImplementedError("Override in subclass.")
 
+    async def stream(self) -> None:
+        """Performs the event action asynchronously, streaming results.
+
+        Raises:
+            NotImplementedError: This base method must be overridden by
+                subclasses.
+        """
+        raise NotImplementedError("Override in subclass.")
+
     @classmethod
     def from_dict(cls, data: dict) -> "Event":
         """Not implemented. Events cannot be fully recreated once done.
lionagi/protocols/generic/processor.py CHANGED
@@ -31,6 +31,7 @@ class Processor(Observer):
         self,
         queue_capacity: int,
         capacity_refresh_time: float,
+        concurrency_limit: int,
     ) -> None:
         """Initializes a Processor instance.
 
@@ -56,6 +57,10 @@ class Processor(Observer):
         self._available_capacity = queue_capacity
         self._execution_mode = False
         self._stop_event = asyncio.Event()
+        if concurrency_limit:
+            self._concurrency_sem = asyncio.Semaphore(concurrency_limit)
+        else:
+            self._concurrency_sem = None
 
     @property
     def available_capacity(self) -> int:
@@ -144,8 +149,11 @@ class Processor(Observer):
                 next_event = await self.dequeue()
 
                 if await self.request_permission(**next_event.request):
-                    next_event.status = EventStatus.PROCESSING
-                    task = asyncio.create_task(next_event.invoke())
+
+                    if next_event.streaming:
+                        task = asyncio.create_task(next_event.stream())
+                    else:
+                        task = asyncio.create_task(next_event.invoke())
                     tasks.add(task)
 
                 prev_event = next_event
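The event.py and processor.py hunks above are two halves of one feature: `Event` gains a `streaming` flag and an abstract `stream()` hook alongside `invoke()`, the `Processor` constructor turns an optional `concurrency_limit` into an `asyncio.Semaphore`, and the processing loop schedules `stream()` for streaming events and `invoke()` for everything else. A condensed sketch of the contract; `TickEvent` and `dispatch` are invented names for illustration:

```python
import asyncio

from lionagi.protocols.generic.event import Event, EventStatus

class TickEvent(Event):
    # Hypothetical subclass: streaming=True routes it to stream() above.
    streaming: bool = True

    async def invoke(self) -> None:
        self.execution.response = "all at once"
        self.execution.status = EventStatus.COMPLETED

    async def stream(self) -> None:
        chunks = []
        for i in range(3):
            await asyncio.sleep(0)  # stand-in for awaiting a real chunk
            chunks.append(f"tick {i}")
        self.execution.response = chunks
        self.execution.status = EventStatus.COMPLETED

def dispatch(event: Event) -> asyncio.Task:
    # Mirrors the loop hunk: streaming events get stream(), others invoke().
    coro = event.stream() if event.streaming else event.invoke()
    return asyncio.create_task(coro)
```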
lionagi/service/endpoints/base.py CHANGED
@@ -3,13 +3,16 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import asyncio
+import json
 import logging
 from abc import ABC
+from collections.abc import AsyncGenerator
 from typing import Any, Literal
 
 import aiohttp
 from aiocache import cached
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+from typing_extensions import Self
 
 from lionagi._errors import ExecutionError, RateLimitError
 from lionagi.protocols.generic.event import Event, EventStatus
@@ -347,6 +350,12 @@ class APICalling(Event):
     is_cached: bool = Field(default=False, exclude=True)
     should_invoke_endpoint: bool = Field(default=True, exclude=True)
 
+    @model_validator(mode="after")
+    def _validate_streaming(self) -> Self:
+        if self.payload.get("stream") is True:
+            self.streaming = True
+        return self
+
     @property
     def required_tokens(self) -> int | None:
         """int | None: The number of tokens required for this request."""
@@ -442,10 +451,91 @@ class APICalling(Event):
         """
         return await self._inner(**kwargs)
 
-    async def stream(self, **kwargs):
+    async def _stream(
+        self,
+        verbose: bool = True,
+        output_file: str = None,
+        with_response_header: bool = False,
+    ) -> AsyncGenerator:
+        async with aiohttp.ClientSession() as client:
+            async with client.request(
+                method=self.endpoint.method.upper(),
+                url=self.endpoint.full_url,
+                headers=self.headers,
+                json=self.payload,
+            ) as response:
+                if response.status != 200:
+                    try:
+                        error_text = await response.json()
+                    except Exception:
+                        error_text = await response.text()
+                    raise aiohttp.ClientResponseError(
+                        request_info=response.request_info,
+                        history=response.history,
+                        status=response.status,
+                        message=f"{error_text}",
+                        headers=response.headers,
+                    )
+
+                file_handle = None
+
+                if output_file:
+                    try:
+                        file_handle = open(output_file, "w")
+                    except Exception as e:
+                        raise ValueError(
+                            f"Invalid to output the response "
+                            f"to {output_file}. Error:{e}"
+                        )
+
+                try:
+                    async for chunk in response.content:
+                        chunk_str = chunk.decode("utf-8")
+                        chunk_list = chunk_str.split("data:")
+                        for c in chunk_list:
+                            c = c.strip()
+                            if c and c != "[DONE]":
+                                try:
+                                    if file_handle:
+                                        file_handle.write(c + "\n")
+                                    c_dict = json.loads(c)
+                                    if verbose:
+                                        if c_dict.get("choices"):
+                                            if content := c_dict["choices"][0][
+                                                "delta"
+                                            ].get("content"):
+                                                print(
+                                                    content, end="", flush=True
+                                                )
+                                    yield c_dict
+                                except json.JSONDecodeError:
+                                    yield c
+                except asyncio.CancelledError as e:
+                    raise e
+
+                if with_response_header:
+                    yield response.headers
+
+                finally:
+                    if file_handle:
+                        file_handle.close()
+
+    async def stream(
+        self,
+        verbose: bool = True,
+        output_file: str = None,
+        with_response_header: bool = False,
+        **kwargs,
+    ) -> AsyncGenerator:
         """Performs a streaming request, if supported by the endpoint.
 
         Args:
+            verbose (bool):
+                If True, prints the response content to the console.
+            output_file (str):
+                If set, writes the response content to this file. (only applies to non-endpoint invoke)
+            with_response_header (bool):
+                If True, yields the response headers as well. (only applies to non-endpoint invoke)
             **kwargs: Additional parameters for the streaming call.
 
         Yields:
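Together with the `_validate_streaming` hook added earlier in this file, a payload that requests `stream: true` marks the whole `APICalling` event as streaming, so the flag never needs to be set by hand. A sketch of the effect, assuming a chat-style endpoint whose payload is built through `create_api_calling` (shown unchanged later in this diff); provider, model, and message values are illustrative:

```python
# Sketch: the model_validator derives `streaming` from the payload.
from lionagi import iModel

gpt4o = iModel(provider="openai", model="gpt-4o")

call = gpt4o.create_api_calling(
    messages=[{"role": "user", "content": "hi"}],
    stream=True,  # lands in call.payload["stream"]
)
assert call.streaming is True  # flipped on by _validate_streaming
```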
@@ -456,23 +546,39 @@ class APICalling(Event):
         """
         start = asyncio.get_event_loop().time()
         response = []
-        if not self.endpoint.is_streamable:
-            raise ValueError(
-                f"Endpoint {self.endpoint.endpoint} is not streamable."
-            )
-
-        async for i in self.endpoint._stream(
-            self.payload, self.headers, **kwargs
-        ):
-            content = i.choices[0].delta.content
-            if content is not None:
-                print(content, end="", flush=True)
-            response.append(i)
-            yield i
-
-        self.execution.duration = asyncio.get_event_loop().time() - start
-        self.execution.response = response
-        self.execution.status = EventStatus.COMPLETED
+        e1 = None
+        try:
+            if self.should_invoke_endpoint and self.endpoint.is_streamable:
+                async for i in self.endpoint._stream(
+                    self.payload, self.headers, **kwargs
+                ):
+                    content = i.choices[0].delta.content
+                    if verbose:
+                        if content is not None:
+                            print(content, end="", flush=True)
+                    response.append(i)
+                    yield i
+            else:
+                async for i in self._stream(
+                    verbose=verbose,
+                    output_file=output_file,
+                    with_response_header=with_response_header,
+                ):
+                    response.append(i)
+                    yield i
+        except Exception as e:
+            e1 = e
+        finally:
+            self.execution.duration = asyncio.get_event_loop().time() - start
+            if not response and e1:
+                self.execution.error = str(e1)
+                self.execution.status = EventStatus.FAILED
+                logging.error(
+                    f"API call to {self.endpoint.full_url} failed: {e1}"
+                )
+            else:
+                self.execution.response = response
+                self.execution.status = EventStatus.COMPLETED
 
     async def invoke(self) -> None:
         """Invokes the API call, updating the execution state with results.
@@ -483,9 +589,10 @@ class APICalling(Event):
         """
         start = asyncio.get_event_loop().time()
         kwargs = {"headers": self.headers, "json": self.payload}
+        response = None
+        e1 = None
 
         try:
-            response = None
             if self.should_invoke_endpoint and self.endpoint.is_invokeable:
                 response = await self.endpoint.invoke(
                     payload=self.payload,
@@ -498,14 +605,20 @@ class APICalling(Event):
             else:
                 response = await self._inner(**kwargs)
 
-            self.execution.duration = asyncio.get_event_loop().time() - start
-            self.execution.response = response
-            self.execution.status = EventStatus.COMPLETED
         except Exception as e:
+            e1 = e
+
+        finally:
             self.execution.duration = asyncio.get_event_loop().time() - start
-            self.execution.error = str(e)
-            self.execution.status = EventStatus.FAILED
-            logging.error(f"API call to {self.endpoint.full_url} failed: {e}")
+            if not response and e1:
+                self.execution.error = str(e1)
+                self.execution.status = EventStatus.FAILED
+                logging.error(
+                    f"API call to {self.endpoint.full_url} failed: {e1}"
+                )
+            else:
+                self.execution.response = response
+                self.execution.status = EventStatus.COMPLETED
 
     def __str__(self) -> str:
         return (
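Both `stream()` and `invoke()` now share the same error-latching shape: the first exception is captured into `e1` instead of being handled inline, and the final status is decided exactly once in `finally`, based on whether any response arrived. The pattern in isolation; the `run` helper and the `op` callable are illustrative, not lionagi API:

```python
import asyncio
import logging

async def run(op) -> None:
    # op: any zero-argument coroutine function (illustrative).
    start = asyncio.get_event_loop().time()
    response, e1 = None, None
    try:
        response = await op()
    except Exception as e:
        e1 = e  # latch the first failure
    finally:
        # Decide success/failure exactly once, in one place.
        duration = asyncio.get_event_loop().time() - start
        if not response and e1:
            logging.error(f"failed after {duration:.2f}s: {e1}")
        else:
            logging.info(f"ok in {duration:.2f}s")
```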
lionagi/service/endpoints/chat_completion.py CHANGED
@@ -27,10 +27,13 @@ class ChatCompletionEndPoint(EndPoint):
         headers: dict,
         **kwargs,
     ):
-        import litellm
+        from lionagi.libs.package.imports import check_import
+
+        check_import("litellm")
+        import litellm  # type: ignore
 
         litellm.drop_params = True
-        from litellm import acompletion
+        from litellm import acompletion  # type: ignore
 
         provider = self.config.provider
 
@@ -64,10 +67,13 @@ class ChatCompletionEndPoint(EndPoint):
         headers: dict,
         **kwargs,
     ) -> AsyncGenerator:
-        import litellm
+        from lionagi.libs.package.imports import check_import
+
+        check_import("litellm")
+        import litellm  # type: ignore
 
         litellm.drop_params = True
-        from litellm import acompletion
+        from litellm import acompletion  # type: ignore
 
         provider = self.config.provider
 
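Both completion paths now gate the litellm import behind `check_import`, which matches litellm's removal from the hard requirements in the METADATA hunk further down. A generic sketch of the lazy optional-import pattern; this `require` helper is not lionagi's `check_import` (which, per its home in `lionagi.libs.package.imports`, may also attempt installation) and simply fails with a clear message:

```python
import importlib

def require(module_name: str):
    # Import an optional dependency, raising a helpful error if absent.
    try:
        return importlib.import_module(module_name)
    except ImportError as e:
        raise ImportError(
            f"{module_name} is optional for this feature; "
            f"install it with `pip install {module_name}`."
        ) from e

litellm = require("litellm")
litellm.drop_params = True  # as in the hunks above
```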
lionagi/service/endpoints/rate_limited_processor.py CHANGED
@@ -28,8 +28,13 @@ class RateLimitedAPIProcessor(Processor):
         interval: float | None = None,
         limit_requests: int = None,
         limit_tokens: int = None,
+        concurrency_limit: int | None = None,
     ):
-        super().__init__(queue_capacity, capacity_refresh_time)
+        super().__init__(
+            queue_capacity=queue_capacity,
+            capacity_refresh_time=capacity_refresh_time,
+            concurrency_limit=concurrency_limit,
+        )
         self.limit_tokens = limit_tokens
         self.limit_requests = limit_requests
         self.interval = interval or self.capacity_refresh_time
@@ -37,6 +42,9 @@ class RateLimitedAPIProcessor(Processor):
         self.available_token = self.limit_tokens
         self._rate_limit_replenisher_task: asyncio.Task | None = None
         self._lock: asyncio.Lock = asyncio.Lock()
+        self._concurrency_sem = asyncio.Semaphore(
+            concurrency_limit or queue_capacity
+        )
 
     async def start_replenishing(self):
         """Start replenishing rate limit capacities at regular intervals."""
@@ -74,6 +82,7 @@ class RateLimitedAPIProcessor(Processor):
         interval: float | None = None,
         limit_requests: int = None,
         limit_tokens: int = None,
+        concurrency_limit: int | None = None,
     ) -> Self:
         self = cls(
             interval=interval,
@@ -81,6 +90,7 @@ class RateLimitedAPIProcessor(Processor):
             capacity_refresh_time=capacity_refresh_time,
             limit_requests=limit_requests,
             limit_tokens=limit_tokens,
+            concurrency_limit=concurrency_limit,
         )
         self._rate_limit_replenisher_task = asyncio.create_task(
             self.start_replenishing()
@@ -126,6 +136,7 @@ class RateLimitedAPIExecutor(Executor):
         limit_requests: int = None,
         limit_tokens: int = None,
         strict_event_type: bool = False,
+        concurrency_limit: int | None = None,
     ):
         config = {
             "queue_capacity": queue_capacity,
@@ -133,13 +144,13 @@ class RateLimitedAPIExecutor(Executor):
             "interval": interval,
             "limit_requests": limit_requests,
             "limit_tokens": limit_tokens,
+            "concurrency_limit": concurrency_limit,
         }
-
+        super().__init__(
+            processor_config=config, strict_event_type=strict_event_type
+        )
         self.config = config
         self.interval = interval
         self.limit_requests = limit_requests
         self.limit_tokens = limit_tokens
-
-        super().__init__(
-            processor_config=config, strict_event_type=strict_event_type
-        )
+        self.concurrency_limit = concurrency_limit or queue_capacity
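`RateLimitedAPIProcessor` and `RateLimitedAPIExecutor` both thread `concurrency_limit` through, with the semaphore defaulting to `queue_capacity` when no explicit limit is given. A construction sketch with illustrative numbers:

```python
# Sketch: wiring a concurrency-limited executor (all values illustrative).
from lionagi.service.endpoints.rate_limited_processor import (
    RateLimitedAPIExecutor,
)

executor = RateLimitedAPIExecutor(
    queue_capacity=100,        # events accepted per processing window
    capacity_refresh_time=60,  # seconds between capacity refreshes
    limit_requests=500,        # request budget per interval
    limit_tokens=100_000,      # token budget per interval
    concurrency_limit=10,      # at most 10 streaming calls in flight
)
```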
lionagi/service/imodel.py CHANGED
@@ -2,11 +2,16 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+import asyncio
 import os
 import warnings
+from collections.abc import AsyncGenerator, Callable
 
 from pydantic import BaseModel
 
+from lionagi.protocols.generic.event import EventStatus
+from lionagi.utils import is_coro_func
+
 from .endpoints.base import APICalling, EndPoint
 from .endpoints.match_endpoint import match_endpoint
 from .endpoints.rate_limited_processor import RateLimitedAPIExecutor
@@ -54,7 +59,9 @@ class iModel:
         interval: float | None = None,
         limit_requests: int = None,
         limit_tokens: int = None,
-        invoke_with_endpoint: bool = True,
+        invoke_with_endpoint: bool = False,
+        concurrency_limit: int | None = None,
+        streaming_process_func: Callable = None,
         **kwargs,
     ) -> None:
         """Initializes the iModel instance.
@@ -88,6 +95,9 @@ class iModel:
             invoke_with_endpoint (bool, optional):
                 If True, the endpoint is actually invoked. If False,
                 calls might be mocked or cached.
+            concurrency_limit (int | None, optional):
+                Maximum number of concurrent streaming requests allowed.
+                Only applies to streaming requests.
             **kwargs:
                 Additional keyword arguments, such as `model`, or any other
                 provider-specific fields.
@@ -144,7 +154,14 @@ class iModel:
             interval=interval,
             limit_requests=limit_requests,
             limit_tokens=limit_tokens,
+            concurrency_limit=concurrency_limit,
         )
+        if not streaming_process_func and hasattr(
+            self.endpoint, "process_chunk"
+        ):
+            self.streaming_process_func = self.endpoint.process_chunk
+        else:
+            self.streaming_process_func = streaming_process_func
 
     def create_api_calling(self, **kwargs) -> APICalling:
         """Constructs an `APICalling` object from endpoint-specific payload.
@@ -179,9 +196,12 @@ class iModel:
             chunk:
                 A portion of the streamed data returned by the API.
         """
-        pass
+        if self.streaming_process_func and not isinstance(chunk, APICalling):
+            if is_coro_func(self.streaming_process_func):
+                return await self.streaming_process_func(chunk)
+            return self.streaming_process_func(chunk)
 
-    async def stream(self, **kwargs) -> APICalling | None:
+    async def stream(self, api_call=None, **kwargs) -> AsyncGenerator:
         """Performs a streaming API call with the given arguments.
 
         Args:
@@ -193,14 +213,38 @@ class iModel:
             An APICalling instance upon success, or None if something
             goes wrong.
         """
-        try:
+        if api_call is None:
             kwargs["stream"] = True
             api_call = self.create_api_calling(**kwargs)
-            async for i in api_call.stream():
-                await self.process_chunk(i)
-            return api_call
-        except Exception as e:
-            raise ValueError(f"Failed to stream API call: {e}")
+            await self.executor.append(api_call)
+
+        if (
+            self.executor.processor is None
+            or self.executor.processor.is_stopped()
+        ):
+            await self.executor.start()
+
+        if self.executor.processor._concurrency_sem:
+            async with self.executor.processor._concurrency_sem:
+                try:
+                    async for i in api_call.stream():
+                        result = await self.process_chunk(i)
+                        if result:
+                            yield result
+                except Exception as e:
+                    raise ValueError(f"Failed to stream API call: {e}")
+                finally:
+                    yield self.executor.pile.pop(api_call.id)
+        else:
+            try:
+                async for i in api_call.stream():
+                    result = await self.process_chunk(i)
+                    if result:
+                        yield result
+            except Exception as e:
+                raise ValueError(f"Failed to stream API call: {e}")
+            finally:
+                yield self.executor.pile.pop(api_call.id)
 
     async def invoke(
         self, api_call: APICalling = None, **kwargs
@@ -232,10 +276,20 @@ class iModel:
 
             await self.executor.append(api_call)
             await self.executor.forward()
-            if api_call.id in self.executor.completed_events:
-                return self.executor.pile.pop(api_call.id)
+            ctr = 0
+            while api_call.status not in (
+                EventStatus.COMPLETED,
+                EventStatus.FAILED,
+            ):
+                if ctr > 100:
+                    break
+                await self.executor.forward()
+                ctr += 1
+                await asyncio.sleep(0.1)
         except Exception as e:
             raise ValueError(f"Failed to invoke API call: {e}")
+        finally:
+            return self.executor.pile.pop(api_call.id)
 
     @property
     def allowed_roles(self) -> set[str]:
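Taken together: `iModel.stream()` is now an async generator that yields processed chunks and, as its final item, the completed `APICalling` popped from the executor's pile, while `invoke()` polls `executor.forward()` until the call reaches COMPLETED or FAILED (capped at roughly 100 polls of 0.1s). A usage sketch for an async context, assuming a chat-style endpoint that accepts `messages`; provider, model, and message values are illustrative:

```python
from lionagi import iModel

gpt4o = iModel(provider="openai", model="gpt-4o")

# Streaming: intermediate yields are processed chunks; the final yield
# is the finished APICalling object popped from the pile.
last = None
async for item in gpt4o.stream(
    messages=[{"role": "user", "content": "hello"}]
):
    last = item
print(last.execution.status)

# Non-streaming: invoke() now polls until the call settles.
call = await gpt4o.invoke(messages=[{"role": "user", "content": "hello"}])
print(call.execution.response)
```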
lionagi/session/branch.py CHANGED
@@ -577,6 +577,7 @@ class Branch(Element, Communicatable, Relational):
         request_options: type[BaseModel] = None,
         description: str = None,
         update: bool = False,
+        **kwargs,
     ):
         if not imodel:
             imodel = iModel(
@@ -591,6 +592,7 @@ class Branch(Element, Communicatable, Relational):
                 limit_requests=limit_requests,
                 limit_tokens=limit_tokens,
                 invoke_with_endpoint=invoke_with_endpoint,
+                **kwargs,
             )
 
         if not update and name in self.tools:
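The two branch.py hunks forward arbitrary keyword arguments from the enclosing method (its name is not visible in this hunk) into the internally constructed `iModel`, which is how new options such as `concurrency_limit` or `streaming_process_func` reach it. The general pass-through pattern, with a hypothetical helper name:

```python
# Hypothetical sketch of the **kwargs pass-through used above.
from lionagi import iModel

def make_model(**kwargs) -> iModel:
    # Extra options (e.g. concurrency_limit=5) flow straight through,
    # exactly as in the Branch hunks.
    return iModel(provider="openai", model="gpt-4o", **kwargs)

imodel = make_model(concurrency_limit=5)
```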
lionagi/tools/reader.py → lionagi/tools/file/reader.py RENAMED
@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field, field_validator
 from lionagi.operatives.action.tool import Tool
 from lionagi.utils import to_num
 
-from .base import LionTool
+from ..base import LionTool
 
 
 class ReaderAction(str, Enum):
@@ -208,7 +208,7 @@ class ReaderTool(LionTool):
         e = min(length, end if end is not None else length)
 
         try:
-            with open(path, "r", encoding="utf-8") as f:
+            with open(path, encoding="utf-8") as f:
                 f.seek(s)
                 content = f.read(e - s)
         except Exception as ex:
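The second hunk simply drops the redundant explicit read mode, since `"r"` is `open()`'s default. A quick demonstration of the equivalence (the file path is illustrative):

```python
from pathlib import Path

p = Path("example.txt")  # illustrative scratch file
p.write_text("hello", encoding="utf-8")

# "r" is open()'s default mode, so these two are equivalent:
with open(p, "r", encoding="utf-8") as f1, open(p, encoding="utf-8") as f2:
    assert f1.read() == f2.read()
```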
lionagi/tools/types.py CHANGED
@@ -1,3 +1,3 @@
-from .reader import ReaderTool
+from .file.reader import ReaderTool
 
 __all__ = ("ReaderTool",)
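The reader tool moved to `lionagi/tools/file/reader.py`, but this re-export keeps the public import path stable across the move:

```python
# Unchanged public path in 0.8.8, re-exported from lionagi.tools.file.reader:
from lionagi.tools.types import ReaderTool
```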
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.8.6"
+__version__ = "0.8.8"
lionagi-0.8.6.dist-info/METADATA → lionagi-0.8.8.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.8.6
+Version: 0.8.8
 Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>
 License: Apache License
@@ -222,11 +222,11 @@ Requires-Python: >=3.10
 Requires-Dist: aiocache>=0.12.0
 Requires-Dist: aiohttp>=3.11.0
 Requires-Dist: jinja2>=3.1.0
-Requires-Dist: litellm>=1.55.3
 Requires-Dist: pandas>=2.0.0
 Requires-Dist: pillow>=10.0.0
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: python-dotenv>=1.0.1
+Requires-Dist: tiktoken>=0.8.0
 Description-Content-Type: text/markdown
 
 ![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935)
@@ -235,310 +235,137 @@ Description-Content-Type: text/markdown
 
 [Documentation](https://lion-agi.github.io/lionagi/) | [Discord](https://discord.gg/aqSJ2v46vu) | [PyPI](https://pypi.org/project/lionagi/) | [Roadmap](https://trello.com/b/3seomsrI/lionagi)
 
-# LION Framework
-### Language InterOperable Network - The Future of Controlled AI Operations
+# LION - Language InterOperable Network
 
-> Harness the power of next-generation AI while maintaining complete control and reliability.
+## An Intelligence Operating System
 
-## Why LION?
+LionAGI is a robust framework for orchestrating multi-step AI operations with precise control. Bring together multiple models, advanced ReAct reasoning, tool integrations, and custom validations in a single coherent pipeline.
 
-The AI revolution is transforming how we work - but with great power comes great responsibility. LION provides the control mechanisms and reliability features needed to safely integrate advanced AI capabilities into enterprise workflows.
-
-LION is designed to be:
-- 🔒 **Controlled**: Built-in safety mechanisms and verification
-- 🎯 **Precise**: Exact control over AI behaviors
-- 🔧 **Flexible**: Build any workflow you need
-- 🚀 **Efficient**: Minimal dependencies, maximum performance
+## Why LionAGI?
 
+- **Structured**: LLM interactions are validated and typed (via Pydantic).
+- **Expandable**: Integrate multiple providers (OpenAI, Anthropic, Perplexity, custom) with minimal friction.
+- **Controlled**: Built-in safety checks, concurrency strategies, and advanced multi-step flows—like ReAct with verbose outputs.
+- **Transparent**: Real-time logging, message introspection, and easy debugging of tool usage.
 
 
 ## Installation
 
-LION maintains minimal dependencies for maximum reliability:
-
-```bash
-uv pip install lionagi
+```
+pip install lionagi
 ```
 
 Dependencies:
-- litellm
-- jinja2
-- pandas
-- pillow
-- python-dotenv
-
+• aiocache
+• aiohttp
+• jinja2
+• pandas
+• pillow
+• pydantic
+• python-dotenv
+• tiktoken
 
 ## Quick Start
-
 ```python
-from lionagi import iModel, Branch
+from lionagi import Branch, iModel
 
-# Initialize model
-gpt4o = iModel(provider="openai", task="chat", model="gpt-4o")
+# Pick a model
+gpt4o = iModel(provider="openai", model="gpt-4o")
 
+# Create a Branch (conversation context)
 hunter = Branch(
-    system="you are a hilarious dragon hunter who responds in 10 words rhymes",
-    imodel=gpt4o,
+    system="you are a hilarious dragon hunter who responds in 10 words rhymes.",
+    chat_model=gpt4o,
 )
 
-# Chat asynchronously
-print(await hunter.communicate("I am a dragon"))
+# Communicate asynchronously
+response = await hunter.communicate("I am a dragon")
+print(response)
 ```
 
 ```
 You claim to be a dragon, oh what a braggin'!
 ```
+### Structured Responses
 
-## 📦 Features
-
-### 1. Model Agnostic Structured Output
-
-LION provides a unified interface for interacting with any AI model, regardless of the underlying architecture. This allows you to easily switch between models without changing your code.
+Use Pydantic to keep outputs structured:
 
 ```python
 from pydantic import BaseModel
 
 class Joke(BaseModel):
-  joke: str
+    joke: str
 
-sonnet = iModel(
-    provider="anthropic",
-    model="claude-3-5-sonnet-20241022",
-    max_tokens=100,  # max_tokens is required for anthropic models
+response = await hunter.communicate(
+    "Tell me a short dragon joke",
+    response_format=Joke
 )
-
-response = await hunter.communicate(
-    instruction="I am a dragon",
-    response_format=Joke,  # structured output in given pydantic model
-    clear_messages=True,  # refresh the conversation
-    imodel=sonnet,  # use sonnet model, which doesn't support structured output
-)
-
 print(type(response))
 print(response.joke)
 ```
-
 ```
 <class '__main__.Joke'>
-Joke(joke='With fiery claws, dragons hide their laughter flaws!')
+With fiery claws, dragons hide their laughter flaws!
 ```
 
+### ReAct and Tools
 
-### 2. Complete Observability
+LionAGI supports advanced multi-step reasoning with ReAct. Tools let the LLM invoke external actions:
 
 ```python
-# using perplexity model
-pplx_small = iModel(
-    provider="perplexity",
-    task="chat/completions",
-    model="llama-3.1-sonar-small-128k-online",
-    max_tokens=1000,
-)
-
-b = await hunter.communicate(
-    instruction="What makes a well-behaved dragon?",
-    clear_messages=True,  # refresh the conversation
-    imodel=pplx_small,  # use perplexity model
+from lionagi.tools.types import ReaderTool
+
+branch = Branch(chat_model=gpt4o, tools=ReaderTool)
+result = await branch.ReAct(
+    instruct={
+        "instruction": "Summarize my PDF and compare with relevant papers.",
+        "context": {"paper_file_path": "/path/to/paper.pdf"},
+    },
+    extension_allowed=True,  # allow multi-round expansions
+    max_extensions=5,
+    verbose=True,  # see step-by-step chain-of-thought
 )
-
-print(b)
-```
-
-```
-A well-behaved dragon is one that's calm and bright,
-No stress or fear, just a peaceful night.
-It's active, not lethargic, with a happy face,
-And behaviors like digging, not a frantic pace.
-It's social, friendly, and never a fright,
-Just a gentle soul, shining with delight
-```
-
-```python
-hunter.msgs.last_response.model_response
-```
-
-```
-{'id': '1be10f4c-0936-4050-ab48-91bd86ab11a5',
- 'model': 'llama-3.1-sonar-small-128k-online',
- 'object': 'chat.completion',
- 'created': 1734369700,
- 'choices': [{'index': 0,
-   'message': {'role': 'assistant',
-    'content': "A well-behaved dragon is one that's calm and bright,\nNo stress or fear, just a peaceful night.\nIt's active, not lethargic, with a happy face,\nAnd behaviors like digging, not a frantic pace.\nIt's social, friendly, and never a fright,\nJust a gentle soul, shining with delight"},
-   'finish_reason': 'stop',
-   'delta': {'role': 'assistant', 'content': ''}}],
- 'usage': {'prompt_tokens': 40, 'completion_tokens': 69, 'total_tokens': 109},
- 'citations': [{'url': 'https://dragonsdiet.com/blogs/dragon-care/15-bearded-dragon-behaviors-and-what-they-could-mean'},
-  {'url': 'https://masterbraeokk.tripod.com/dragons/behavior.html'},
-  {'url': 'https://files.eric.ed.gov/fulltext/ED247607.pdf'},
-  {'url': 'https://www.travelchinaguide.com/intro/social_customs/zodiac/dragon/five-elements.htm'},
-  {'url': 'https://www.travelchinaguide.com/intro/social_customs/zodiac/dragon/'}]}
+print(result)
 ```
 
+The LLM can now open the PDF, read in slices, fetch references, and produce a final structured summary.
 
-### 3. Easy composition of complex workflows
-
+### Observability & Debugging
+- Inspect messages:
 ```python
-# chain of thoughts
-from pydantic import Field
-
-class Reason(BaseModel):
-    reason: str
-    confidence_score: float
-
-class Thought(BaseModel):
-    thought: str
-
-class Analysis(BaseModel):
-    thought: list[Thought] = Field(
-        default_factory=list,
-        description="concise Chain of thoughts from you, 3 step, each in 8 words"
-    )
-    analysis: str = Field(
-        ...,
-        description="Final analysis of the dragon's psyche in 20 words",
-    )
-    reason: list[Reason] = Field(
-        default_factory=list,
-        description="Concise Reasoning behind the analysis, 3 support, each in 8 words"
-    )
-
-context1 = "I am a dragon, I think therefore I am, I suffer from shiny objects syndrome"
-context2 = "I like food and poetry, I use uv sometimes, it's cool but I am not familiar with pip"
-
-async def analyze(context) -> Analysis:
-    psychologist = Branch(
-        system="you are a renowned dragon psychologist",
-        imodel=gpt4o,
-    )
-    return await psychologist.communicate(
-        instruction="analyze the dragon's psyche using chain of thoughts",
-        guidance="think step by step, reason with logic",
-        context=context,
-        response_format=Analysis,
-    )
-
+df = branch.to_df()
+print(df.tail())
 ```
+- Action logs show each tool call, arguments, and outcomes.
+- Verbose ReAct provides chain-of-thought analysis (helpful for debugging multi-step flows).
 
-```python
-result1 = await analyze(context1)
-
-print("\nThoughts:")
-for i in result1.thought:
-    print(i.thought)
-
-print("\nAnalysis:")
-print(result1.analysis)
-
-print("\nReasoning:")
-for i in result1.reason:
-    print(i.reason)
-```
-
-```
-
-Thoughts:
-Dragons are attracted to shiny objects naturally.
-This suggests a strong affinity for hoarding.
-Reflects the dragon's inherent desire for possession.
-
-Analysis:
-The dragon demonstrates a compulsive hoarding behavior linked to attraction for shiny objects.
-
-Reasoning:
-Shiny objects trigger instinctual hoarding behavior.
-Possession indicates a symbol of power and security.
-Hoarding is reinforced by evolutionary survival mechanisms.
-```
+### Example: Multi-Model Orchestration
 
 ```python
-result2 = await analyze(context2)
+from lionagi import Branch, iModel
 
-print("\nThoughts:")
-for i in result2.thought:
-    print(i.thought)
-
-print("\nAnalysis:")
-print(result2.analysis)
+gpt4o = iModel(provider="openai", model="gpt-4o")
+sonnet = iModel(
+    provider="anthropic",
+    model="claude-3-5-sonnet-20241022",
+    max_tokens=1000,  # max_tokens is required for anthropic models
+)
 
-print("\nReasoning:")
-for i in result2.reason:
-    print(i.reason)
+branch = Branch(chat_model=gpt4o)
+# Switch mid-flow
+analysis = await branch.communicate("Analyze these stats", imodel=sonnet)
 ```
 
-```
-Thoughts:
-Dragon enjoys both food and poetry regularly.
-Dragon uses uv light with frequent interest.
-Dragon is unfamiliar and not comfortable with pip.
-
-Analysis:
-The dragon is curious and exploratory, yet selectively cautious about unfamiliar methodologies.
-
-Reasoning:
-Preference for food and poetry suggests curiosity.
-Frequent uv light use indicates exploratory nature.
-Discomfort with pip usage shows selective caution.
-```
+Seamlessly route to different models in the same workflow.
 
+## Community & Contributing
 
+We welcome issues, ideas, and pull requests:
+- Discord: Join to chat or get help
+- Issues / PRs: GitHub
 
-## 🌟 Example Workflow
-
-Below is an example of what you can build with LION. Note that these are sample implementations - LION provides the building blocks, you create the workflows that fit your needs.
-
-```mermaid
-sequenceDiagram
-    autonumber
-    participant Client
-    participant Orchestrator
-    participant ResearchAgent
-    participant AnalysisAgent
-    participant ValidationAgent
-    participant Tools
-
-    Client->>+Orchestrator: Submit Complex Task
-    Note over Orchestrator: Task Analysis & Planning
-
-    %% Research Phase
-    Orchestrator->>+ResearchAgent: Delegate Research
-    activate ResearchAgent
-    ResearchAgent->>Tools: Access Data Sources
-    Tools-->>ResearchAgent: Raw Data
-    ResearchAgent-->>-Orchestrator: Research Results
-    deactivate ResearchAgent
-
-    %% Analysis Phase
-    Orchestrator->>+AnalysisAgent: Process Data
-    activate AnalysisAgent
-    AnalysisAgent->>Tools: Apply Models
-    Tools-->>AnalysisAgent: Analysis Results
-    AnalysisAgent-->>-Orchestrator: Processed Insights
-    deactivate AnalysisAgent
-
-    %% Validation Phase
-    Orchestrator->>+ValidationAgent: Verify Results
-    activate ValidationAgent
-    ValidationAgent->>Tools: Apply Safety Checks
-    Tools-->>ValidationAgent: Validation Status
-    ValidationAgent-->>-Orchestrator: Verified Results
-    deactivate ValidationAgent
-
-    Orchestrator-->>-Client: Return Validated Output
+### Citation
 ```
-
-
-## 🤝 Contributing
-
-Join our [Discord community](https://discord.gg/aqSJ2v46vu) to:
-- Share ideas
-- Report issues
-- Contribute code
-- Learn from others
-
-## 📚 Citation
-
-```bibtex
 @software{Li_LionAGI_2023,
   author = {Haiyang Li},
   month = {12},
@@ -547,3 +374,6 @@ Join our [Discord community](https://discord.gg/aqSJ2v46vu) to:
   url = {https://github.com/lion-agi/lionagi},
 }
 ```
+
+**🦁 LionAGI**
+> Because real AI orchestration demands more than a single prompt. Try it out and discover the next evolution in structured, multi-model, safe AI.
lionagi-0.8.6.dist-info/RECORD → lionagi-0.8.8.dist-info/RECORD RENAMED
@@ -4,7 +4,7 @@ lionagi/_errors.py,sha256=wNKdnVQvE_CHEstK7htrrj334RA_vbGcIds-3pUiRkc,455
 lionagi/_types.py,sha256=9g7iytvSj3UjZxD-jL06_fxuNfgZyWT3Qnp0XYp1wQU,63
 lionagi/settings.py,sha256=k9zRJXv57TveyfHO3Vr9VGiKrSwlRUUVKt5zf6v9RU4,1627
 lionagi/utils.py,sha256=QbF4E1PG-BaRcEVH3kJIYCJVNq-oRNoTxjda5k8NYW4,73177
-lionagi/version.py,sha256=VpASnrti7EGWxUfSWGgERUfe7NLJltfVXYosOzHbpPg,22
+lionagi/version.py,sha256=S5bBAK8bL7bybaXGJQuNE98fa3H65zGjTASMiyKGJGw,22
 lionagi/libs/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/libs/parse.py,sha256=tpEbmIRGuHhLCJlUlm6fjmqm_Z6XJLAXGNFHNuk422I,1011
 lionagi/libs/file/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
@@ -48,7 +48,7 @@ lionagi/operations/__init__.py,sha256=O7nV0tedpUe7_OlUWmCcduGPFtqtzWZcR_SIOnjLsr
 lionagi/operations/manager.py,sha256=H7UY86PIxvxKdzJY9YVsWyJcqlwLWhVyvm4sYePH_uY,565
 lionagi/operations/types.py,sha256=LIa68xcyKLVafof-DSFwKtSkneuYPFqrtGyClohYI6o,704
 lionagi/operations/utils.py,sha256=Twy6L_UFt9JqJFRYuKKTKVZIXsePidNl5ipcYcCbesI,1220
-lionagi/operations/ReAct/ReAct.py,sha256=odFcuNMuwJ2NjUGGdTekFJzD43WFvNFNHCjzS1X6HT8,4962
+lionagi/operations/ReAct/ReAct.py,sha256=_PwoP3RgazGsAaDEOWEABATpBujI7e5OQhbc9AyIA1o,5082
 lionagi/operations/ReAct/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/operations/ReAct/utils.py,sha256=uWPZC1aJVAPvJweAgr3NdXpYszeagN5OnJIkUdrSvlw,3228
 lionagi/operations/_act/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
@@ -129,10 +129,10 @@ lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py,sha256=ULGZVhK5aaOuTr
 lionagi/protocols/adapters/pandas_/pd_series_adapter.py,sha256=TX3cqFtgEip8JqVqkjdJYOu4PQGpW1yYU6POhvz8Jeg,1388
 lionagi/protocols/generic/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/protocols/generic/element.py,sha256=6RPUqs3FOdSC65QOwnfBD9yKVsWnmb878kmfas0m928,14169
-lionagi/protocols/generic/event.py,sha256=SjR9N4Egr5_oqOBSKVsxQjsn6MlqNcwf48bee-qIT6Y,4879
+lionagi/protocols/generic/event.py,sha256=8_581UqtCPCRPYVjTRtFQc_huky10xV4Rn9dxIwMjaA,5200
 lionagi/protocols/generic/log.py,sha256=xi8dRKwxtxVYU8T_E4wYJE4lCQzkERgAUARcAN7ZngI,7441
 lionagi/protocols/generic/pile.py,sha256=Nx4IkWDC5Tdykrw_uGrudgS6Yrz1v1Sykv8t74Deuic,31434
-lionagi/protocols/generic/processor.py,sha256=4Gkie1DxE0U-uZAdNBTuTibUlyeEGm_OyVlMXilCEm8,10115
+lionagi/protocols/generic/processor.py,sha256=n1DFmLY4bfvrSQNQ5tQ0_Pkyq0lzP9cYtDAmX4yC5Pc,10382
 lionagi/protocols/generic/progression.py,sha256=OAirlukJ34rKRipv8MtG7PvvJRkq8nz-RIYB-Yi39Mo,15189
 lionagi/protocols/graph/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/protocols/graph/edge.py,sha256=cEbhqapsdJqHx5VhPwXwOkahfC7E7XZNbRqGixt_EFc,5229
@@ -161,14 +161,14 @@ lionagi/protocols/messages/templates/instruction_message.jinja2,sha256=L-ptw5OHx
 lionagi/protocols/messages/templates/system_message.jinja2,sha256=JRKJ0aFpYfaXSFouKc_N4unZ35C3yZTOWhIrIdCB5qk,215
 lionagi/protocols/messages/templates/tool_schemas.jinja2,sha256=ozIaSDCRjIAhLyA8VM6S-YqS0w2NcctALSwx4LjDwII,126
 lionagi/service/__init__.py,sha256=DMGXIqPsmut9H5GT0ZeSzQIzYzzPwI-2gLXydpbwiV8,21
-lionagi/service/imodel.py,sha256=zQq9cdVPpEAPB7IscntExvtHOYA5ToiWonmD2n93pEw,12273
+lionagi/service/imodel.py,sha256=KCIwRXf8djr5S-t_Vc9dZGvVIzLyirDWDbOcb7ECMsw,14470
 lionagi/service/manager.py,sha256=MKSYBkg23s7YhZy5GEFdnpspEnhPVfFhpkpoJe20D7k,1435
 lionagi/service/types.py,sha256=v9SAn5-GTmds4Mar13Or_VFrRHCinBK99dmeDUd-QNk,486
 lionagi/service/endpoints/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/service/endpoints/base.py,sha256=SaYobDBCdKd4398TyFPp5u3PKyMnzEbm6PsoUxVkZDA,18605
-lionagi/service/endpoints/chat_completion.py,sha256=9ltSQaKPH43WdEDW32_-f5x07I9hOU8g-T_PAG-nYsQ,2529
+lionagi/service/endpoints/base.py,sha256=KovCttNJ0h22vmg3wSxZK7IE7jsydw4L3xXbjTOynq8,23134
+lionagi/service/endpoints/chat_completion.py,sha256=-LsmBdYEwEhREEbti_81nTMy_pgaX9xteukj8IPq_bI,2783
 lionagi/service/endpoints/match_endpoint.py,sha256=hIGYyok1y53FfI6av5NfYMygRIpDWYZbdCj0pJJfmPY,1874
-lionagi/service/endpoints/rate_limited_processor.py,sha256=umri0FofbyBSFdAQBEsviDB5K6N12LkRiXQgSOorGKg,4663
+lionagi/service/endpoints/rate_limited_processor.py,sha256=kOFp7oTKZXQDdbgyCYKvR-zUIq_WPEfeRDF0SkCbtCs,5199
 lionagi/service/endpoints/token_calculator.py,sha256=MflqImGUr_1jh465hB7cUAaIPICBkjirvre1fWGXLrA,6161
 lionagi/service/providers/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/service/providers/types.py,sha256=NS91ysRFwOs0cpNeQgFhmtl7JrSz2pJm-tt7sZILmQY,683
@@ -188,13 +188,14 @@ lionagi/service/providers/perplexity_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZK
 lionagi/service/providers/perplexity_/chat_completions.py,sha256=jhE-KHWRX6yYEeKWLMCKLgK3bQzieSv2viqQWDP8q0Q,1197
 lionagi/service/providers/perplexity_/models.py,sha256=gXH4XGkhZ4aFxvMSDTlHq9Rz1mhu3aTENXAtE-BIr6U,4866
 lionagi/session/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/session/branch.py,sha256=EH1JhOe1ZGlCVXpf0znz_xAA3GibNIGFATwLAyxiCK0,67835
+lionagi/session/branch.py,sha256=cSdhebkS3Shc-ZwS1KpoEOidiq2AqQBidli1jwezFz4,67879
 lionagi/session/session.py,sha256=po6C7PnM0iu_ISHUo4PBzzQ61HFOgcsAUfPoO--eLak,8987
 lionagi/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lionagi/tools/base.py,sha256=ffaIcLF_uwEphCkP_wsa3UfkqVenML3HpsnR5kRCTtA,236
-lionagi/tools/reader.py,sha256=TyjSqhSIQwxdkwgYSz760YKBbqJ5OfwZegRwQz47R24,7509
-lionagi/tools/types.py,sha256=_OWzoTHTcqNwPs3OGrPkpO9m_vHDCxVDL-FN-t6ZD60,58
-lionagi-0.8.6.dist-info/METADATA,sha256=SmTcVVpZcb_nfuNo5WrDS6ilr16Tj0qL_yX-AAP1ri0,22819
-lionagi-0.8.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-lionagi-0.8.6.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
-lionagi-0.8.6.dist-info/RECORD,,
+lionagi/tools/types.py,sha256=O6ipx7zX0piaIQ3c8V3zHWrXH-1gdIe-KQ4xTPSiLp0,63
+lionagi/tools/file/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lionagi/tools/file/reader.py,sha256=o6tgqNmxNq64al4hqNS_whPQbH2CP7YygYQUhTmfjQU,7505
+lionagi-0.8.8.dist-info/METADATA,sha256=_BdfHxsxVZfXAAFEu--AZNNVPrlfeThuhogbDWq9I4s,18053
+lionagi-0.8.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lionagi-0.8.8.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+lionagi-0.8.8.dist-info/RECORD,,