lionagi 0.8.2__py3-none-any.whl → 0.8.7__py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- lionagi/__init__.py +6 -0
- lionagi/operations/ReAct/ReAct.py +53 -21
- lionagi/operations/ReAct/utils.py +72 -8
- lionagi/operations/_act/act.py +8 -0
- lionagi/operations/interpret/interpret.py +39 -8
- lionagi/operations/operate/operate.py +7 -1
- lionagi/operatives/action/function_calling.py +12 -2
- lionagi/operatives/action/tool.py +10 -0
- lionagi/operatives/forms/flow.py +0 -1
- lionagi/operatives/forms/form.py +1 -1
- lionagi/protocols/messages/instruction.py +104 -51
- lionagi/service/endpoints/base.py +11 -0
- lionagi/service/imodel.py +11 -0
- lionagi/service/providers/exa_/search.py +4 -4
- lionagi/service/providers/perplexity_/chat_completions.py +4 -0
- lionagi/service/providers/perplexity_/models.py +144 -0
- lionagi/session/branch.py +135 -3
- lionagi/tools/__init__.py +0 -0
- lionagi/tools/base.py +12 -0
- lionagi/tools/reader.py +244 -0
- lionagi/tools/types.py +3 -0
- lionagi/version.py +1 -1
- {lionagi-0.8.2.dist-info → lionagi-0.8.7.dist-info}/METADATA +73 -246
- {lionagi-0.8.2.dist-info → lionagi-0.8.7.dist-info}/RECORD +26 -21
- {lionagi-0.8.2.dist-info → lionagi-0.8.7.dist-info}/WHEEL +0 -0
- {lionagi-0.8.2.dist-info → lionagi-0.8.7.dist-info}/licenses/LICENSE +0 -0
lionagi/tools/reader.py
ADDED
@@ -0,0 +1,244 @@
import tempfile
from enum import Enum

from pydantic import BaseModel, Field, field_validator

from lionagi.operatives.action.tool import Tool
from lionagi.utils import to_num

from .base import LionTool


class ReaderAction(str, Enum):
    """
    This enumeration indicates the *type* of action the LLM wants to perform.
    - 'open': Convert a file/URL to text and store it internally for partial reads
    - 'read': Return a partial slice of the already-opened doc
    """

    open = "open"
    read = "read"


class ReaderRequest(BaseModel):
    """
    The request model for the 'ReaderTool'.
    It indicates:
    - whether we are 'open'-ing a doc or 'read'-ing from a doc
    - which file/URL we want to open (if action='open')
    - which doc_id and offsets we want to read (if action='read')
    """

    action: ReaderAction = Field(
        ...,
        description=(
            "Action to perform. Must be one of: "
            "- 'open': Convert a file/URL to text and store it internally for partial reads. "
            "- 'read': Return a partial slice of the already-opened doc."
        ),
    )

    path_or_url: str | None = Field(
        None,
        description=(
            "Local file path or remote URL to open. This field is REQUIRED if action='open'. "
            "If action='read', leave it None."
        ),
    )

    doc_id: str | None = Field(
        None,
        description=(
            "Unique ID referencing a previously opened document. "
            "This field is REQUIRED if action='read'. If action='open', leave it None."
        ),
    )

    start_offset: int | None = Field(
        None,
        description=(
            "Character start offset in the doc for partial reading. "
            "If omitted or None, defaults to 0. Only used if action='read'."
        ),
    )

    end_offset: int | None = Field(
        None,
        description=(
            "Character end offset in the doc for partial reading. "
            "If omitted or None, we read until the document's end. Only used if action='read'."
        ),
    )

    @field_validator("start_offset", "end_offset", mode="before")
    def _validate_offsets(cls, v):
        try:
            return to_num(v, num_type=int)
        except ValueError:
            return None


class DocumentInfo(BaseModel):
    """
    Returned info when we 'open' a doc.
    doc_id: The unique string to reference this doc in subsequent 'read' calls
    length: The total character length of the converted text
    """

    doc_id: str
    length: int | None = None


class PartialChunk(BaseModel):
    """
    Represents a partial slice of text from [start_offset..end_offset).
    """

    start_offset: int | None = None
    end_offset: int | None = None
    content: str | None = None


class ReaderResponse(BaseModel):
    """
    The response from the 'ReaderTool'.
    - If action='open' succeeded, doc_info is filled (doc_id & length).
    - If action='read' succeeded, chunk is filled (the partial text).
    - If failure occurs, success=False & error hold details.
    """

    success: bool = Field(
        ...,
        description=(
            "Indicates if the requested action was performed successfully."
        ),
    )
    error: str | None = Field(
        None,
        description=("Describes any error that occurred, if success=False."),
    )
    doc_info: DocumentInfo | None = Field(
        None,
        description=(
            "Populated only if action='open' succeeded, letting the LLM know doc_id & total length."
        ),
    )
    chunk: PartialChunk | None = Field(
        None,
        description=(
            "Populated only if action='read' succeeded, providing the partial slice of text."
        ),
    )


class ReaderTool(LionTool):
    """
    A single tool that the LLM can call with ReaderRequest to either:
    - open a doc (File/URL) -> returns doc_id, doc length
    - read partial text from doc -> returns chunk
    """

    is_lion_system_tool = True
    system_tool_name = "reader_tool"

    from lionagi.libs.package.imports import check_import

    DocumentConverter = check_import(
        "docling",
        module_name="document_converter",
        import_name="DocumentConverter",
    )

    def __init__(self):
        super().__init__()
        self.converter = ReaderTool.DocumentConverter()
        self.documents = {}  # doc_id -> (temp_file_path, doc_length)
        self._tool = None

    def handle_request(self, request: ReaderRequest) -> ReaderResponse:
        """
        A function that takes ReaderRequest to either:
        - open a doc (File/URL) -> returns doc_id, doc length
        - read partial text from doc -> returns chunk
        """
        if isinstance(request, dict):
            request = ReaderRequest(**request)
        if request.action == "open":
            return self._open_doc(request.path_or_url)
        elif request.action == "read":
            return self._read_doc(
                request.doc_id, request.start_offset, request.end_offset
            )
        else:
            return ReaderResponse(success=False, error="Unknown action type")

    def _open_doc(self, source: str) -> ReaderResponse:
        try:
            result = self.converter.convert(source)
            text = result.document.export_to_markdown()
        except Exception as e:
            return ReaderResponse(
                success=False, error=f"Conversion error: {str(e)}"
            )

        doc_id = f"DOC_{abs(hash(source))}"
        temp_file = tempfile.NamedTemporaryFile(
            delete=False, mode="w", encoding="utf-8"
        )
        temp_file.write(text)
        doc_len = len(text)
        temp_file.close()

        # store info
        self.documents[doc_id] = (temp_file.name, doc_len)

        return ReaderResponse(
            success=True, doc_info=DocumentInfo(doc_id=doc_id, length=doc_len)
        )

    def _read_doc(self, doc_id: str, start: int, end: int) -> ReaderResponse:
        if doc_id not in self.documents:
            return ReaderResponse(
                success=False, error="doc_id not found in memory"
            )

        path, length = self.documents[doc_id]
        # clamp offsets
        s = max(0, start if start is not None else 0)
        e = min(length, end if end is not None else length)

        try:
            with open(path, encoding="utf-8") as f:
                f.seek(s)
                content = f.read(e - s)
        except Exception as ex:
            return ReaderResponse(
                success=False, error=f"Read error: {str(ex)}"
            )

        return ReaderResponse(
            success=True,
            chunk=PartialChunk(start_offset=s, end_offset=e, content=content),
        )

    def to_tool(self):
        if self._tool is None:

            def reader_tool(**kwargs):
                """
                A function that takes ReaderRequest to either:
                - open a doc (File/URL) -> returns doc_id, doc length
                - read partial text from doc -> returns chunk
                """
                return self.handle_request(
                    ReaderRequest(**kwargs)
                ).model_dump()

            if self.system_tool_name != "reader_tool":
                reader_tool.__name__ = self.system_tool_name

            self._tool = Tool(
                func_callable=reader_tool,
                request_options=ReaderRequest,
            )
        return self._tool
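For orientation, here is a minimal usage sketch of the new reader tool. It is not part of the diff: it assumes `docling` is installed, uses a hypothetical file path, and simply exercises the request/response models defined above. Inside a Branch, the tool would normally be registered via `ReaderTool` (see the ReAct example in the updated README below) rather than called directly.

```python
# Hypothetical sketch (assumes docling is installed; "/path/to/doc.pdf" is a placeholder).
from lionagi.tools.reader import ReaderRequest, ReaderTool

reader = ReaderTool()

# 'open': convert the file/URL to text and cache it for partial reads
opened = reader.handle_request(
    ReaderRequest(action="open", path_or_url="/path/to/doc.pdf")
)
assert opened.success, opened.error
doc_id = opened.doc_info.doc_id
length = opened.doc_info.length or 0

# 'read': pull the character slice [0, 500) of the converted text
sliced = reader.handle_request(
    ReaderRequest(
        action="read", doc_id=doc_id, start_offset=0, end_offset=min(500, length)
    )
)
print(sliced.chunk.content if sliced.success else sliced.error)
```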
lionagi/tools/types.py
ADDED
lionagi/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.8.2"
+__version__ = "0.8.7"
{lionagi-0.8.2.dist-info → lionagi-0.8.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.8.2
+Version: 0.8.7
 Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>
 License: Apache License
@@ -235,310 +235,134 @@ Description-Content-Type: text/markdown
 
 [Documentation](https://lion-agi.github.io/lionagi/) | [Discord](https://discord.gg/aqSJ2v46vu) | [PyPI](https://pypi.org/project/lionagi/) | [Roadmap](https://trello.com/b/3seomsrI/lionagi)
 
-# LION
-### Language InterOperable Network - The Future of Controlled AI Operations
+# LION - Language InterOperable Network
 
-
+## An Intelligence Operating System
 
-
+LionAGI is a robust framework for orchestrating multi-step AI operations with precise control. Bring together multiple models, advanced ReAct reasoning, tool integrations, and custom validations in a single coherent pipeline.
 
-
-
-LION is designed to be:
-- 🔒 **Controlled**: Built-in safety mechanisms and verification
-- 🎯 **Precise**: Exact control over AI behaviors
-- 🔧 **Flexible**: Build any workflow you need
-- 🚀 **Efficient**: Minimal dependencies, maximum performance
+## Why LionAGI?
 
+- **Structured**: LLM interactions are validated and typed (via Pydantic).
+- **Expandable**: Integrate multiple providers (OpenAI, Anthropic, Perplexity, custom) with minimal friction.
+- **Controlled**: Built-in safety checks, concurrency strategies, and advanced multi-step flows—like ReAct with verbose outputs.
+- **Transparent**: Real-time logging, message introspection, and easy debugging of tool usage.
 
 
 ## Installation
 
-
-
-```bash
-uv pip install lionagi
+```
+pip install lionagi
 ```
 
 Dependencies:
-
-
-
-
-
-
+• litellm
+• jinja2
+• pandas
+• pillow
+• python-dotenv
 
 ## Quick Start
-
 ```python
-from lionagi import 
+from lionagi import Branch, iModel
 
-#
-gpt4o = iModel(provider="openai",
+# Pick a model
+gpt4o = iModel(provider="openai", model="gpt-4o")
 
+# Create a Branch (conversation context)
 hunter = Branch(
-    system="you are a hilarious dragon hunter who responds in 10 words rhymes",
-
+    system="you are a hilarious dragon hunter who responds in 10 words rhymes.",
+    chat_model=gpt4o,
 )
 
-#
-
+# Communicate asynchronously
+response = await hunter.communicate("I am a dragon")
+print(response)
 ```
 
 ```
 You claim to be a dragon, oh what a braggin'!
 ```
+### Structured Responses
 
-
-
-### 1. Model Agnostic Structured Output
-
-LION provides a unified interface for interacting with any AI model, regardless of the underlying architecture. This allows you to easily switch between models without changing your code.
+Use Pydantic to keep outputs structured:
 
 ```python
 from pydantic import BaseModel
 
 class Joke(BaseModel):
-
+    joke: str
 
-
-
-
-    max_tokens=100, # max_tokens is required for anthropic models
+res = await hunter.communicate(
+    "Tell me a short dragon joke",
+    response_format=Joke
 )
-
-response = await hunter.communicate(
-    instruction="I am a dragon",
-    response_format=Joke, # structured output in given pydantic model
-    clear_messages=True, # refresh the conversation
-    imodel=sonnet, # use sonnet model, which doesn't support structured output
-)
-
 print(type(response))
 print(response.joke)
 ```
-
 ```
 <class '__main__.Joke'>
-
+With fiery claws, dragons hide their laughter flaws!
 ```
 
+### ReAct and Tools
 
-
+LionAGI supports advanced multi-step reasoning with ReAct. Tools let the LLM invoke external actions:
 
 ```python
-
-
-
-
-
-
-
-
-
-
-
-    imodel=pplx_small, # use perplexity model
+from lionagi.tools.types import ReaderTool
+
+branch = Branch(chat_model=gpt4o, tools=ReaderTool)
+result = await branch.ReAct(
+    instruct={
+        "instruction": "Summarize my PDF and compare with relevant papers.",
+        "context": {"paper_file_path": "/path/to/paper.pdf"},
+    },
+    extension_allowed=True, # allow multi-round expansions
+    max_extensions=5,
+    verbose=True, # see step-by-step chain-of-thought
 )
-
-print(b)
-```
-
-```
-A well-behaved dragon is one that's calm and bright,
-No stress or fear, just a peaceful night.
-It's active, not lethargic, with a happy face,
-And behaviors like digging, not a frantic pace.
-It's social, friendly, and never a fright,
-Just a gentle soul, shining with delight
-```
-
-```python
-hunter.msgs.last_response.model_response
-```
-
-```
-{'id': '1be10f4c-0936-4050-ab48-91bd86ab11a5',
- 'model': 'llama-3.1-sonar-small-128k-online',
- 'object': 'chat.completion',
- 'created': 1734369700,
- 'choices': [{'index': 0,
-   'message': {'role': 'assistant',
-    'content': "A well-behaved dragon is one that's calm and bright,\nNo stress or fear, just a peaceful night.\nIt's active, not lethargic, with a happy face,\nAnd behaviors like digging, not a frantic pace.\nIt's social, friendly, and never a fright,\nJust a gentle soul, shining with delight"},
-   'finish_reason': 'stop',
-   'delta': {'role': 'assistant', 'content': ''}}],
- 'usage': {'prompt_tokens': 40, 'completion_tokens': 69, 'total_tokens': 109},
- 'citations': [{'url': 'https://dragonsdiet.com/blogs/dragon-care/15-bearded-dragon-behaviors-and-what-they-could-mean'},
-  {'url': 'https://masterbraeokk.tripod.com/dragons/behavior.html'},
-  {'url': 'https://files.eric.ed.gov/fulltext/ED247607.pdf'},
-  {'url': 'https://www.travelchinaguide.com/intro/social_customs/zodiac/dragon/five-elements.htm'},
-  {'url': 'https://www.travelchinaguide.com/intro/social_customs/zodiac/dragon/'}]}
+print(result)
 ```
 
+The LLM can now open the PDF, read in slices, fetch references, and produce a final structured summary.
 
-### 
-
+### Observability & Debugging
+- Inspect messages:
 ```python
-
-
-
-class Reason(BaseModel):
-    reason: str
-    confidence_score: float
-
-class Thought(BaseModel):
-    thought: str
-
-class Analysis(BaseModel):
-    thought: list[Thought] = Field(
-        default_factory=list,
-        description="concise Chain of thoughts from you, 3 step, each in 8 words"
-    )
-    analysis: str = Field(
-        ...,
-        description="Final analysis of the dragon's psyche in 20 words",
-    )
-    reason: list[Reason] = Field(
-        default_factory=list,
-        description="Concise Reasoning behind the analysis, 3 support, each in 8 words"
-    )
-
-context1 = "I am a dragon, I think therefore I am, I suffer from shiny objects syndrome"
-context2 = "I like food and poetry, I use uv sometimes, it's cool but I am not familiar with pip"
-
-async def analyze(context) -> Analysis:
-    psychologist = Branch(
-        system="you are a renowned dragon psychologist",
-        imodel=gpt4o,
-    )
-    return await psychologist.communicate(
-        instruction="analyze the dragon's psyche using chain of thoughts",
-        guidance="think step by step, reason with logic",
-        context=context,
-        response_format=Analysis,
-    )
-
+df = branch.to_df()
+print(df.tail())
 ```
+- Action logs show each tool call, arguments, and outcomes.
+- Verbose ReAct provides chain-of-thought analysis (helpful for debugging multi-step flows).
 
-
-result1 = await analyze(context1)
-
-print("\nThoughts:")
-for i in result1.thought:
-    print(i.thought)
-
-print("\nAnalysis:")
-print(result1.analysis)
-
-print("\nReasoning:")
-for i in result1.reason:
-    print(i.reason)
-```
-
-```
-
-Thoughts:
-Dragons are attracted to shiny objects naturally.
-This suggests a strong affinity for hoarding.
-Reflects the dragon's inherent desire for possession.
-
-Analysis:
-The dragon demonstrates a compulsive hoarding behavior linked to attraction for shiny objects.
-
-Reasoning:
-Shiny objects trigger instinctual hoarding behavior.
-Possession indicates a symbol of power and security.
-Hoarding is reinforced by evolutionary survival mechanisms.
-```
+### Example: Multi-Model Orchestration
 
 ```python
-
+from lionagi import Branch, iModel
 
-
-
-
-
-
+gpt4o = iModel(provider="openai", model="gpt-4o")
+sonnet = iModel(
+    provider="anthropic",
+    model="claude-3-5-sonnet-20241022",
+    max_tokens=1000, # max_tokens is required for anthropic models
+)
 
-
-
-
+branch = Branch(chat_model=gpt4o)
+# Switch mid-flow
+analysis = await branch.communicate("Analyze these stats", imodel=sonnet)
 ```
 
-
-Thoughts:
-Dragon enjoys both food and poetry regularly.
-Dragon uses uv light with frequent interest.
-Dragon is unfamiliar and not comfortable with pip.
-
-Analysis:
-The dragon is curious and exploratory, yet selectively cautious about unfamiliar methodologies.
-
-Reasoning:
-Preference for food and poetry suggests curiosity.
-Frequent uv light use indicates exploratory nature.
-Discomfort with pip usage shows selective caution.
-```
+Seamlessly route to different models in the same workflow.
 
+## Community & Contributing
 
+We welcome issues, ideas, and pull requests:
+- Discord: Join to chat or get help
+- Issues / PRs: GitHub
 
-
-
-Below is an example of what you can build with LION. Note that these are sample implementations - LION provides the building blocks, you create the workflows that fit your needs.
-
-```mermaid
-sequenceDiagram
-    autonumber
-    participant Client
-    participant Orchestrator
-    participant ResearchAgent
-    participant AnalysisAgent
-    participant ValidationAgent
-    participant Tools
-
-    Client->>+Orchestrator: Submit Complex Task
-    Note over Orchestrator: Task Analysis & Planning
-
-    %% Research Phase
-    Orchestrator->>+ResearchAgent: Delegate Research
-    activate ResearchAgent
-    ResearchAgent->>Tools: Access Data Sources
-    Tools-->>ResearchAgent: Raw Data
-    ResearchAgent-->>-Orchestrator: Research Results
-    deactivate ResearchAgent
-
-    %% Analysis Phase
-    Orchestrator->>+AnalysisAgent: Process Data
-    activate AnalysisAgent
-    AnalysisAgent->>Tools: Apply Models
-    Tools-->>AnalysisAgent: Analysis Results
-    AnalysisAgent-->>-Orchestrator: Processed Insights
-    deactivate AnalysisAgent
-
-    %% Validation Phase
-    Orchestrator->>+ValidationAgent: Verify Results
-    activate ValidationAgent
-    ValidationAgent->>Tools: Apply Safety Checks
-    Tools-->>ValidationAgent: Validation Status
-    ValidationAgent-->>-Orchestrator: Verified Results
-    deactivate ValidationAgent
-
-    Orchestrator-->>-Client: Return Validated Output
+### Citation
 ```
-
-
-## 🤝 Contributing
-
-Join our [Discord community](https://discord.gg/aqSJ2v46vu) to:
-- Share ideas
-- Report issues
-- Contribute code
-- Learn from others
-
-## 📚 Citation
-
-```bibtex
 @software{Li_LionAGI_2023,
 author = {Haiyang Li},
 month = {12},
@@ -547,3 +371,6 @@ Join our [Discord community](https://discord.gg/aqSJ2v46vu) to:
 url = {https://github.com/lion-agi/lionagi},
 }
 ```
+
+**🦁 LionAGI**
+> Because real AI orchestration demands more than a single prompt. Try it out and discover the next evolution in structured, multi-model, safe AI.