lionagi 0.0.312__tar.gz → 0.0.314__tar.gz
Sign up to get free protection for your applications and to get access to all the features.
- lionagi-0.0.314/LICENSE +9 -0
- lionagi-0.0.314/PKG-INFO +174 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/README.md +14 -18
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/direct/__init__.py +2 -1
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/direct/predict.py +3 -2
- lionagi-0.0.314/lionagi/core/direct/react.py +167 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/direct/score.py +2 -1
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/direct/select.py +4 -1
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/messages/schema.py +1 -1
- lionagi-0.0.314/lionagi/core/prompt/action_template.py +26 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/prompt/field_validator.py +40 -1
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/prompt/prompt_template.py +36 -40
- lionagi-0.0.314/lionagi/core/prompt/scored_template.py +13 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/structure.py +3 -1
- lionagi-0.0.314/lionagi/core/tool/manual.py +1 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/tool/tool_manager.py +3 -1
- lionagi-0.0.314/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +1 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/ollama.py +1 -1
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/sys_util.py +12 -0
- lionagi-0.0.314/lionagi/version.py +1 -0
- lionagi-0.0.314/lionagi.egg-info/PKG-INFO +174 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi.egg-info/SOURCES.txt +3 -1
- lionagi-0.0.312/LICENSE +0 -201
- lionagi-0.0.312/PKG-INFO +0 -371
- lionagi-0.0.312/lionagi/core/tool/manual.py +0 -0
- lionagi-0.0.312/lionagi/integrations/bridge/pydantic_/base_model.py +0 -7
- lionagi-0.0.312/lionagi/integrations/bridge/pydantic_/pydantic_bridge.py +0 -0
- lionagi-0.0.312/lionagi/version.py +0 -1
- lionagi-0.0.312/lionagi.egg-info/PKG-INFO +0 -371
- {lionagi-0.0.312 → lionagi-0.0.314}/README.rst +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/agent/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/agent/base_agent.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/base/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/base_branch.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/branch.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/branch_flow_mixin.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/executable_branch.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/branch/util.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/direct/utils.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/direct/vote.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/base/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/base/baseflow.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/monoflow/ReAct.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/monoflow/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/monoflow/chat.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/monoflow/chat_mixin.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/monoflow/followup.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/polyflow/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/flow/polyflow/chat.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/mail/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/mail/mail_manager.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/mail/schema.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/messages/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/prompt/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/action_node.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/base_mixin.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/base_node.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/condition.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/data_logger.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/data_node.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/schema/prompt_template.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/session/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/session/session.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/core/tool/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/langchain_/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/langchain_/documents.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/langchain_/langchain_bridge.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/llamaindex_/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/llamaindex_/index.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/llamaindex_/llama_index_bridge.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/llamaindex_/node_parser.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/llamaindex_/reader.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/llamaindex_/textnode.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/bridge/pydantic_/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/config/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/config/mlx_configs.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/config/oai_configs.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/config/ollama_configs.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/config/openrouter_configs.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/litellm.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/mistralai.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/mlx_service.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/oai.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/openrouter.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/services.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/integrations/provider/transformers.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_api.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_async.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_convert.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_dataframe.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_func_call.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_nested.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/libs/ln_parse.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_base_branch.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_branch.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_chat_flow.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_mail_manager.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_prompts.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_session.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_session_base_util.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_core/test_tool_manager.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_integrations/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/__init__.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_api.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_async.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_convert.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_func_call.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_nested.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_parse.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi/tests/test_libs/test_sys_util.py +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi.egg-info/dependency_links.txt +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi.egg-info/requires.txt +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/lionagi.egg-info/top_level.txt +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/pyproject.toml +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/setup.cfg +0 -0
- {lionagi-0.0.312 → lionagi-0.0.314}/setup.py +0 -0
lionagi-0.0.314/LICENSE
ADDED
@@ -0,0 +1,9 @@
|
|
1
|
+
MIT License
|
2
|
+
|
3
|
+
Copyright (c) 2023 HaiyangLi quantocean.li@gmail.com
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
6
|
+
|
7
|
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
8
|
+
|
9
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
lionagi-0.0.314/PKG-INFO
ADDED
@@ -0,0 +1,174 @@
|
|
1
|
+
Metadata-Version: 2.1
|
2
|
+
Name: lionagi
|
3
|
+
Version: 0.0.314
|
4
|
+
Summary: Towards automated general intelligence.
|
5
|
+
Author: HaiyangLi
|
6
|
+
Author-email: Haiyang Li <ocean@lionagi.ai>
|
7
|
+
License: MIT License
|
8
|
+
|
9
|
+
Copyright (c) 2023 HaiyangLi quantocean.li@gmail.com
|
10
|
+
|
11
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
12
|
+
|
13
|
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
16
|
+
Project-URL: PyPI, https://pypi.org/project/lionagi/
|
17
|
+
Project-URL: Repository, https://github.com/lion-agi/lionagi
|
18
|
+
Project-URL: Discord, https://discord.gg/ACnynvvPjt
|
19
|
+
Classifier: Programming Language :: Python :: 3
|
20
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
21
|
+
Classifier: Operating System :: OS Independent
|
22
|
+
Requires-Python: >=3.10
|
23
|
+
Description-Content-Type: text/markdown
|
24
|
+
License-File: LICENSE
|
25
|
+
Requires-Dist: aiohttp>=3.9.3
|
26
|
+
Requires-Dist: python-dotenv==1.0.0
|
27
|
+
Requires-Dist: tiktoken>=0.5.1
|
28
|
+
Requires-Dist: pydantic>=2.6.0
|
29
|
+
Requires-Dist: aiocache>=0.12.2
|
30
|
+
Requires-Dist: pandas>=2.1.0
|
31
|
+
|
32
|
+
![PyPI - Version](https://img.shields.io/pypi/v/lionagi?labelColor=233476aa&color=231fc935) ![PyPI - Downloads](https://img.shields.io/pypi/dm/lionagi?color=blue)
|
33
|
+
|
34
|
+
|
35
|
+
|
36
|
+
[PyPI](https://pypi.org/project/lionagi/) | [Documentation](https://ocean-lion.com/Welcome) | [Discord](https://discord.gg/xCkA5ErGmV)
|
37
|
+
|
38
|
+
```
|
39
|
+
Documentation for v0.0.300+ is in progress
|
40
|
+
|
41
|
+
To contribute, you need to make a fork first, and then make pull request from your fork.
|
42
|
+
```
|
43
|
+
|
44
|
+
# LionAGI
|
45
|
+
|
46
|
+
**Powerful Intelligent Workflow Automation**
|
47
|
+
|
48
|
+
It is an intelligent agentic workflow automation framework. It introduces advanced ML models into any existing workflows and data infrastructure.
|
49
|
+
|
50
|
+
|
51
|
+
### Currently, it can
|
52
|
+
|
53
|
+
- interact with almost any models including local*
|
54
|
+
- run interactions in parallel for most models (OpenRouter, OpenAI, Ollama, litellm...)
|
55
|
+
- produce structured pydantic outputs with flexible usage\*\*
|
56
|
+
- automate workflow via graph based agents
|
57
|
+
- use advanced prompting techniques, i.e. ReAct (reason-action)
|
58
|
+
- …
|
59
|
+
|
60
|
+
### It aims to:
|
61
|
+
|
62
|
+
- provide a centralized agent-managed framework for, "ML-powered tools coordination".
|
63
|
+
- The ways of coordination and possible path can occur among nodes is what we also refers to as `workflow` (the concept of workflow is still in design).
|
64
|
+
- such that, people can utilize intelligence to solve their problems in real life.
|
65
|
+
- achieve the goal by dramatically lowering the barrier of entries for creating use-case/domain specific tools.
|
66
|
+
|
67
|
+
|
68
|
+
All notebooks should run, as of 0.0.313,
|
69
|
+
|
70
|
+
\* if there are models on providers that have not been configured, you can do so by configuring your own AI providers, and endpoints.
|
71
|
+
|
72
|
+
\*\* Structured Input/Output, Graph based agent system, as well as more advanced prompting techniques are undergoing fast interations...
|
73
|
+
|
74
|
+
### Why Automating Workflows?
|
75
|
+
|
76
|
+
Intelligent AI models such as [Large Language Model (LLM)](https://en.wikipedia.org/wiki/Large_language_model), introduced new possibilities of human-computer interaction. LLMs is drawing a lot of attention worldwide due to its “one model fits all”, and incredible performance. One way of using LLM is to use as search engine, however, this usage is complicated by the fact that LLMs [hallucinate](https://arxiv.org/abs/2311.05232).
|
77
|
+
|
78
|
+
What goes inside of a LLM is more akin to a [black-box](https://pauldeepakraj-r.medium.com/demystifying-the-black-box-a-deep-dive-into-llm-interpretability-971524966fdf), lacking interpretability, meaning we don’t know how it reaches certain answer or conclusion, thus we cannot fully trust/rely the output from such a system.
|
79
|
+
|
80
|
+
<img width="500" alt="ReAct flow" src="https://github.com/lion-agi/lionagi/assets/122793010/fabec1eb-fa8e-4ce9-b75f-b7aca4809c0f">
|
81
|
+
|
82
|
+
|
83
|
+
Another approach of using LLM is to treat them as [intelligent agent](https://arxiv.org/html/2401.03428v1), that are equipped with various tools and data sources. A workflow conducted by such an intelligent agent have clear steps, and we can specify, observe, evaluate and optimize the logic for each decision that the `agent` made to perform actions. This approach, though we still cannot pinpoint how LLM output what it outputs, but the flow itself is **explainable**.
|
84
|
+
|
85
|
+
LionAGI `agent` can manage and direct other agents, can also use multiple different tools in parallel.
|
86
|
+
|
87
|
+
<img width="700" alt="parallel agents" src="https://github.com/lion-agi/lionagi/assets/122793010/ab263a6a-c7cc-40c3-8c03-ba1968df7309">
|
88
|
+
|
89
|
+
|
90
|
+
### Install LionAGI with pip:
|
91
|
+
|
92
|
+
```bash
|
93
|
+
pip install lionagi
|
94
|
+
```
|
95
|
+
Download the `.env_template` file, input your appropriate `API_KEY`, save the file, rename as `.env` and put in your project's root directory.
|
96
|
+
by default we use `OPENAI_API_KEY`.
|
97
|
+
|
98
|
+
|
99
|
+
### Quick Start
|
100
|
+
|
101
|
+
The following example shows how to use LionAGI's `Session` object to interact with `gpt-4-turbo` model:
|
102
|
+
|
103
|
+
```python
|
104
|
+
|
105
|
+
# define system messages, context and user instruction
|
106
|
+
system = "You are a helpful assistant designed to perform calculations."
|
107
|
+
instruction = {"Addition":"Add the two numbers together i.e. x+y"}
|
108
|
+
context = {"x": 10, "y": 5}
|
109
|
+
|
110
|
+
model="gpt-4-turbo-preview"
|
111
|
+
```
|
112
|
+
|
113
|
+
```python
|
114
|
+
# in interactive environment (.ipynb for example)
|
115
|
+
from lionagi import Session
|
116
|
+
|
117
|
+
calculator = Session(system)
|
118
|
+
result = await calculator.chat(instruction, context=context, model=model)
|
119
|
+
|
120
|
+
print(f"Calculation Result: {result}")
|
121
|
+
```
|
122
|
+
|
123
|
+
```python
|
124
|
+
# or otherwise, you can use
|
125
|
+
import asyncio
|
126
|
+
from dotenv import load_dotenv
|
127
|
+
|
128
|
+
load_dotenv()
|
129
|
+
|
130
|
+
from lionagi import Session
|
131
|
+
|
132
|
+
async def main():
|
133
|
+
calculator = Session(system)
|
134
|
+
result = await calculator.chat(instruction, context=context, model=model)
|
135
|
+
|
136
|
+
print(f"Calculation Result: {result}")
|
137
|
+
|
138
|
+
if __name__ == "__main__":
|
139
|
+
asyncio.run(main())
|
140
|
+
```
|
141
|
+
|
142
|
+
Visit our notebooks for examples.
|
143
|
+
|
144
|
+
LionAGI is designed to be `asynchronous` only, please check python official documentation on how `async` work: [here](https://docs.python.org/3/library/asyncio.html)
|
145
|
+
|
146
|
+
---
|
147
|
+
|
148
|
+
**Notice**:
|
149
|
+
* calling API with maximum throughput over large set of data with advanced models i.e. gpt-4 can get **EXPENSIVE IN JUST SECONDS**,
|
150
|
+
* please know what you are doing, and check the usage on OpenAI regularly
|
151
|
+
* default rate limits are set to be 1,000 requests, 100,000 tokens per miniute, please check the [OpenAI usage limit documentation](https://platform.openai.com/docs/guides/rate-limits?context=tier-free) you can modify token rate parameters to fit different use cases.
|
152
|
+
* if you would like to build from source, please download the [latest release](https://github.com/lion-agi/lionagi/releases),
|
153
|
+
### Community
|
154
|
+
|
155
|
+
We encourage contributions to LionAGI and invite you to enrich its features and capabilities. Engage with us and other community members [Join Our Discord](https://discord.gg/7RGWqpSxze)
|
156
|
+
|
157
|
+
### Citation
|
158
|
+
|
159
|
+
When referencing LionAGI in your projects or research, please cite:
|
160
|
+
|
161
|
+
```bibtex
|
162
|
+
@software{Li_LionAGI_2023,
|
163
|
+
author = {Haiyang Li},
|
164
|
+
month = {12},
|
165
|
+
year = {2023},
|
166
|
+
title = {LionAGI: Towards Automated General Intelligence},
|
167
|
+
url = {https://github.com/lion-agi/lionagi},
|
168
|
+
}
|
169
|
+
```
|
170
|
+
|
171
|
+
|
172
|
+
### Requirements
|
173
|
+
Python 3.10 or higher.
|
174
|
+
|
@@ -12,39 +12,35 @@ To contribute, you need to make a fork first, and then make pull request from yo
|
|
12
12
|
|
13
13
|
# LionAGI
|
14
14
|
|
15
|
-
|
16
|
-
**LionAGI is undergoing major transformation.**
|
15
|
+
**Powerful Intelligent Workflow Automation**
|
17
16
|
|
18
17
|
It is an intelligent agentic workflow automation framework. It introduces advanced ML models into any existing workflows and data infrastructure.
|
19
18
|
|
20
|
-
#### Currently, it can
|
21
19
|
|
22
|
-
|
23
|
-
|
24
|
-
-
|
20
|
+
### Currently, it can
|
21
|
+
|
22
|
+
- interact with almost any models including local*
|
23
|
+
- run interactions in parallel for most models (OpenRouter, OpenAI, Ollama, litellm...)
|
24
|
+
- produce structured pydantic outputs with flexible usage\*\*
|
25
25
|
- automate workflow via graph based agents
|
26
26
|
- use advanced prompting techniques, i.e. ReAct (reason-action)
|
27
27
|
- …
|
28
|
-
|
29
|
-
#### It aims to:
|
30
28
|
|
31
|
-
|
29
|
+
### It aims to:
|
30
|
+
|
31
|
+
- provide a centralized agent-managed framework for, "ML-powered tools coordination".
|
32
|
+
- The ways of coordination and possible path can occur among nodes is what we also refers to as `workflow` (the concept of workflow is still in design).
|
32
33
|
- such that, people can utilize intelligence to solve their problems in real life.
|
33
34
|
- achieve the goal by dramatically lowering the barrier of entries for creating use-case/domain specific tools.
|
34
35
|
|
35
|
-
#### Why?
|
36
36
|
|
37
|
-
|
38
|
-
|
39
|
-
---
|
40
|
-
|
41
|
-
|
42
|
-
**Powerful Intelligent Workflow Automation**
|
37
|
+
All notebooks should run, as of 0.0.313,
|
43
38
|
|
44
|
-
|
39
|
+
\* if there are models on providers that have not been configured, you can do so by configuring your own AI providers, and endpoints.
|
45
40
|
|
41
|
+
\*\* Structured Input/Output, Graph based agent system, as well as more advanced prompting techniques are undergoing fast interations...
|
46
42
|
|
47
|
-
|
43
|
+
### Why Automating Workflows?
|
48
44
|
|
49
45
|
Intelligent AI models such as [Large Language Model (LLM)](https://en.wikipedia.org/wiki/Large_language_model), introduced new possibilities of human-computer interaction. LLMs is drawing a lot of attention worldwide due to its “one model fits all”, and incredible performance. One way of using LLM is to use as search engine, however, this usage is complicated by the fact that LLMs [hallucinate](https://arxiv.org/abs/2311.05232).
|
50
46
|
|
@@ -6,9 +6,10 @@ using a language model. It includes fields for the input sentence, number of sen
|
|
6
6
|
confidence score, and reason for the prediction.
|
7
7
|
"""
|
8
8
|
|
9
|
-
from pydantic import Field
|
10
9
|
from lionagi.libs import func_call
|
11
|
-
from
|
10
|
+
from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
|
11
|
+
|
12
|
+
from ..prompt.scored_template import ScoredTemplate
|
12
13
|
from ..branch import Branch
|
13
14
|
|
14
15
|
|
@@ -0,0 +1,167 @@
|
|
1
|
+
from lionagi.libs import func_call, convert, AsyncUtil
|
2
|
+
|
3
|
+
from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
|
4
|
+
from ..prompt.action_template import ActionedTemplate
|
5
|
+
from ..branch import Branch
|
6
|
+
|
7
|
+
|
8
|
+
class ReactTemplate(ActionedTemplate):
|
9
|
+
template_name: str = "default_react"
|
10
|
+
sentence: str | list | dict = Field(
|
11
|
+
default_factory=str,
|
12
|
+
description="the given sentence(s) to reason and take actions on",
|
13
|
+
)
|
14
|
+
|
15
|
+
def __init__(
|
16
|
+
self,
|
17
|
+
sentence=None,
|
18
|
+
instruction=None,
|
19
|
+
confidence_score=False,
|
20
|
+
**kwargs,
|
21
|
+
):
|
22
|
+
super().__init__(**kwargs)
|
23
|
+
|
24
|
+
self.sentence = sentence
|
25
|
+
self.task = f"Think step by step. Perform reasoning and prepare actions with given tools only.Instruction: {instruction}. Absolutely DO NOT MAKE UP FUNCTIONS !!!"
|
26
|
+
|
27
|
+
if confidence_score:
|
28
|
+
self.output_fields.append("confidence_score")
|
29
|
+
|
30
|
+
|
31
|
+
async def _react(
|
32
|
+
sentence,
|
33
|
+
*,
|
34
|
+
instruction=None,
|
35
|
+
branch=None,
|
36
|
+
confidence_score=False,
|
37
|
+
retries=2,
|
38
|
+
delay=0.5,
|
39
|
+
backoff_factor=2,
|
40
|
+
default_value=None,
|
41
|
+
timeout=None,
|
42
|
+
branch_name=None,
|
43
|
+
system=None,
|
44
|
+
messages=None,
|
45
|
+
service=None,
|
46
|
+
sender=None,
|
47
|
+
llmconfig=None,
|
48
|
+
tools=None,
|
49
|
+
datalogger=None,
|
50
|
+
persist_path=None,
|
51
|
+
tool_manager=None,
|
52
|
+
return_branch=False,
|
53
|
+
**kwargs,
|
54
|
+
):
|
55
|
+
|
56
|
+
if "temperature" not in kwargs:
|
57
|
+
kwargs["temperature"] = 0.1
|
58
|
+
|
59
|
+
instruction = instruction or ""
|
60
|
+
|
61
|
+
branch = branch or Branch(
|
62
|
+
name=branch_name,
|
63
|
+
system=system,
|
64
|
+
messages=messages,
|
65
|
+
service=service,
|
66
|
+
sender=sender,
|
67
|
+
llmconfig=llmconfig,
|
68
|
+
tools=tools,
|
69
|
+
datalogger=datalogger,
|
70
|
+
persist_path=persist_path,
|
71
|
+
tool_manager=tool_manager,
|
72
|
+
)
|
73
|
+
|
74
|
+
_template = ReactTemplate(
|
75
|
+
sentence=sentence,
|
76
|
+
instruction=instruction,
|
77
|
+
confidence_score=confidence_score,
|
78
|
+
)
|
79
|
+
|
80
|
+
await func_call.rcall(
|
81
|
+
branch.chat,
|
82
|
+
prompt_template=_template,
|
83
|
+
retries=retries,
|
84
|
+
delay=delay,
|
85
|
+
backoff_factor=backoff_factor,
|
86
|
+
default=default_value,
|
87
|
+
timeout=timeout,
|
88
|
+
**kwargs,
|
89
|
+
)
|
90
|
+
|
91
|
+
if _template.action_needed:
|
92
|
+
actions = _template.actions
|
93
|
+
tasks = [branch.tool_manager.invoke(i.values()) for i in actions]
|
94
|
+
results = await AsyncUtil.execute_tasks(*tasks)
|
95
|
+
|
96
|
+
a = []
|
97
|
+
for idx, item in enumerate(actions):
|
98
|
+
res = {
|
99
|
+
"function": item["function"],
|
100
|
+
"arguments": item["arguments"],
|
101
|
+
"output": results[idx],
|
102
|
+
}
|
103
|
+
branch.add_message(response=res)
|
104
|
+
a.append(res)
|
105
|
+
|
106
|
+
_template.__setattr__("action_response", a)
|
107
|
+
|
108
|
+
return (_template, branch) if return_branch else _template
|
109
|
+
|
110
|
+
|
111
|
+
async def react(
|
112
|
+
sentence,
|
113
|
+
*,
|
114
|
+
instruction=None,
|
115
|
+
num_instances=1,
|
116
|
+
branch=None,
|
117
|
+
confidence_score=False,
|
118
|
+
retries=2,
|
119
|
+
delay=0.5,
|
120
|
+
backoff_factor=2,
|
121
|
+
default_value=None,
|
122
|
+
timeout=None,
|
123
|
+
branch_name=None,
|
124
|
+
system=None,
|
125
|
+
messages=None,
|
126
|
+
service=None,
|
127
|
+
sender=None,
|
128
|
+
llmconfig=None,
|
129
|
+
tools=None,
|
130
|
+
datalogger=None,
|
131
|
+
persist_path=None,
|
132
|
+
tool_manager=None,
|
133
|
+
return_branch=False,
|
134
|
+
**kwargs,
|
135
|
+
):
|
136
|
+
|
137
|
+
async def _inner(i=0):
|
138
|
+
return await _react(
|
139
|
+
sentence=sentence,
|
140
|
+
instruction=instruction,
|
141
|
+
num_instances=num_instances,
|
142
|
+
branch=branch,
|
143
|
+
confidence_score=confidence_score,
|
144
|
+
retries=retries,
|
145
|
+
delay=delay,
|
146
|
+
backoff_factor=backoff_factor,
|
147
|
+
default_value=default_value,
|
148
|
+
timeout=timeout,
|
149
|
+
branch_name=branch_name,
|
150
|
+
system=system,
|
151
|
+
messages=messages,
|
152
|
+
service=service,
|
153
|
+
sender=sender,
|
154
|
+
llmconfig=llmconfig,
|
155
|
+
tools=tools,
|
156
|
+
datalogger=datalogger,
|
157
|
+
persist_path=persist_path,
|
158
|
+
tool_manager=tool_manager,
|
159
|
+
return_branch=return_branch,
|
160
|
+
**kwargs,
|
161
|
+
)
|
162
|
+
|
163
|
+
if num_instances == 1:
|
164
|
+
return await _inner()
|
165
|
+
|
166
|
+
elif num_instances > 1:
|
167
|
+
return await func_call.alcall(range(num_instances), _inner)
|
@@ -12,7 +12,7 @@ ScoreTemplate class and a language model.
|
|
12
12
|
from pydantic import Field
|
13
13
|
import numpy as np
|
14
14
|
from lionagi.libs import func_call, convert
|
15
|
-
from ..prompt.
|
15
|
+
from ..prompt.scored_template import ScoredTemplate
|
16
16
|
from ..branch import Branch
|
17
17
|
|
18
18
|
|
@@ -183,6 +183,7 @@ async def _score(
|
|
183
183
|
|
184
184
|
async def score(
|
185
185
|
sentence,
|
186
|
+
*,
|
186
187
|
num_instances=1,
|
187
188
|
instruction=None,
|
188
189
|
score_range=(1, 10),
|
@@ -13,7 +13,7 @@ from enum import Enum
|
|
13
13
|
from pydantic import Field
|
14
14
|
|
15
15
|
from lionagi.libs import func_call, StringMatch
|
16
|
-
from ..prompt.
|
16
|
+
from ..prompt.scored_template import ScoredTemplate
|
17
17
|
from ..branch import Branch
|
18
18
|
|
19
19
|
|
@@ -39,6 +39,9 @@ class SelectTemplate(ScoredTemplate):
|
|
39
39
|
answer: Enum | str = Field(
|
40
40
|
default_factory=str, description="selection from given choices"
|
41
41
|
)
|
42
|
+
choices: list = Field(
|
43
|
+
default_factory=list, description="the given choices"
|
44
|
+
)
|
42
45
|
|
43
46
|
signature: str = "sentence -> answer"
|
44
47
|
|
@@ -0,0 +1,26 @@
|
|
1
|
+
from typing import Any
|
2
|
+
from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
|
3
|
+
|
4
|
+
from .scored_template import ScoredTemplate
|
5
|
+
|
6
|
+
|
7
|
+
class ActionRequest: ...
|
8
|
+
|
9
|
+
|
10
|
+
class ActionedTemplate(ScoredTemplate):
|
11
|
+
|
12
|
+
action_needed: bool | None = Field(
|
13
|
+
False, description="true if actions are needed else false"
|
14
|
+
)
|
15
|
+
|
16
|
+
actions: list[dict | ActionRequest | Any] | None = Field(
|
17
|
+
default_factory=list,
|
18
|
+
description="""provide The list of action(s) to take, each action in {"function": function_name, "arguments": {param1:..., param2:..., ...}}. Leave blank if no further actions are needed, you must use provided parameters for each action, DO NOT MAKE UP KWARG NAME!!!""",
|
19
|
+
)
|
20
|
+
|
21
|
+
answer: str | dict | Any | None = Field(
|
22
|
+
default_factory=str,
|
23
|
+
description="output answer to the questions asked if further actions are not needed, leave blank if an accurate answer cannot be provided from context during this step",
|
24
|
+
)
|
25
|
+
|
26
|
+
signature: str = "sentence -> reason, action_needed, actions, answer"
|
@@ -6,7 +6,45 @@ including numeric, boolean, string, and enum. It also provides a dictionary `val
|
|
6
6
|
maps data types to their corresponding validation functions.
|
7
7
|
"""
|
8
8
|
|
9
|
-
from lionagi.libs import convert, StringMatch
|
9
|
+
from lionagi.libs import convert, StringMatch, ParseUtil
|
10
|
+
|
11
|
+
|
12
|
+
def _has_action_keys(dict_):
|
13
|
+
return list(dict_.keys()) >= ["function", "arguments"]
|
14
|
+
|
15
|
+
|
16
|
+
def check_action_field(x, fix_=True, **kwargs):
|
17
|
+
if (
|
18
|
+
isinstance(x, list)
|
19
|
+
and convert.is_same_dtype(x, dict)
|
20
|
+
and all(_has_action_keys(y) for y in x)
|
21
|
+
):
|
22
|
+
return x
|
23
|
+
try:
|
24
|
+
x = _fix_action_field(x, fix_)
|
25
|
+
return x
|
26
|
+
except Exception as e:
|
27
|
+
raise ValueError("Invalid action field type.") from e
|
28
|
+
|
29
|
+
|
30
|
+
def _fix_action_field(x, discard_=True):
|
31
|
+
corrected = []
|
32
|
+
if isinstance(x, str):
|
33
|
+
x = ParseUtil.fuzzy_parse_json(x)
|
34
|
+
|
35
|
+
try:
|
36
|
+
x = convert.to_list(x)
|
37
|
+
|
38
|
+
for i in x:
|
39
|
+
i = convert.to_dict(i)
|
40
|
+
if _has_action_keys(i):
|
41
|
+
corrected.append(i)
|
42
|
+
elif not discard_:
|
43
|
+
raise ValueError(f"Invalid action field: {i}")
|
44
|
+
except Exception as e:
|
45
|
+
raise ValueError(f"Invalid action field: {e}") from e
|
46
|
+
|
47
|
+
return corrected
|
10
48
|
|
11
49
|
|
12
50
|
def check_number_field(x, fix_=True, **kwargs):
|
@@ -236,4 +274,5 @@ validation_funcs = {
|
|
236
274
|
"bool": check_bool_field,
|
237
275
|
"str": check_str_field,
|
238
276
|
"enum": check_enum_field,
|
277
|
+
"action": check_action_field,
|
239
278
|
}
|
@@ -207,6 +207,10 @@ class PromptTemplate(BaseComponent):
|
|
207
207
|
setattr(self, k, v_)
|
208
208
|
return True
|
209
209
|
|
210
|
+
if "lionagi.core.prompt.action_template.actionrequest" in str_:
|
211
|
+
self.__setattr__(k, validation_funcs["action"](v))
|
212
|
+
return True
|
213
|
+
|
210
214
|
elif "bool" in str_:
|
211
215
|
self.__setattr__(k, validation_funcs["bool"](v, fix_=fix_, **kwargs))
|
212
216
|
return True
|
@@ -227,48 +231,50 @@ class PromptTemplate(BaseComponent):
|
|
227
231
|
if k not in kwargs:
|
228
232
|
kwargs = {k: {}}
|
229
233
|
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
+
if self._field_has_choices(k):
|
235
|
+
self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
|
236
|
+
if self._validate_field(
|
237
|
+
k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
|
234
238
|
):
|
235
|
-
self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
|
236
|
-
if self._validate_field(
|
237
|
-
k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
|
238
|
-
):
|
239
|
-
continue
|
240
|
-
else:
|
241
|
-
raise ValueError(f"{k} has no choices")
|
242
|
-
|
243
|
-
except Exception as e:
|
244
|
-
if self._validate_field(k, v, fix_=fix_, **kwargs[k]):
|
245
239
|
continue
|
246
240
|
else:
|
247
|
-
raise ValueError(f"
|
241
|
+
raise ValueError(f"{k} has no choices")
|
242
|
+
|
243
|
+
elif self._validate_field(k, v, fix_=fix_, **kwargs[k]):
|
244
|
+
continue
|
245
|
+
else:
|
246
|
+
raise ValueError(f"failed to validate field {k}")
|
247
|
+
|
248
|
+
def _field_has_choices(self, k):
    """Return True if field ``k`` declares a non-None "choices" entry in its
    ``json_schema_extra``.

    Bug fix: the original indexed ``json_schema_extra["choices"]`` *before*
    testing ``"choices" in json_schema_extra``, so the membership check was
    dead code and correctness relied entirely on the broad ``except``.
    The guard now runs in the right order; any lookup failure (unknown field,
    ``json_schema_extra`` of None or a non-mapping type) still yields False,
    preserving the original behavior.
    """
    try:
        extra = self.model_fields[k].json_schema_extra
        return extra is not None and extra.get("choices") is not None
    except Exception:
        # unknown field name, or json_schema_extra not dict-like
        return False
|
248
257
|
|
249
258
|
def _process_response(self, out_, fix_=True):
    """Validate every key/value of a response dict against this template's
    field definitions.

    Fields with declared choices are validated against them; all others go
    through the generic per-type validators.

    Args:
        out_: mapping of field name -> raw output value to validate.
        fix_: when True, validators may coerce/repair values in place.

    Raises:
        ValueError: if any field fails validation.
    """
    kwargs = self.out_validation_kwargs.copy()
    for k, v in out_.items():
        if k not in kwargs:
            # Bug fix: previously `kwargs = {k: {}}` replaced the whole
            # mapping, discarding the validation kwargs of every other
            # field; only this field's default should be added.
            kwargs[k] = {}

        if self._field_has_choices(k):
            self.choices[k] = self.model_fields[k].json_schema_extra["choices"]
            if self._validate_field(
                k, v, choices=self.choices[k], fix_=fix_, **kwargs[k]
            ):
                continue
            # Bug fix: the old message claimed "{k} has no choices", but this
            # branch runs only when choices exist — the value simply failed.
            raise ValueError(
                f"failed to validate field {k} against choices {self.choices[k]}"
            )

        if self._validate_field(k, v, fix_=fix_, **kwargs[k]):
            continue

        raise ValueError(f"failed to validate field {k} with value {v}")
|
272
278
|
|
273
279
|
@property
|
274
280
|
def in_(self):
|
@@ -288,16 +294,6 @@ class PromptTemplate(BaseComponent):
|
|
288
294
|
return self
|
289
295
|
|
290
296
|
|
291
|
-
class ScoredTemplate(PromptTemplate):
|
292
|
-
confidence_score: float | None = Field(
|
293
|
-
-1,
|
294
|
-
description="a numeric score between 0 to 1 formatted in num:0.2f",
|
295
|
-
)
|
296
|
-
reason: str | None = Field(
|
297
|
-
default_factory=str, description="brief reason for the given output"
|
298
|
-
)
|
299
|
-
|
300
|
-
|
301
297
|
# class Weather(PromptTemplate):
|
302
298
|
# sunny: bool = Field(True, description="true if the weather is sunny outside else false")
|
303
299
|
# rainy: bool = Field(False, description="true if it is raining outside else false")
|
@@ -0,0 +1,13 @@
|
|
1
|
+
from lionagi.integrations.bridge.pydantic_.pydantic_bridge import Field
|
2
|
+
|
3
|
+
from .prompt_template import PromptTemplate
|
4
|
+
|
5
|
+
|
6
|
+
class ScoredTemplate(PromptTemplate):
    """Prompt template extended with self-assessment outputs.

    Adds two extra output fields to any ``PromptTemplate``: a confidence
    score and a short textual justification, per the field descriptions
    below.
    """

    # default -1 marks "not yet scored"; runtime values are expected in
    # 0..1 rendered as num:0.2f (per the description) — TODO confirm callers
    confidence_score: float | None = Field(
        -1,
        description="a numeric score between 0 to 1 formatted in num:0.2f",
    )
    # brief free-form justification; defaults to an empty string
    reason: str | None = Field(
        default_factory=str, description="brief reason for the given output"
    )
|