agentica 0.0.6__tar.gz → 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agentica-0.0.6 → agentica-0.1.0}/PKG-INFO +37 -18
- {agentica-0.0.6 → agentica-0.1.0}/README.md +36 -17
- {agentica-0.0.6 → agentica-0.1.0}/agentica/__init__.py +1 -1
- {agentica-0.0.6 → agentica-0.1.0}/agentica/assistant.py +21 -23
- agentica-0.0.6/agentica/documents.py → agentica-0.1.0/agentica/knowledge_base.py +193 -135
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/openai_llm.py +2 -4
- agentica-0.1.0/agentica/tools/search_arxiv.py +124 -0
- agentica-0.1.0/agentica/utils/file_parser.py +126 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/utils/misc.py +0 -42
- {agentica-0.0.6 → agentica-0.1.0}/agentica/vectordb/lancedb.py +4 -2
- {agentica-0.0.6 → agentica-0.1.0}/agentica/vectordb/pgvector.py +3 -2
- agentica-0.1.0/agentica/version.py +1 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/workflow.py +9 -10
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/PKG-INFO +37 -18
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/SOURCES.txt +3 -1
- agentica-0.1.0/tests/test_llm.py +31 -0
- agentica-0.0.6/agentica/version.py +0 -1
- agentica-0.0.6/tests/test_llm.py +0 -47
- {agentica-0.0.6 → agentica-0.1.0}/LICENSE +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/config.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/document.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/azure_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/base.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/hash_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/ollama_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/openai_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/text2vec_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/together_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/emb/word2vec_emb.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/file/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/file/base.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/file/csv.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/file/txt.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/anthropic_llm.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/azure_llm.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/base.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/ollama_llm.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/llm/together_llm.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/memory.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/message.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/python_assistant.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/references.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/run_record.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/sqlite_storage.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/task.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tool.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/airflow.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/analyze_image.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/apify.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/create_image.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/duckduckgo.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/file.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/jina.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/ocr.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/run_nb_code.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/run_python_code.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/search_exa.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/search_serper.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/shell.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/sql.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/url_crawler.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/wikipedia.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/tools/yfinance.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/utils/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/utils/log.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/utils/shell.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/utils/timer.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/vectordb/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica/vectordb/base.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/dependency_links.txt +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/entry_points.txt +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/not-zip-safe +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/requires.txt +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/agentica.egg-info/top_level.txt +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/setup.cfg +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/setup.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/__init__.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_function_create_image.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_function_get_url.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_function_save_file.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_run_nb_code.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_sqlite_storage.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_write_code.py +0 -0
- {agentica-0.0.6 → agentica-0.1.0}/tests/test_write_plan.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: agentica
-Version: 0.0.6
+Version: 0.1.0
 Summary: LLM agents
 Home-page: https://github.com/shibing624/agentica
 Author: XuMing
@@ -63,9 +63,9 @@ License-File: LICENSE
 ## Features
 `agentica` is an agent-building tool. Features:
 
-- Quickly orchestrate agents with simple code, with support for RAG,
+- Quickly orchestrate agents with simple code, with support for Reflection, Plan and Solve, RAG, Agent, Multi-Agent, Multi-Role, Workflow, and more
 - Agents support custom prompts and a variety of tool calls (tool_calls)
-- Supports the OpenAI API
+- Supports OpenAI/Azure/Claude/Ollama/Together API calls
 
 ## Installation
 
@@ -104,27 +104,46 @@ m = Assistant(
     read_chat_history=True,
     debug_mode=True,
 )
-
-m.run("
-
+
+r = m.run("一句话介绍林黛玉")
+print(r, "".join(r))
+r = m.run("北京最近的新闻", stream=True, print_output=True)
+print(r, "".join(r))
+r = m.run("总结前面的问答", stream=False, print_output=False)
+print(r)
 ```
 
 
 ## Examples
 
-| Example
-
-| [examples/naive_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/naive_rag_demo.py)
-| [examples/advanced_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/advanced_rag_demo.py)
-| [examples/python_assistant_demo.py](https://github.com/shibing624/agentica/blob/main/examples/python_assistant_demo.py)
-| [examples/research_demo.py](https://github.com/shibing624/agentica/blob/main/examples/research_demo.py)
-| [examples/
-| [examples/
-| [examples/
-| [examples/
-| [examples/
-| [examples/
+| Example | Description |
+|---------|-------------|
+| [examples/naive_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/naive_rag_demo.py) | Basic RAG: answers questions over a Txt document |
+| [examples/advanced_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/advanced_rag_demo.py) | Advanced RAG: answers questions over PDF documents, adding PDF parsing, query rewriting, combined lexical and semantic multi-path retrieval, and reranking of the retrieved results |
+| [examples/python_assistant_demo.py](https://github.com/shibing624/agentica/blob/main/examples/python_assistant_demo.py) | Code Interpreter: automatically generates and executes Python code |
+| [examples/research_demo.py](https://github.com/shibing624/agentica/blob/main/examples/research_demo.py) | Research: automatically calls search tools, aggregates the findings, and writes a technology report |
+| [examples/team_news_article_demo.py](https://github.com/shibing624/agentica/blob/main/examples/team_news_article_demo.py) | Team collaboration on a news article (multi-role): delegates tasks to different roles, with a researcher retrieving and analyzing articles, a writer drafting the article to the layout, and the roles' outputs merged into the final result |
+| [examples/workflow_news_article_demo.py](https://github.com/shibing624/agentica/blob/main/examples/workflow_news_article_demo.py) | News-article workflow (multi-agent): defines multiple Assistants and Tasks, calls search tools several times, and produces a well-formatted news article |
+| [examples/workflow_investment_demo.py](https://github.com/shibing624/agentica/blob/main/examples/workflow_investment_demo.py) | Investment research workflow: multiple Tasks covering stock data collection, stock analysis, report writing, and report review |
+| [examples/crawl_webpage_demo.py](https://github.com/shibing624/agentica/blob/main/examples/crawl_webpage_demo.py) | Web page analysis workflow: crawls funding news from a URL, analyzes the page content and format, extracts the key information, and saves the summary as a Markdown file |
+| [examples/find_paper_from_arxiv_demo.py](https://github.com/shibing624/agentica/blob/main/examples/find_paper_from_arxiv_demo.py) | Paper recommendation workflow: automatically searches arxiv for several sets of papers, deduplicates similar papers, extracts the key paper information, and saves it to a CSV file |
+| [examples/remove_image_background_demo.py](https://github.com/shibing624/agentica/blob/main/examples/remove_image_background_demo.py) | Automatic image background removal, including installing the required library via pip and calling it to remove the background |
+| [examples/text_classification_demo.py](https://github.com/shibing624/agentica/blob/main/examples/text_classification_demo.py) | Workflow for automatically training a classification model: reads the training set file and understands its format, searches Google for the pytextclassifier library, crawls its GitHub page to learn how to call it, writes and runs code to train a fasttext model, and checks the trained model's predictions |
+| [examples/llm_os_demo.py](https://github.com/shibing624/agentica/blob/main/examples/llm_os_demo.py) | An initial design of LLM OS: an LLM-based operating system that can call tools such as RAG, a code executor, and the shell, coordinating a code interpreter, research assistant, investment assistant, and others to solve problems |
+
+
+### LLM OS
+The LLM OS design:
+<img alt="LLM OS" src="https://github.com/shibing624/agentica/blob/main/docs/llmos.png" width="600" />
+
+#### Run the LLM OS App
+
+```shell
+cd examples
+streamlit run llm_os_demo.py
+```
 
+
 
 ## Contact
 
@@ -41,9 +41,9 @@
 ## Features
 `agentica` is an agent-building tool. Features:
 
-- Quickly orchestrate agents with simple code, with support for RAG,
+- Quickly orchestrate agents with simple code, with support for Reflection, Plan and Solve, RAG, Agent, Multi-Agent, Multi-Role, Workflow, and more
 - Agents support custom prompts and a variety of tool calls (tool_calls)
-- Supports the OpenAI API
+- Supports OpenAI/Azure/Claude/Ollama/Together API calls
 
 ## Installation
 
@@ -82,27 +82,46 @@ m = Assistant(
     read_chat_history=True,
     debug_mode=True,
 )
-
-m.run("
-
+
+r = m.run("一句话介绍林黛玉")
+print(r, "".join(r))
+r = m.run("北京最近的新闻", stream=True, print_output=True)
+print(r, "".join(r))
+r = m.run("总结前面的问答", stream=False, print_output=False)
+print(r)
 ```
 
 
 ## Examples
 
-| Example
-
-| [examples/naive_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/naive_rag_demo.py)
-| [examples/advanced_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/advanced_rag_demo.py)
-| [examples/python_assistant_demo.py](https://github.com/shibing624/agentica/blob/main/examples/python_assistant_demo.py)
-| [examples/research_demo.py](https://github.com/shibing624/agentica/blob/main/examples/research_demo.py)
-| [examples/
-| [examples/
-| [examples/
-| [examples/
-| [examples/
-| [examples/
+| Example | Description |
+|---------|-------------|
+| [examples/naive_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/naive_rag_demo.py) | Basic RAG: answers questions over a Txt document |
+| [examples/advanced_rag_demo.py](https://github.com/shibing624/agentica/blob/main/examples/advanced_rag_demo.py) | Advanced RAG: answers questions over PDF documents, adding PDF parsing, query rewriting, combined lexical and semantic multi-path retrieval, and reranking of the retrieved results |
+| [examples/python_assistant_demo.py](https://github.com/shibing624/agentica/blob/main/examples/python_assistant_demo.py) | Code Interpreter: automatically generates and executes Python code |
+| [examples/research_demo.py](https://github.com/shibing624/agentica/blob/main/examples/research_demo.py) | Research: automatically calls search tools, aggregates the findings, and writes a technology report |
+| [examples/team_news_article_demo.py](https://github.com/shibing624/agentica/blob/main/examples/team_news_article_demo.py) | Team collaboration on a news article (multi-role): delegates tasks to different roles, with a researcher retrieving and analyzing articles, a writer drafting the article to the layout, and the roles' outputs merged into the final result |
+| [examples/workflow_news_article_demo.py](https://github.com/shibing624/agentica/blob/main/examples/workflow_news_article_demo.py) | News-article workflow (multi-agent): defines multiple Assistants and Tasks, calls search tools several times, and produces a well-formatted news article |
+| [examples/workflow_investment_demo.py](https://github.com/shibing624/agentica/blob/main/examples/workflow_investment_demo.py) | Investment research workflow: multiple Tasks covering stock data collection, stock analysis, report writing, and report review |
+| [examples/crawl_webpage_demo.py](https://github.com/shibing624/agentica/blob/main/examples/crawl_webpage_demo.py) | Web page analysis workflow: crawls funding news from a URL, analyzes the page content and format, extracts the key information, and saves the summary as a Markdown file |
+| [examples/find_paper_from_arxiv_demo.py](https://github.com/shibing624/agentica/blob/main/examples/find_paper_from_arxiv_demo.py) | Paper recommendation workflow: automatically searches arxiv for several sets of papers, deduplicates similar papers, extracts the key paper information, and saves it to a CSV file |
+| [examples/remove_image_background_demo.py](https://github.com/shibing624/agentica/blob/main/examples/remove_image_background_demo.py) | Automatic image background removal, including installing the required library via pip and calling it to remove the background |
+| [examples/text_classification_demo.py](https://github.com/shibing624/agentica/blob/main/examples/text_classification_demo.py) | Workflow for automatically training a classification model: reads the training set file and understands its format, searches Google for the pytextclassifier library, crawls its GitHub page to learn how to call it, writes and runs code to train a fasttext model, and checks the trained model's predictions |
+| [examples/llm_os_demo.py](https://github.com/shibing624/agentica/blob/main/examples/llm_os_demo.py) | An initial design of LLM OS: an LLM-based operating system that can call tools such as RAG, a code executor, and the shell, coordinating a code interpreter, research assistant, investment assistant, and others to solve problems |
+
+
+### LLM OS
+The LLM OS design:
+<img alt="LLM OS" src="https://github.com/shibing624/agentica/blob/main/docs/llmos.png" width="600" />
+
+#### Run the LLM OS App
+
+```shell
+cd examples
+streamlit run llm_os_demo.py
+```
 
+
 
 ## Contact
 
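A minimal sketch of the call pattern this README hunk introduces: `run()` now accepts a `print_output` flag, a streaming call returns an iterator of chunks, and a non-streaming call returns the full response string. The bare `Assistant()` below is an assumption for illustration only; the README's real constructor (tools, `read_chat_history`, `debug_mode`, and the LLM setup) is truncated in this diff.

```python
from agentica import Assistant

# Assumed minimal setup; the README configures the assistant with tools,
# read_chat_history=True, debug_mode=True, etc. (truncated above).
m = Assistant()

# Streaming (default): run() returns an iterator of response chunks.
# print_output=False suppresses the automatic chunk printing added in 0.1.0.
chunks = m.run("一句话介绍林黛玉", stream=True, print_output=False)
answer = "".join(chunks)  # the README joins the chunks to get the full text
print(answer)

# Non-streaming: run() returns the complete response string.
summary = m.run("总结前面的问答", stream=False, print_output=False)
print(summary)
```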
@@ -8,7 +8,7 @@ from agentica.config import DOTENV_PATH, SMART_LLM, FAST_LLM  # noqa, isort: skip
 from agentica.assistant import Assistant
 from agentica.python_assistant import PythonAssistant
 from agentica.document import Document
-from agentica.
+from agentica.knowledge_base import KnowledgeBase
 from agentica.llm.openai_llm import OpenAILLM
 from agentica.llm.azure_llm import AzureOpenAILLM
 from agentica.task import Task
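Per the file list above, agentica/documents.py was renamed to agentica/knowledge_base.py; the symbol removed from the old import line is truncated in this hunk, so only the new path is certain. A minimal sketch of the 0.1.0 imports:

```python
# New module path in 0.1.0 (the 0.0.6 import from agentica.documents no longer exists).
from agentica.knowledge_base import KnowledgeBase

# Because agentica/__init__.py re-exports it, the package-root import also works.
from agentica import Assistant, KnowledgeBase
```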
@@ -29,7 +29,7 @@ from uuid import uuid4
 from pydantic import BaseModel, ConfigDict, field_validator, ValidationError
 
 from agentica.document import Document
-from agentica.
+from agentica.knowledge_base import KnowledgeBase
 from agentica.llm.base import LLM
 from agentica.memory import AssistantMemory, Memory
 from agentica.message import Message
@@ -81,7 +81,7 @@ class Assistant(BaseModel):
     update_memory_after_run: bool = True
 
     # -*- Assistant Knowledge Base
-    knowledge_base: Optional[
+    knowledge_base: Optional[KnowledgeBase] = None
     # Enable RAG by adding references from the knowledge base to the prompt.
     add_references_to_prompt: bool = False
 
@@ -790,6 +790,7 @@ class Assistant(BaseModel):
         *,
         stream: bool = True,
         messages: Optional[List[Union[Dict, Message]]] = None,
+        print_output: bool = True,
         **kwargs: Any,
     ) -> Iterator[str]:
         logger.debug(f"*********** Assistant Run Start: {self.run_id} ***********")
@@ -865,6 +866,8 @@
         self.llm = cast(LLM, self.llm)
         if stream and self.streamable:
             for response_chunk in self.llm.response_stream(messages=llm_messages):
+                if print_output:
+                    print_llm_stream(response_chunk)
                 llm_response += response_chunk
                 yield response_chunk
         else:
@@ -922,6 +925,8 @@
 
         # -*- Yield final response if not streaming
         if not stream:
+            if print_output:
+                print(llm_response)
             yield llm_response
 
     def run(
@@ -929,8 +934,8 @@
         message: Optional[Union[List, Dict, str]] = None,
         *,
         stream: bool = True,
-        print_output: bool = True,
         messages: Optional[List[Union[Dict, Message]]] = None,
+        print_output: bool = True,
         **kwargs: Any,
     ) -> Union[Iterator[str], str, BaseModel]:
         # Convert response to structured output if output_model is set
@@ -959,16 +964,11 @@
             return self.output or json_resp
         else:
             if stream and self.streamable:
-                resp = self._run(message=message, messages=messages, stream=True, **kwargs)
-                if print_output:
-                    for chunk in resp:
-                        print_llm_stream(chunk)
+                resp = self._run(message=message, messages=messages, stream=True, print_output=print_output, **kwargs)
                 return resp
             else:
-                resp = self._run(message=message, messages=messages, stream=False, **kwargs)
+                resp = self._run(message=message, messages=messages, stream=False, print_output=print_output, **kwargs)
                 resp = next(resp)
-                if print_output:
-                    print(resp)
                 return resp
 
     async def _arun(
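The practical effect of this hunk: in 0.0.6, `run()` looped over the generator itself to print chunks (consuming it before returning), while in 0.1.0 the `print_output` flag is forwarded into `_run()`, so printing happens lazily as the caller iterates. A minimal sketch of the resulting behavior, with the `Assistant` configuration omitted and assumed as in the README snippet:

```python
from agentica import Assistant

m = Assistant()  # configuration omitted; assumed for illustration

# Nothing runs or prints until the generator is iterated; with print_output=True
# each chunk is echoed by _run() itself while it is also being accumulated here.
stream = m.run("北京最近的新闻", stream=True, print_output=True)
full_text = "".join(stream)
```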
@@ -977,6 +977,7 @@
         *,
         stream: bool = True,
         messages: Optional[List[Union[Dict, Message]]] = None,
+        print_output: bool = True,
         **kwargs: Any,
     ) -> AsyncIterator[str]:
         logger.debug(f"*********** Run Start: {self.run_id} ***********")
@@ -1055,6 +1056,8 @@
             response_stream = self.llm.aresponse_stream(messages=llm_messages)
             async for response_chunk in response_stream:  # type: ignore
                 llm_response += response_chunk
+                if print_output:
+                    print_llm_stream(response_chunk)
                 yield response_chunk
         else:
             llm_response = await self.llm.aresponse(messages=llm_messages)
@@ -1089,6 +1092,8 @@
 
         # -*- Yield final response if not streaming
         if not stream:
+            if print_output:
+                print(llm_response)
             yield llm_response
 
     async def arun(
@@ -1096,8 +1101,8 @@
         message: Optional[Union[List, Dict, str]] = None,
         *,
         stream: bool = True,
-        print_output: bool = True,
         messages: Optional[List[Union[Dict, Message]]] = None,
+        print_output: bool = True,
         **kwargs: Any,
     ) -> Union[AsyncIterator[str], str, BaseModel]:
         # Convert response to structured output if output_model is set
@@ -1127,17 +1132,11 @@
             return self.output or json_resp
         else:
             if stream and self.streamable:
-                resp = self._arun(message=message, messages=messages, stream=True, **kwargs)
-                if print_output:
-                    async for chunk in resp:
-                        print_llm_stream(chunk)
+                resp = self._arun(message=message, messages=messages, stream=True, print_output=print_output, **kwargs)
                 return resp
             else:
-                resp = self._arun(message=message, messages=messages, stream=False, **kwargs)
-
-                if print_output:
-                    print(r)
-                return r
+                resp = self._arun(message=message, messages=messages, stream=False, print_output=print_output, **kwargs)
+                return await resp.__anext__()
 
     def chat(
         self, message: Union[List, Dict, str], stream: bool = True, **kwargs: Any
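This hunk also fixes the non-streaming async path: 0.0.6 returned an undefined variable `r`, while 0.1.0 returns `await resp.__anext__()`. A minimal async usage sketch, with the same assumed `Assistant` setup:

```python
import asyncio

from agentica import Assistant


async def main() -> None:
    m = Assistant()  # configuration omitted; assumed for illustration

    # Non-streaming: arun() now awaits the single value yielded by _arun().
    answer = await m.arun("一句话介绍林黛玉", stream=False, print_output=False)
    print(answer)

    # Streaming: awaiting arun() returns the async generator; iterate to collect chunks.
    stream = await m.arun("北京最近的新闻", stream=True, print_output=False)
    print("".join([chunk async for chunk in stream]))


asyncio.run(main())
```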
@@ -1334,7 +1333,6 @@
         user: str = "User",
         emoji: str = ":sunglasses:",
         stream: bool = True,
-        markdown: bool = False,
         exit_on: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> None:
@@ -1342,11 +1340,11 @@
         _exit_on = exit_on or ["exit", "quit", "bye"]
         logger.debug(f"Enable cli, exit with {_exit_on[0]}")
         if message:
-            self.run(message=message, stream=stream,
+            self.run(message=message, stream=stream, **kwargs)
 
         while True:
             message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
             if message in _exit_on:
                 break
 
-            self.run(message=message, stream=stream,
+            self.run(message=message, stream=stream, **kwargs)