agentica 0.1.0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agentica-0.1.0 → agentica-0.1.1}/PKG-INFO +4 -3
- {agentica-0.1.0 → agentica-0.1.1}/README.md +3 -2
- {agentica-0.1.0 → agentica-0.1.1}/agentica/__init__.py +3 -1
- {agentica-0.1.0 → agentica-0.1.1}/agentica/assistant.py +9 -9
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/azure_emb.py +11 -7
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/base.py +3 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/hash_emb.py +3 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/ollama_emb.py +3 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/openai_emb.py +5 -6
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/text2vec_emb.py +4 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/word2vec_emb.py +6 -0
- agentica-0.1.1/agentica/knowledge/__init__.py +5 -0
- {agentica-0.1.0/agentica → agentica-0.1.1/agentica/knowledge}/knowledge_base.py +2 -2
- agentica-0.1.1/agentica/knowledge/langchain.py +67 -0
- agentica-0.1.1/agentica/knowledge/llamaindex.py +63 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/anthropic_llm.py +4 -1
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/azure_llm.py +2 -2
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/base.py +4 -2
- agentica-0.1.1/agentica/llm/moonshot_llm.py +221 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/ollama_llm.py +2 -3
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/openai_llm.py +18 -19
- {agentica-0.1.0 → agentica-0.1.1}/agentica/memory.py +1 -1
- agentica-0.1.1/agentica/pg_storage.py +210 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/sqlite_storage.py +8 -11
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/search_serper.py +1 -1
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/sql.py +4 -7
- agentica-0.1.1/agentica/tools/url_crawler.py +124 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/vectordb/pgvector.py +7 -11
- agentica-0.1.1/agentica/version.py +1 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/PKG-INFO +4 -3
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/SOURCES.txt +6 -1
- agentica-0.1.1/tests/test_llm.py +136 -0
- agentica-0.1.0/agentica/tools/url_crawler.py +0 -214
- agentica-0.1.0/agentica/version.py +0 -1
- agentica-0.1.0/tests/test_llm.py +0 -31
- {agentica-0.1.0 → agentica-0.1.1}/LICENSE +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/config.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/document.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/emb/together_emb.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/file/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/file/base.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/file/csv.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/file/txt.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/llm/together_llm.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/message.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/python_assistant.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/references.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/run_record.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/task.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tool.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/airflow.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/analyze_image.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/apify.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/create_image.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/duckduckgo.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/file.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/jina.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/ocr.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/run_nb_code.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/run_python_code.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/search_arxiv.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/search_exa.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/shell.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/wikipedia.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/tools/yfinance.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/utils/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/utils/file_parser.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/utils/log.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/utils/misc.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/utils/shell.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/utils/timer.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/vectordb/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/vectordb/base.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/vectordb/lancedb.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica/workflow.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/dependency_links.txt +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/entry_points.txt +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/not-zip-safe +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/requires.txt +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/agentica.egg-info/top_level.txt +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/setup.cfg +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/setup.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/__init__.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_function_create_image.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_function_get_url.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_function_save_file.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_run_nb_code.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_sqlite_storage.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_write_code.py +0 -0
- {agentica-0.1.0 → agentica-0.1.1}/tests/test_write_plan.py +0 -0
{agentica-0.1.0 → agentica-0.1.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: agentica
-Version: 0.1.0
+Version: 0.1.1
 Summary: LLM agents
 Home-page: https://github.com/shibing624/agentica
 Author: XuMing

@@ -20,7 +20,7 @@ Requires-Python: >=3.8.0
 Description-Content-Type: text/markdown
 License-File: LICENSE
 
-[**🇨🇳中文**](https://github.com/shibing624/agentica/blob/main/README.md) | [**🌐English**](https://github.com/shibing624/agentica/blob/main/README_EN.md)
+[**🇨🇳中文**](https://github.com/shibing624/agentica/blob/main/README.md) | [**🌐English**](https://github.com/shibing624/agentica/blob/main/README_EN.md) | [**🇯🇵日本語**](https://github.com/shibing624/agentica/blob/main/README_JP.md)
 
 <div align="center">
   <a href="https://github.com/shibing624/agentica">

@@ -47,7 +47,7 @@ License-File: LICENSE
 ## Overview
 
 #### LLM Agent
-[LLM Agent image (content not preserved in this view)]
+[LLM Agent image (content not preserved in this view)]
 
 - **规划(Planning)**:任务拆解、生成计划、反思
 - **记忆(Memory)**:短期记忆(prompt实现)、长期记忆(RAG实现)

@@ -134,6 +134,7 @@ print(r)
 
 ### LLM OS
 The LLM OS design:
+
 <img alt="LLM OS" src="https://github.com/shibing624/agentica/blob/main/docs/llmos.png" width="600" />
 
 #### Run the LLM OS App
{agentica-0.1.0 → agentica-0.1.1}/README.md

@@ -1,4 +1,4 @@
-[**🇨🇳中文**](https://github.com/shibing624/agentica/blob/main/README.md) | [**🌐English**](https://github.com/shibing624/agentica/blob/main/README_EN.md)
+[**🇨🇳中文**](https://github.com/shibing624/agentica/blob/main/README.md) | [**🌐English**](https://github.com/shibing624/agentica/blob/main/README_EN.md) | [**🇯🇵日本語**](https://github.com/shibing624/agentica/blob/main/README_JP.md)
 
 <div align="center">
   <a href="https://github.com/shibing624/agentica">

@@ -25,7 +25,7 @@
 ## Overview
 
 #### LLM Agent
-[LLM Agent image (content not preserved in this view)]
+[LLM Agent image (content not preserved in this view)]
 
 - **规划(Planning)**:任务拆解、生成计划、反思
 - **记忆(Memory)**:短期记忆(prompt实现)、长期记忆(RAG实现)

@@ -112,6 +112,7 @@ print(r)
 
 ### LLM OS
 The LLM OS design:
+
 <img alt="LLM OS" src="https://github.com/shibing624/agentica/blob/main/docs/llmos.png" width="600" />
 
 #### Run the LLM OS App
{agentica-0.1.0 → agentica-0.1.1}/agentica/__init__.py

@@ -8,9 +8,11 @@ from agentica.config import DOTENV_PATH, SMART_LLM, FAST_LLM  # noqa, isort: skip
 from agentica.assistant import Assistant
 from agentica.python_assistant import PythonAssistant
 from agentica.document import Document
-from agentica.knowledge_base import KnowledgeBase
+from agentica.knowledge.knowledge_base import KnowledgeBase
 from agentica.llm.openai_llm import OpenAILLM
 from agentica.llm.azure_llm import AzureOpenAILLM
+from agentica.llm.together_llm import TogetherLLM
+from agentica.llm.moonshot_llm import MoonshotLLM
 from agentica.task import Task
 from agentica.workflow import Workflow
 
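In 0.1.1 the knowledge base moves under the new agentica.knowledge package and two new LLM backends are re-exported at the top level. A minimal sketch of the import paths implied by the hunk above (assuming only what the re-exports shown there provide):

    # after upgrading to agentica 0.1.1
    from agentica import KnowledgeBase             # now backed by agentica.knowledge.knowledge_base
    from agentica import TogetherLLM, MoonshotLLM  # newly exported LLM backends

    # the old 0.1.0 path no longer exists:
    # from agentica.knowledge_base import KnowledgeBase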
{agentica-0.1.0 → agentica-0.1.1}/agentica/assistant.py

@@ -29,7 +29,7 @@ from uuid import uuid4
 from pydantic import BaseModel, ConfigDict, field_validator, ValidationError
 
 from agentica.document import Document
-from agentica.knowledge_base import KnowledgeBase
+from agentica.knowledge.knowledge_base import KnowledgeBase
 from agentica.llm.base import LLM
 from agentica.memory import AssistantMemory, Memory
 from agentica.message import Message

@@ -1212,13 +1212,13 @@ class Assistant(BaseModel):
     # Default Tools
     ###########################################################################
 
-    def get_chat_history(self, num_chats:
+    def get_chat_history(self, num_chats: int = 5) -> str:
         """Use this function to get the chat history between the user and assistant.
 
         Args:
             num_chats: The number of chats to return.
                 Each chat contains 2 messages. One from the user and one from the assistant.
-                Default:
+                Default: 5
 
         Returns:
             str: A JSON of a list of dictionaries representing the chat history.

@@ -1226,8 +1226,8 @@ class Assistant(BaseModel):
         Example:
             - To get the last chat, use num_chats=1.
             - To get the last 5 chats, use num_chats=5.
-            - To get all chats, use num_chats
-            - To get the first chat, use num_chats
+            - To get all chats, use num_chats=-1.
+            - To get the first chat, use num_chats=-1 and pick the first message.
         """
         history: List[Dict[str, Any]] = []
         all_chats = self.memory.get_chats()

@@ -1239,9 +1239,9 @@ class Assistant(BaseModel):
             history.insert(0, chat[1].to_dict())
             history.insert(0, chat[0].to_dict())
             chats_added += 1
-            if
+            if 0 < num_chats <= chats_added:
                 break
-        return json.dumps(history)
+        return json.dumps(history, ensure_ascii=False)
 
     def get_tool_call_history(self, num_calls: int = 3) -> str:
         """Use this function to get the tools called by the assistant in reverse chronological order.

@@ -1255,13 +1255,13 @@ class Assistant(BaseModel):
 
         Example:
             - To get the last tool call, use num_calls=1.
-            - To get all tool calls, use num_calls
+            - To get all tool calls, use num_calls=-1.
         """
         tool_calls = self.memory.get_tool_calls(num_calls)
         if len(tool_calls) == 0:
             return ""
         logger.debug(f"tool_calls: {tool_calls}")
-        return json.dumps(tool_calls)
+        return json.dumps(tool_calls, ensure_ascii=False)
 
     def search_knowledge_base(self, query: str) -> str:
         """Use this function to search the knowledge base for information about a query.
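The switch to json.dumps(..., ensure_ascii=False) matters for the Chinese-language content this project targets: with the default ensure_ascii=True, every non-ASCII character is escaped to a \uXXXX sequence before the history is handed back to the LLM. A standard-library illustration, not agentica-specific:

    import json

    history = [{"role": "user", "content": "任务拆解"}]
    print(json.dumps(history))                      # [{"role": "user", "content": "\u4efb\u52a1\u62c6\u89e3"}]
    print(json.dumps(history, ensure_ascii=False))  # [{"role": "user", "content": "任务拆解"}]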
{agentica-0.1.0 → agentica-0.1.1}/agentica/emb/azure_emb.py

@@ -6,17 +6,14 @@ part of the code from https://github.com/phidatahq/phidata
 """
 from os import getenv
 from typing import Optional, Dict, List, Tuple, Any
+
+from openai import AzureOpenAI as AzureOpenAIClient
+from openai.types.create_embedding_response import CreateEmbeddingResponse
 from typing_extensions import Literal
 
 from agentica.emb.base import Emb
 from agentica.utils.log import logger
 
-try:
-    from openai import AzureOpenAI as AzureOpenAIClient
-    from openai.types.create_embedding_response import CreateEmbeddingResponse
-except ImportError:
-    raise ImportError("`openai` not installed, please run `pip install openai`")
-
 
 class AzureOpenAIEmb(Emb):
     model: str = "text-embedding-ada-002"

@@ -57,7 +54,8 @@ class AzureOpenAIEmb(Emb):
             _client_params["azure_ad_token"] = self.azure_ad_token
         if self.azure_ad_token_provider:
             _client_params["azure_ad_token_provider"] = self.azure_ad_token_provider
-        return AzureOpenAIClient(**_client_params)
+        self.openai_client = AzureOpenAIClient(**_client_params)
+        return self.openai_client
 
     def _response(self, text: str) -> CreateEmbeddingResponse:
         _request_params: Dict[str, Any] = {

@@ -87,3 +85,9 @@ class AzureOpenAIEmb(Emb):
         embedding = response.data[0].embedding
         usage = response.usage
         return embedding, usage.model_dump()
+
+    def get_embeddings(self, texts: List[str]) -> List[List[float]]:
+        embeddings = []
+        for text in texts:
+            embeddings.append(self.get_embedding(text))
+        return embeddings
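The new get_embeddings method batches by looping, issuing one embedding request per text. A hedged usage sketch (it assumes Azure OpenAI credentials are already configured for the embedder; the bare constructor call is illustrative):

    from agentica.emb.azure_emb import AzureOpenAIEmb

    emb = AzureOpenAIEmb()  # credentials/deployment resolved from config (assumption)
    vectors = emb.get_embeddings(["hello", "世界"])
    assert len(vectors) == 2  # one vector per input text

Note that the OpenAI embeddings endpoint also accepts a list as input, so a single batched request would be a possible follow-up optimization over the per-text loop.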
{agentica-0.1.0 → agentica-0.1.1}/agentica/emb/openai_emb.py

@@ -7,17 +7,13 @@ part of the code from https://github.com/phidatahq/phidata
 from os import getenv
 from typing import Optional, Dict, List, Tuple, Any
 
+from openai import OpenAI as OpenAIClient
+from openai.types.create_embedding_response import CreateEmbeddingResponse
 from typing_extensions import Literal
 
 from agentica.emb.base import Emb
 from agentica.utils.log import logger
 
-try:
-    from openai import OpenAI as OpenAIClient
-    from openai.types.create_embedding_response import CreateEmbeddingResponse
-except ImportError:
-    raise ImportError("`openai` not installed, please run `pip install openai`")
-
 
 class OpenAIEmb(Emb):
     model: str = "text-embedding-ada-002"  # or text-embedding-3-small

@@ -75,3 +71,6 @@ class OpenAIEmb(Emb):
         embedding = response.data[0].embedding
         usage = response.usage
         return embedding, usage.model_dump()
+
+    def get_embeddings(self, texts: List[str]) -> List[List[float]]:
+        return [self.get_embedding(text) for text in texts]
{agentica-0.1.0 → agentica-0.1.1}/agentica/emb/text2vec_emb.py

@@ -38,3 +38,7 @@ class Text2VecEmb(Emb):
     def get_embedding(self, text: str) -> List[float]:
         # Calculate emb of the text
         return self.get_client.encode([text])[0]
+
+    def get_embeddings(self, texts: List[str]) -> List[List[float]]:
+        # Calculate emb of the texts
+        return self.get_client.encode(texts).tolist()  # type: ignore
{agentica-0.1.0 → agentica-0.1.1}/agentica/emb/word2vec_emb.py

@@ -37,3 +37,9 @@ class Word2VecEmb(Emb):
     def get_embedding(self, text: str) -> List[float]:
         # Calculate emb of the text
         return self.get_client.encode([text])[0]
+
+    def get_embeddings(self, texts: List[str]) -> List[List[float]]:
+        # Calculate emb of the texts
+        ndarr = self.get_client.encode(texts)
+        # convert numpy array to list
+        return ndarr.tolist()  # type: ignore
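Both Text2VecEmb.get_embeddings and Word2VecEmb.get_embeddings above rely on encode() returning a 2-D numpy array for a list of texts; .tolist() converts it into plain nested Python lists, which matches the List[List[float]] signature and keeps the result JSON-serializable. A standalone illustration of that conversion:

    import numpy as np

    ndarr = np.random.rand(2, 4)  # stand-in for model.encode(texts): shape (n_texts, dim)
    vectors = ndarr.tolist()      # nested Python lists of floats
    assert isinstance(vectors[0][0], float)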
{agentica-0.1.0/agentica → agentica-0.1.1/agentica/knowledge}/knowledge_base.py

@@ -182,12 +182,12 @@ class KnowledgeBase(BaseModel):
         Reads a website and returns a list of documents.
         """
         try:
-            …
+            content = UrlCrawlerTool().url_crawl(url)
             documents = [
                 Document(
                     name=url,
                     id=url,
-                    content=
+                    content=content,
                 )
             ]
             if self.chunk:
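read_url now delegates page fetching to the rewritten UrlCrawlerTool and wraps the returned text in a Document. A minimal sketch of that call (assuming, as the hunk suggests, that url_crawl takes a URL and returns the page content used for the Document):

    from agentica.tools.url_crawler import UrlCrawlerTool

    content = UrlCrawlerTool().url_crawl("https://github.com/shibing624/agentica")
    print(str(content)[:200])  # page content that read_url turns into a Document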
agentica-0.1.1/agentica/knowledge/langchain.py

@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+"""
+@author:XuMing(xuming624@qq.com)
+@description:
+part of the code from https://github.com/phidatahq/phidata
+"""
+from typing import List, Optional, Callable, Any
+
+from agentica.document import Document
+from agentica.knowledge.knowledge_base import KnowledgeBase
+from agentica.utils.log import logger
+
+
+class LangChainKnowledgeBase(KnowledgeBase):
+    loader: Optional[Callable] = None
+
+    vectorstore: Optional[Any] = None
+    search_kwargs: Optional[dict] = None
+
+    retriever: Optional[Any] = None
+
+    def search(self, query: str, num_documents: Optional[int] = None) -> List[Document]:
+        """Returns relevant documents matching the query"""
+
+        try:
+            from langchain_core.vectorstores import VectorStoreRetriever
+            from langchain_core.documents import Document as LangChainDocument
+        except ImportError:
+            raise ImportError(
+                "The `langchain` package is not installed. Please install it via `pip install langchain`."
+            )
+
+        if self.vectorstore is not None and self.retriever is None:
+            logger.debug("Creating retriever")
+            if self.search_kwargs is None:
+                self.search_kwargs = {"k": self.num_documents}
+            self.retriever = self.vectorstore.as_retriever(search_kwargs=self.search_kwargs)
+
+        if self.retriever is None:
+            logger.error("No retriever provided")
+            return []
+
+        if not isinstance(self.retriever, VectorStoreRetriever):
+            raise ValueError(f"Retriever is not of type VectorStoreRetriever: {self.retriever}")
+
+        _num_documents = num_documents or self.num_documents
+        logger.debug(f"Getting {_num_documents} relevant documents for query: {query}")
+        lc_documents: List[LangChainDocument] = self.retriever.invoke(input=query)
+        documents = []
+        for lc_doc in lc_documents:
+            documents.append(
+                Document(
+                    content=lc_doc.page_content,
+                    meta_data=lc_doc.metadata,
+                )
+            )
+        return documents
+
+    def load(self, recreate: bool = False, upsert: bool = True, skip_existing: bool = True) -> None:
+        if self.loader is None:
+            logger.error("No loader provided for LangChainKnowledgeBase")
+            return
+        self.loader()
+
+    def exists(self) -> bool:
+        logger.warning("LangChainKnowledgeBase.exists() not supported - please check the vectorstore manually.")
+        return True
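A hedged usage sketch for the new LangChainKnowledgeBase: wrap an existing LangChain vector store and query it through the agentica Document interface. The Chroma store and embedding model below are illustrative assumptions, not part of this diff:

    from langchain_community.vectorstores import Chroma  # assumed installed
    from langchain_openai import OpenAIEmbeddings        # assumed installed
    from agentica.knowledge.langchain import LangChainKnowledgeBase

    vectorstore = Chroma(embedding_function=OpenAIEmbeddings(), persist_directory="./chroma_db")
    knowledge_base = LangChainKnowledgeBase(vectorstore=vectorstore)
    docs = knowledge_base.search("What is agentica?", num_documents=3)  # -> List[Document]

search() builds a VectorStoreRetriever lazily from the vector store, so passing a prebuilt retriever=... is equally valid.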
agentica-0.1.1/agentica/knowledge/llamaindex.py

@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+"""
+@author:XuMing(xuming624@qq.com)
+@description:
+part of the code from https://github.com/phidatahq/phidata
+"""
+from typing import List, Optional, Callable
+
+from agentica.document import Document
+from agentica.knowledge.knowledge_base import KnowledgeBase
+from agentica.utils.log import logger
+
+try:
+    from llama_index.core.schema import NodeWithScore
+    from llama_index.core.retrievers import BaseRetriever
+except ImportError:
+    raise ImportError(
+        "The `llama-index-core` package is not installed. Please install it via `pip install llama-index-core`."
+    )
+
+
+class LlamaIndexKnowledgeBase(KnowledgeBase):
+    retriever: BaseRetriever
+    loader: Optional[Callable] = None
+
+    def search(self, query: str, num_documents: Optional[int] = None) -> List[Document]:
+        """
+        Returns relevant documents matching the query.
+
+        Args:
+            query (str): The query string to search for.
+            num_documents (Optional[int]): The maximum number of documents to return. Defaults to None.
+
+        Returns:
+            List[Document]: A list of relevant documents matching the query.
+        Raises:
+            ValueError: If the retriever is not of type BaseRetriever.
+        """
+        if not isinstance(self.retriever, BaseRetriever):
+            raise ValueError(f"Retriever is not of type BaseRetriever: {self.retriever}")
+
+        lc_documents: List[NodeWithScore] = self.retriever.retrieve(query)
+        if num_documents is not None:
+            lc_documents = lc_documents[:num_documents]
+        documents = []
+        for lc_doc in lc_documents:
+            documents.append(
+                Document(
+                    content=lc_doc.text,
+                    meta_data=lc_doc.metadata,
+                )
+            )
+        return documents
+
+    def load(self, recreate: bool = False, upsert: bool = True, skip_existing: bool = True) -> None:
+        if self.loader is None:
+            logger.error("No loader provided for LlamaIndexKnowledgeBase")
+            return
+        self.loader()
+
+    def exists(self) -> bool:
+        logger.warning("LlamaIndexKnowledgeBase.exists() not supported - please check the vectorstore manually.")
+        return True
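The LlamaIndex adapter works the same way but requires a BaseRetriever up front. A sketch under the assumption that an index has already been built with llama-index-core (the VectorStoreIndex wiring is illustrative):

    from llama_index.core import SimpleDirectoryReader, VectorStoreIndex  # assumed installed
    from agentica.knowledge.llamaindex import LlamaIndexKnowledgeBase

    documents = SimpleDirectoryReader("data").load_data()
    retriever = VectorStoreIndex.from_documents(documents).as_retriever(similarity_top_k=3)
    knowledge_base = LlamaIndexKnowledgeBase(retriever=retriever)
    docs = knowledge_base.search("What is agentica?", num_documents=2)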
{agentica-0.1.0 → agentica-0.1.1}/agentica/llm/anthropic_llm.py

@@ -50,7 +50,10 @@ class AnthropicLLM(LLM):
         _client_params: Dict[str, Any] = {}
         if self.api_key:
             _client_params["api_key"] = self.api_key
-        return AnthropicClient(**_client_params)
+        if self.client_params:
+            _client_params.update(self.client_params)
+        self.anthropic_client = AnthropicClient(**_client_params)
+        return self.anthropic_client
 
     @property
     def api_kwargs(self) -> Dict[str, Any]:
{agentica-0.1.0 → agentica-0.1.1}/agentica/llm/azure_llm.py

@@ -53,5 +53,5 @@ class AzureOpenAILLM(OpenAILLM):
             _client_params["http_client"] = self.http_client
         if self.client_params:
             _client_params.update(self.client_params)
-
-        return AzureOpenAIClient(**_client_params)
+        self.client = AzureOpenAIClient(**_client_params)
+        return self.client
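The anthropic_llm.py and azure_llm.py fixes above share one pattern: build the SDK client once, cache it on the instance, and return the cached object thereafter instead of constructing a fresh client on every call. A toy sketch of the pattern, with object() standing in for the real SDK client:

    class Example:
        client = None

        def get_client(self):
            if self.client:          # cached from an earlier call
                return self.client
            self.client = object()   # stand-in for AnthropicClient/AzureOpenAIClient(**_client_params)
            return self.client

    e = Example()
    assert e.get_client() is e.get_client()  # the same client instance is reused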
{agentica-0.1.0 → agentica-0.1.1}/agentica/llm/base.py

@@ -104,9 +104,11 @@ class LLM(BaseModel):
         tools_for_api = []
         for tool in self.tools:
             if isinstance(tool, Tool):
-                tools_for_api.append(tool.to_dict())
+                if tool.to_dict() not in tools_for_api:
+                    tools_for_api.append(tool.to_dict())
             elif isinstance(tool, Dict):
-                tools_for_api.append(tool)
+                if tool not in tools_for_api:
+                    tools_for_api.append(tool)
         return tools_for_api
 
     def add_tool(self, tool: Union[Tool, Toolkit, Callable, Dict, Function]) -> None:
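The membership guards added to get_tools_for_api prevent the same tool schema from being sent to the API twice when a tool has been registered more than once. The effect in isolation:

    tools_for_api = []
    tool_dict = {"type": "function", "function": {"name": "get_chat_history"}}  # illustrative schema

    for tool in [tool_dict, tool_dict]:  # tool accidentally added twice
        if tool not in tools_for_api:    # the new guard
            tools_for_api.append(tool)

    assert len(tools_for_api) == 1       # duplicates are dropped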
agentica-0.1.1/agentica/llm/moonshot_llm.py

@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+"""
+@author:XuMing(xuming624@qq.com)
+@description:
+kimi api refer: https://platform.moonshot.cn/docs/api/tool-use#%E5%B7%A5%E5%85%B7%E8%B0%83%E7%94%A8
+"""
+from os import getenv
+from typing import Optional, List, Iterator, Dict, Any
+
+from openai import OpenAI as OpenAIClient
+
+from agentica.llm.base import LLM
+from agentica.message import Message
+from agentica.tool import FunctionCall, get_function_call_for_tool_call
+from agentica.utils.log import logger
+from agentica.utils.timer import Timer
+
+
+class MoonshotLLM(LLM):
+    name: str = "Moonshot"
+    model: str = "moonshot-v1-8k"
+    api_key: Optional[str] = getenv("MOONSHOT_API_KEY")
+    base_url: str = "https://api.moonshot.cn/v1"
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    max_tokens: Optional[int] = None
+    # Deactivate tool calls after 1 tool call
+    deactivate_tools_after_use: bool = False
+    request_params: Optional[Dict[str, Any]] = None
+    client_params: Optional[Dict[str, Any]] = None
+    # -*- Provide the client manually
+    client: Optional[OpenAIClient] = None
+
+    def get_client(self) -> OpenAIClient:
+        if self.client:
+            return self.client
+
+        _client_params: Dict[str, Any] = {}
+        if self.api_key:
+            _client_params["api_key"] = self.api_key
+        if self.base_url:
+            _client_params["base_url"] = self.base_url
+        if self.client_params:
+            _client_params.update(self.client_params)
+        self.client = OpenAIClient(**_client_params)
+        return self.client
+
+    @property
+    def api_kwargs(self) -> Dict[str, Any]:
+        _request_params: Dict[str, Any] = {}
+        if self.max_tokens:
+            _request_params["max_tokens"] = self.max_tokens
+        if self.temperature:
+            _request_params["temperature"] = self.temperature
+        if self.top_p:
+            _request_params["top_p"] = self.top_p
+        if self.top_k:
+            _request_params["top_k"] = self.top_k
+        if self.tools:
+            _request_params["tools"] = self.get_tools_for_api()
+            if self.tool_choice is None:
+                _request_params["tool_choice"] = "auto"
+            else:
+                _request_params["tool_choice"] = self.tool_choice
+        if self.request_params:
+            _request_params.update(self.request_params)
+        return _request_params
+
+    def to_dict(self) -> Dict[str, Any]:
+        _dict = super().to_dict()
+        if self.max_tokens:
+            _dict["max_tokens"] = self.max_tokens
+        if self.temperature:
+            _dict["temperature"] = self.temperature
+        if self.top_p:
+            _dict["top_p"] = self.top_p
+        if self.tools:
+            _dict["tools"] = self.get_tools_for_api()
+            if self.tool_choice is None:
+                _dict["tool_choice"] = "auto"
+            else:
+                _dict["tool_choice"] = self.tool_choice
+        return _dict
+
+    def invoke(self, messages: List[Message]):
+        return self.get_client().chat.completions.create(
+            model=self.model,
+            messages=[m.to_dict() for m in messages],
+            **self.api_kwargs,
+        )
+
+    def invoke_stream(self, messages: List[Message]):
+        yield from self.get_client().chat.completions.create(
+            model=self.model,
+            messages=[m.to_dict() for m in messages],
+            stream=True,
+            **self.api_kwargs,
+        )
+
+    def response(self, messages: List[Message]) -> str:
+        logger.debug("---------- Moonshot Response Start ----------")
+        # -*- Log messages for debugging
+        for m in messages:
+            m.log()
+
+        t = Timer()
+        t.start()
+        response = self.invoke(messages=messages)
+        t.stop()
+        logger.debug(f"Time to generate response: {t.elapsed:.4f}s")
+
+        # -*- Parse response
+        response_message = response.choices[0].message
+        response_role = response_message.role
+        response_content: Optional[str] = response_message.content
+        response_tool_calls = response_message.tool_calls
+
+        tool_calls = []
+        if response_tool_calls is not None:
+            tool_calls = [t.model_dump() for t in response_tool_calls]
+            if not response_content:
+                response_content = f"Running tool calls: {tool_calls}"
+
+        # -*- Create assistant message
+        assistant_message = Message(
+            role=response_role or "assistant",
+            content=response_content,
+        )
+
+        # -*- Update usage metrics
+        # Add response time to metrics
+        assistant_message.metrics["time"] = t.elapsed
+        if "response_times" not in self.metrics:
+            self.metrics["response_times"] = []
+        self.metrics["response_times"].append(t.elapsed)
+
+        # Add token usage to metrics
+        response_usage = response.usage
+        prompt_tokens = response_usage.prompt_tokens if response_usage is not None else None
+        if prompt_tokens is not None:
+            assistant_message.metrics["prompt_tokens"] = prompt_tokens
+            if "prompt_tokens" not in self.metrics:
+                self.metrics["prompt_tokens"] = prompt_tokens
+            else:
+                self.metrics["prompt_tokens"] += prompt_tokens
+        completion_tokens = response_usage.completion_tokens if response_usage is not None else None
+        if completion_tokens is not None:
+            assistant_message.metrics["completion_tokens"] = completion_tokens
+            if "completion_tokens" not in self.metrics:
+                self.metrics["completion_tokens"] = completion_tokens
+            else:
+                self.metrics["completion_tokens"] += completion_tokens
+        total_tokens = response_usage.total_tokens if response_usage is not None else None
+        if total_tokens is not None:
+            assistant_message.metrics["total_tokens"] = total_tokens
+            if "total_tokens" not in self.metrics:
+                self.metrics["total_tokens"] = total_tokens
+            else:
+                self.metrics["total_tokens"] += total_tokens
+
+        # -*- Add assistant message to messages
+        messages.append(assistant_message)
+        assistant_message.log()
+
+        # -*- Parse and run function call
+        if tool_calls is not None and self.run_tools:
+            final_response = ""
+            function_calls_to_run: List[FunctionCall] = []
+            for tool_call in tool_calls:
+                _function_call = get_function_call_for_tool_call(tool_call, self.functions)
+                if _function_call is None:
+                    messages.append(Message(role="user", content="Could not find function to call."))
+                    continue
+                if _function_call.error is not None:
+                    messages.append(Message(role="user", content=_function_call.error))
+                    continue
+                function_calls_to_run.append(_function_call)
+
+            if self.show_tool_calls:
+                if len(function_calls_to_run) == 1:
+                    final_response += f" - Running: {function_calls_to_run[0].get_call_str()}\n\n"
+                elif len(function_calls_to_run) > 1:
+                    final_response += "Running:"
+                    for _f in function_calls_to_run:
+                        final_response += f"\n - {_f.get_call_str()}"
+                    final_response += "\n\n"
+
+            function_call_results = self.run_function_calls(function_calls_to_run, role="user")
+            if len(function_call_results) > 0:
+                fc_responses = "<tool_results>"
+
+                for _fc_message in function_call_results:
+                    fc_responses += "<result>"
+                    fc_responses += "<tool_name>" + _fc_message.tool_call_name + "</tool_name>"  # type: ignore
+                    fc_responses += "<stdout>" + _fc_message.content + "</stdout>"  # type: ignore
+                    fc_responses += "</result>"
+                fc_responses += "</tool_results>"
+
+                messages.append(Message(role="user", content=fc_responses))
+
+            # Deactivate tool calls after 1 tool call
+            if self.deactivate_tools_after_use:
+                self.deactivate_function_calls()
+
+            # -*- Yield new response using results of tool calls
+            last_message = messages[-1]
+            if last_message.role == "user" and last_message.content is not None:
+                final_response += self.response(messages=messages)
+            return final_response
+        logger.debug("---------- Moonshot Response End ----------")
+        # -*- Return content if no function calls are present
+        if assistant_message.content is not None:
+            return assistant_message.get_content_string()
+        return "Something went wrong, please try again."
+
+    def response_stream(self, messages: List[Message]) -> Iterator[str]:
+        logger.debug("MoonshotLLM tool use not support stream, use response instead.")
+        r = self.response(messages)
+        for i in r:
+            yield i
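A hedged usage sketch for the new Moonshot backend (requires MOONSHOT_API_KEY in the environment; the Assistant wiring follows the package's phidata lineage and is an assumption, not shown in this diff):

    from agentica import Assistant, MoonshotLLM

    llm = MoonshotLLM()             # defaults to model="moonshot-v1-8k" and the api.moonshot.cn endpoint
    assistant = Assistant(llm=llm)  # assumed Assistant(llm=...) constructor
    r = assistant.run("Introduce the Moonshot models in one sentence.", stream=False)
    print(r)

Note that response_stream deliberately falls back to the non-streaming response when tools are involved, yielding the finished string character by character.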