agentica 0.1.4__tar.gz → 0.1.6__tar.gz

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (107)
  1. {agentica-0.1.4 → agentica-0.1.6}/PKG-INFO +6 -5
  2. {agentica-0.1.4 → agentica-0.1.6}/README.md +4 -4
  3. {agentica-0.1.4 → agentica-0.1.6}/agentica/__init__.py +35 -23
  4. {agentica-0.1.4 → agentica-0.1.6}/agentica/assistant.py +67 -25
  5. agentica-0.1.6/agentica/config.py +29 -0
  6. {agentica-0.1.4 → agentica-0.1.6}/agentica/document.py +1 -0
  7. agentica-0.1.4/agentica/emb/azure_emb.py → agentica-0.1.6/agentica/emb/azure_openai_emb.py +1 -2
  8. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/base.py +4 -3
  9. agentica-0.1.6/agentica/emb/fireworks_emb.py +16 -0
  10. agentica-0.1.6/agentica/emb/genimi_emb.py +60 -0
  11. agentica-0.1.6/agentica/emb/huggingface_emb.py +53 -0
  12. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/ollama_emb.py +8 -8
  13. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/openai_emb.py +7 -5
  14. agentica-0.1.6/agentica/emb/sentence_transformer_emb.py +55 -0
  15. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/text2vec_emb.py +8 -5
  16. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/word2vec_emb.py +1 -1
  17. {agentica-0.1.4 → agentica-0.1.6}/agentica/knowledge/knowledge_base.py +3 -1
  18. {agentica-0.1.4 → agentica-0.1.6}/agentica/llm/__init__.py +1 -1
  19. agentica-0.1.4/agentica/llm/azure_llm.py → agentica-0.1.6/agentica/llm/azure_openai_llm.py +12 -5
  20. {agentica-0.1.4 → agentica-0.1.6}/agentica/llm/base.py +43 -30
  21. agentica-0.1.6/agentica/llm/claude_llm.py +424 -0
  22. {agentica-0.1.4 → agentica-0.1.6}/agentica/llm/deepseek_llm.py +1 -1
  23. {agentica-0.1.4 → agentica-0.1.6}/agentica/llm/moonshot_llm.py +2 -2
  24. {agentica-0.1.4 → agentica-0.1.6}/agentica/llm/ollama_llm.py +72 -73
  25. agentica-0.1.6/agentica/llm/ollama_tools_llm.py +489 -0
  26. {agentica-0.1.4 → agentica-0.1.6}/agentica/llm/openai_llm.py +58 -42
  27. agentica-0.1.4/agentica/llm/together_llm.py → agentica-0.1.6/agentica/llm/togetherllm.py +12 -16
  28. {agentica-0.1.4 → agentica-0.1.6}/agentica/memory.py +51 -26
  29. {agentica-0.1.4 → agentica-0.1.6}/agentica/message.py +3 -1
  30. {agentica-0.1.4 → agentica-0.1.6}/agentica/python_assistant.py +0 -1
  31. agentica-0.1.6/agentica/storage/__init__.py +8 -0
  32. agentica-0.1.6/agentica/storage/base.py +38 -0
  33. {agentica-0.1.4/agentica → agentica-0.1.6/agentica/storage}/pg_storage.py +25 -1
  34. {agentica-0.1.4/agentica → agentica-0.1.6/agentica/storage}/sqlite_storage.py +28 -2
  35. agentica-0.1.6/agentica/template.py +27 -0
  36. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/airflow.py +1 -1
  37. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/analyze_image.py +1 -1
  38. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/apify.py +1 -1
  39. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/arxiv.py +1 -1
  40. agentica-0.1.4/agentica/tool.py → agentica-0.1.6/agentica/tools/base.py +1 -1
  41. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/create_image.py +1 -1
  42. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/dblp.py +1 -1
  43. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/duckduckgo.py +1 -2
  44. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/file.py +2 -4
  45. agentica-0.1.6/agentica/tools/hackernews.py +81 -0
  46. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/jina.py +4 -6
  47. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/ocr.py +1 -1
  48. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/run_nb_code.py +10 -6
  49. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/run_python_code.py +2 -2
  50. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/search_exa.py +1 -1
  51. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/search_serper.py +7 -4
  52. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/shell.py +1 -1
  53. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/sql.py +1 -1
  54. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/url_crawler.py +1 -1
  55. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/wikipedia.py +1 -1
  56. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/yfinance.py +1 -1
  57. {agentica-0.1.4 → agentica-0.1.6}/agentica/utils/file_parser.py +0 -6
  58. agentica-0.1.6/agentica/vectordb/chromadb.py +244 -0
  59. {agentica-0.1.4 → agentica-0.1.6}/agentica/vectordb/lancedb.py +6 -4
  60. agentica-0.1.4/agentica/vectordb/memorydb.py → agentica-0.1.6/agentica/vectordb/memory_vectordb.py +7 -3
  61. agentica-0.1.6/agentica/vectordb/pineconedb.py +300 -0
  62. agentica-0.1.6/agentica/vectordb/qdrant.py +229 -0
  63. agentica-0.1.6/agentica/version.py +1 -0
  64. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/PKG-INFO +6 -5
  65. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/SOURCES.txt +20 -8
  66. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/requires.txt +1 -0
  67. {agentica-0.1.4 → agentica-0.1.6}/setup.py +1 -0
  68. {agentica-0.1.4 → agentica-0.1.6}/tests/test_llm.py +0 -3
  69. {agentica-0.1.4 → agentica-0.1.6}/tests/test_sqlite_storage.py +2 -2
  70. agentica-0.1.4/agentica/config.py +0 -32
  71. agentica-0.1.4/agentica/llm/anthropic_llm.py +0 -419
  72. agentica-0.1.4/agentica/version.py +0 -1
  73. {agentica-0.1.4 → agentica-0.1.6}/LICENSE +0 -0
  74. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/__init__.py +0 -0
  75. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/hash_emb.py +0 -0
  76. {agentica-0.1.4 → agentica-0.1.6}/agentica/emb/together_emb.py +0 -0
  77. {agentica-0.1.4 → agentica-0.1.6}/agentica/file/__init__.py +0 -0
  78. {agentica-0.1.4 → agentica-0.1.6}/agentica/file/base.py +0 -0
  79. {agentica-0.1.4 → agentica-0.1.6}/agentica/file/csv.py +0 -0
  80. {agentica-0.1.4 → agentica-0.1.6}/agentica/file/txt.py +0 -0
  81. {agentica-0.1.4 → agentica-0.1.6}/agentica/knowledge/__init__.py +0 -0
  82. {agentica-0.1.4 → agentica-0.1.6}/agentica/knowledge/langchain.py +0 -0
  83. {agentica-0.1.4 → agentica-0.1.6}/agentica/knowledge/llamaindex.py +0 -0
  84. {agentica-0.1.4 → agentica-0.1.6}/agentica/references.py +0 -0
  85. {agentica-0.1.4 → agentica-0.1.6}/agentica/run_record.py +0 -0
  86. {agentica-0.1.4 → agentica-0.1.6}/agentica/task.py +0 -0
  87. {agentica-0.1.4 → agentica-0.1.6}/agentica/tools/__init__.py +0 -0
  88. {agentica-0.1.4 → agentica-0.1.6}/agentica/utils/__init__.py +0 -0
  89. {agentica-0.1.4 → agentica-0.1.6}/agentica/utils/log.py +0 -0
  90. {agentica-0.1.4 → agentica-0.1.6}/agentica/utils/misc.py +0 -0
  91. {agentica-0.1.4 → agentica-0.1.6}/agentica/utils/shell.py +0 -0
  92. {agentica-0.1.4 → agentica-0.1.6}/agentica/utils/timer.py +0 -0
  93. {agentica-0.1.4 → agentica-0.1.6}/agentica/vectordb/__init__.py +0 -0
  94. {agentica-0.1.4 → agentica-0.1.6}/agentica/vectordb/base.py +0 -0
  95. {agentica-0.1.4 → agentica-0.1.6}/agentica/vectordb/pgvector.py +0 -0
  96. {agentica-0.1.4 → agentica-0.1.6}/agentica/workflow.py +0 -0
  97. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/dependency_links.txt +0 -0
  98. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/entry_points.txt +0 -0
  99. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/not-zip-safe +0 -0
  100. {agentica-0.1.4 → agentica-0.1.6}/agentica.egg-info/top_level.txt +0 -0
  101. {agentica-0.1.4 → agentica-0.1.6}/setup.cfg +0 -0
  102. {agentica-0.1.4 → agentica-0.1.6}/tests/__init__.py +0 -0
  103. {agentica-0.1.4 → agentica-0.1.6}/tests/test_create_image.py +0 -0
  104. {agentica-0.1.4 → agentica-0.1.6}/tests/test_jina_tool.py +0 -0
  105. {agentica-0.1.4 → agentica-0.1.6}/tests/test_moonshot_llm.py +0 -0
  106. {agentica-0.1.4 → agentica-0.1.6}/tests/test_run_nb_code.py +0 -0
  107. {agentica-0.1.4 → agentica-0.1.6}/tests/test_url_crawler.py +0 -0
{agentica-0.1.4 → agentica-0.1.6}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: agentica
- Version: 0.1.4
+ Version: 0.1.6
  Summary: LLM agents
  Home-page: https://github.com/shibing624/agentica
  Author: XuMing

@@ -25,6 +25,7 @@ Requires-Dist: python-dotenv
  Requires-Dist: pydantic
  Requires-Dist: requests
  Requires-Dist: sqlalchemy
+ Requires-Dist: scikit-learn
  Requires-Dist: tqdm
  Requires-Dist: rich

@@ -91,13 +92,16 @@ pip install .

  ## Getting Started

- ### 运行示例
+ #### 1. Install requirements

  ```shell
  git clone https://github.com/shibing624/agentica.git
  cd agentica
  pip install -r requirements.txt
+ ```

+ #### 2. Run the example
+ ```shell
  # Copying required .env file, and fill in the LLM api key
  cp .env.example ~/.agentica/.env

@@ -198,9 +202,6 @@ bash start.sh

  <img src="https://github.com/shibing624/agentica/blob/main/docs/wechat.jpeg" width="200" />

- <img src="https://github.com/shibing624/agentica/blob/main/docs/wechat_group.jpg" width="200" />
-
-
  ## Citation

  如果你在研究中使用了`agentica`,请按如下格式引用:
{agentica-0.1.4 → agentica-0.1.6}/README.md

@@ -61,13 +61,16 @@ pip install .

  ## Getting Started

- ### 运行示例
+ #### 1. Install requirements

  ```shell
  git clone https://github.com/shibing624/agentica.git
  cd agentica
  pip install -r requirements.txt
+ ```

+ #### 2. Run the example
+ ```shell
  # Copying required .env file, and fill in the LLM api key
  cp .env.example ~/.agentica/.env

@@ -168,9 +171,6 @@ bash start.sh

  <img src="https://github.com/shibing624/agentica/blob/main/docs/wechat.jpeg" width="200" />

- <img src="https://github.com/shibing624/agentica/blob/main/docs/wechat_group.jpg" width="200" />
-
-
  ## Citation

  如果你在研究中使用了`agentica`,请按如下格式引用:
{agentica-0.1.4 → agentica-0.1.6}/agentica/__init__.py

@@ -4,30 +4,19 @@
  @description:
  """
  from agentica.version import __version__ # noqa, isort: skip
- from agentica.config import AGENTICA_DOTENV_PATH, SMART_LLM, FAST_LLM # noqa, isort: skip
- # document
- from agentica.document import Document
- # vectordb
- from agentica.vectordb.base import VectorDb
- from agentica.vectordb.memorydb import MemoryDb
- # emb
- from agentica.emb.base import Emb
- from agentica.emb.openai_emb import OpenAIEmb
- from agentica.emb.azure_emb import AzureOpenAIEmb
- from agentica.emb.hash_emb import HashEmb
- from agentica.emb.together_emb import TogetherEmb
- from agentica.file.base import File
- from agentica.file.csv import CsvFile
- from agentica.file.txt import TextFile
+ from agentica.config import AGENTICA_HOME, AGENTICA_DOTENV_PATH, SMART_LLM, FAST_LLM # noqa, isort: skip

- from agentica.knowledge.knowledge_base import KnowledgeBase
  # llm
  from agentica.llm.openai_llm import OpenAILLM
- from agentica.llm.azure_llm import AzureOpenAILLM
- from agentica.llm.together_llm import TogetherLLM
+ from agentica.llm.azure_openai_llm import AzureOpenAILLM
+ from agentica.llm.togetherllm import TogetherLLM
  from agentica.llm.deepseek_llm import DeepseekLLM
  from agentica.llm.moonshot_llm import MoonshotLLM
- from agentica.task import Task
+ from agentica.llm.ollama_llm import OllamaLLM
+ from agentica.llm.ollama_tools_llm import OllamaToolsLLM
+ from agentica.llm.claude_llm import ClaudeLLM
+
+ # memory
  from agentica.message import Message
  from agentica.memory import (
  Memory,

@@ -40,14 +29,37 @@ from agentica.memory import (
  MemoryClassifier,
  MemoryManager
  )
-
+ from agentica.template import PromptTemplate
+ # rag
+ from agentica.knowledge.knowledge_base import KnowledgeBase
  from agentica.references import References
  from agentica.run_record import RunRecord
+ from agentica.document import Document
+ # vectordb
+ from agentica.vectordb.base import VectorDb
+ from agentica.vectordb.memory_vectordb import MemoryVectorDb
+ # emb
+ from agentica.emb.base import Emb
+ from agentica.emb.openai_emb import OpenAIEmb
+ from agentica.emb.azure_openai_emb import AzureOpenAIEmb
+ from agentica.emb.hash_emb import HashEmb
+ from agentica.emb.ollama_emb import OllamaEmb
+ from agentica.emb.together_emb import TogetherEmb
+ from agentica.emb.fireworks_emb import FireworksEmb
+ from agentica.emb.text2vec_emb import Text2VecEmb
+ from agentica.emb.word2vec_emb import Word2VecEmb
+
+ # file
+ from agentica.file.base import File
+ from agentica.file.csv import CsvFile
+ from agentica.file.txt import TextFile
+
  # storage
- from agentica.pg_storage import PgStorage
- from agentica.sqlite_storage import SqliteStorage
+ from agentica.storage.base import AssistantStorage
+ from agentica.storage.pg_storage import PgAssistantStorage
+ from agentica.storage.sqlite_storage import SqlAssistantStorage
  # tool
- from agentica.tool import Tool, Toolkit, Function, FunctionCall
+ from agentica.tools.base import Tool, Toolkit, Function, FunctionCall
  # assistant
  from agentica.assistant import Assistant
  from agentica.python_assistant import PythonAssistant
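The reorganized exports mean several 0.1.4 import paths (`agentica.tool`, `agentica.pg_storage`, `agentica.sqlite_storage`, `agentica.emb.azure_emb`, `agentica.llm.azure_llm`) no longer exist in 0.1.6. A minimal sketch of the new import paths, taken from the hunk above; constructor arguments beyond `llm` are intentionally omitted because they are not shown in this diff:

```python
# Sketch of 0.1.6-style imports (paths from the __init__.py diff above).
from agentica import Assistant, OpenAILLM
from agentica.storage.sqlite_storage import SqlAssistantStorage  # was agentica.sqlite_storage.SqliteStorage
from agentica.tools.base import Toolkit                          # was agentica.tool.Toolkit

# Assistant's `storage` field now accepts any AssistantStorage subclass (see the
# assistant.py diff below); SqlAssistantStorage's constructor is not shown here,
# so it is left unconstructed in this sketch.
assistant = Assistant(llm=OpenAILLM())
```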
{agentica-0.1.4 → agentica-0.1.6}/agentica/assistant.py

@@ -25,7 +25,7 @@ from typing import (
  AsyncIterator,
  )
  from uuid import uuid4
-
+ from pathlib import Path
  from pydantic import BaseModel, ConfigDict, field_validator, ValidationError

  from agentica.document import Document

@@ -36,10 +36,11 @@ from agentica.memory import AssistantMemory, Memory
  from agentica.message import Message
  from agentica.references import References
  from agentica.run_record import RunRecord
- from agentica.sqlite_storage import SqliteStorage
- from agentica.tool import Tool, Toolkit, Function
+ from agentica.storage.base import AssistantStorage
+ from agentica.tools.base import Tool, Toolkit, Function
  from agentica.utils.log import logger, set_log_level_to_debug, print_llm_stream
  from agentica.utils.misc import merge_dictionaries, remove_indent
+ from agentica.template import PromptTemplate
  from agentica.utils.timer import Timer


@@ -80,6 +81,8 @@ class Assistant(BaseModel):
  create_memories: bool = False
  # Update memory after each run
  update_memory_after_run: bool = True
+ # Force update memory after each run
+ force_update_memory_after_run: bool = False

  # -*- Assistant Knowledge Base
  knowledge_base: Optional[KnowledgeBase] = None

@@ -87,7 +90,7 @@
  add_references_to_prompt: bool = False

  # -*- Assistant Storage
- storage: Optional[SqliteStorage] = None
+ storage: Optional[AssistantStorage] = None
  # RunRecord from the database: DO NOT SET MANUALLY
  db_row: Optional[RunRecord] = None
  # -*- Assistant Tools

@@ -130,6 +133,8 @@ class Assistant(BaseModel):
  #
  # -*- System prompt: provide the system prompt as a string
  system_prompt: Optional[str] = None
+ # -*- System prompt template: provide the system prompt as a PromptTemplate
+ system_prompt_template: Optional[PromptTemplate] = None
  # If True, build a default system prompt using instructions and extra_instructions
  build_default_system_prompt: bool = True
  # -*- Settings for building the default system prompt

@@ -147,7 +152,7 @@
  add_to_system_prompt: Optional[str] = None
  # If True, add instructions for using the knowledge base to the system prompt if knowledge base is provided
  add_knowledge_base_instructions: bool = True
- # If True, add instructions to return "I dont know" when the assistant does not know the answer.
+ # If True, add instructions to return "I don't know" when the assistant does not know the answer.
  prevent_hallucinations: bool = False
  # If True, add instructions to prevent prompt injection attacks
  prevent_prompt_injection: bool = False

@@ -162,6 +167,8 @@
  # -*- User prompt: provide the user prompt as a string
  # Note: this will ignore the message sent to the run function
  user_prompt: Optional[Union[List, Dict, str]] = None
+ # -*- User prompt template: provide the user prompt as a PromptTemplate
+ user_prompt_template: Optional[PromptTemplate] = None
  # If True, build a default user prompt using references and chat history
  build_default_user_prompt: bool = True
  # Function to get references for the user_prompt

@@ -187,6 +194,8 @@
  output: Optional[Any] = None
  # Save the output to output_dir
  output_dir: Optional[str] = "outputs"
+ # Save llm messages to output_dir
+ save_llm_messages: bool = False
  # Save the output to a file with this name, if provided save the output to a file
  output_file_name: Optional[str] = None

@@ -271,7 +280,9 @@
  if self.llm is None:
  logger.debug("LLM not set. Using OpenAILLM")
  self.llm = OpenAILLM()
- logger.debug(f"Using LLM: {self.llm}")
+ logger.info(f"Using LLM: {self.llm}")
+ else:
+ logger.debug(f"Using LLM: {self.llm}")

  # Set response_format if it is not set on the llm
  if self.output_model is not None and self.llm.response_format is None:

@@ -289,7 +300,10 @@
  self.llm.add_tool(self.get_tool_call_history)
  if self.create_memories:
  self.llm.add_tool(self.update_memory)
+ if self.force_update_memory_after_run:
+ self.update_memory_after_run = True
  if self.knowledge_base is not None:
+ self.add_references_to_prompt = True
  if self.search_knowledge:
  self.llm.add_tool(self.search_knowledge_base)
  if self.update_knowledge:

@@ -555,7 +569,13 @@
  sys_prompt += f"\n{self.get_json_output_prompt()}"
  return sys_prompt
  return self.system_prompt
-
+ # If the system_prompt_template is set, build the system_prompt using the template
+ if self.system_prompt_template is not None:
+ system_prompt_kwargs = {"assistant": self}
+ system_prompt_from_template = self.system_prompt_template.get_prompt(**system_prompt_kwargs)
+ if system_prompt_from_template is not None and self.output_model is not None:
+ system_prompt_from_template += f"\n{self.get_json_output_prompt()}"
+ return system_prompt_from_template
  # If build_default_system_prompt is False, return None
  if not self.build_default_system_prompt:
  return None

@@ -564,7 +584,7 @@
  raise Exception("LLM not set")

  # -*- Build a list of instructions for the Assistant
- instructions = self.instructions.copy() if self.instructions is not None else []
+ instructions = self.instructions.copy() if self.instructions is not None else None
  # Add default instructions
  if instructions is None:
  instructions = []

@@ -716,12 +736,12 @@

  if self.chat_history_function is not None:
  chat_history_kwargs = {"conversation": self}
- return self.chat_history_function(**chat_history_kwargs)
+ return remove_indent(self.chat_history_function(**chat_history_kwargs))

  formatted_history = self.memory.get_formatted_chat_history(num_messages=self.num_history_messages)
  if formatted_history == "":
  return None
- return formatted_history
+ return remove_indent(formatted_history)

  def get_user_prompt(
  self,

@@ -735,6 +755,16 @@
  # Note: this ignores the message provided to the run function
  if self.user_prompt is not None:
  return self.user_prompt
+ # If the user_prompt_template is set, return the user_prompt from the template
+ if self.user_prompt_template is not None:
+ user_prompt_kwargs = {
+ "assistant": self,
+ "message": message,
+ "references": references,
+ "chat_history": chat_history,
+ }
+ _user_prompt_from_template = self.user_prompt_template.get_prompt(**user_prompt_kwargs)
+ return _user_prompt_from_template

  if message is None:
  return None

@@ -875,8 +905,12 @@
  if user_message is not None:
  self.memory.add_chat_message(message=user_message)
  # Update the memory with the user message if needed
+ memory_content = f"user:{user_message.get_content_string()}\nassistant:{llm_response}"
  if self.create_memories and self.update_memory_after_run:
- self.memory.update_memory(input_text=user_message.get_content_string())
+ if self.force_update_memory_after_run:
+ self.memory.update_memory(input_text=memory_content, force=True)
+ else:
+ self.memory.update_memory(input_text=memory_content)

  # Build the LLM response message to add to the memory - this is added to the chat_history
  llm_response_message = Message(role="assistant", content=llm_response)

@@ -895,24 +929,28 @@
  self.write_to_storage()

  # Save llm_messages to file
- try:
- os.makedirs(self.output_dir, exist_ok=True)
- save_file = os.path.join(self.output_dir, f"output_{self.run_id}.json")
- messages_str = json.dumps(
- [i.dict() for i in self.memory.llm_messages], indent=2, ensure_ascii=False
- )
- with open(save_file, "w", encoding='utf-8') as f:
- f.write(messages_str)
- logger.info(f"Saved messages to file: {save_file}")
- except Exception as e:
- logger.warning(f"Failed to save output to file: {e}")
+ if self.save_llm_messages:
+ try:
+ os.makedirs(self.output_dir, exist_ok=True)
+ save_file = os.path.join(self.output_dir, f"output_{self.run_id}.json")
+ messages_str = json.dumps(
+ [i.dict() for i in self.memory.llm_messages], indent=2, ensure_ascii=False
+ )
+ with open(save_file, "w", encoding='utf-8') as f:
+ f.write(messages_str)
+ logger.info(f"Saved messages to file: {save_file}")
+ except Exception as e:
+ logger.warning(f"Failed to save output to file: {e}")

  # Save output_file_name file
  if self.output_file_name:
  try:
+ os.makedirs(self.output_dir, exist_ok=True)
  save_file = os.path.join(self.output_dir, self.output_file_name)
- with open(save_file, "w", encoding='utf-8') as f:
- f.write(self.output)
+ fn_path = Path(save_file)
+ if not fn_path.parent.exists():
+ fn_path.parent.mkdir(parents=True, exist_ok=True)
+ fn_path.write_text(self.output)
  logger.info(f"Saved output to file: {save_file}")
  except Exception as e:
  logger.warning(f"Failed to save output to file: {e}")

@@ -1064,8 +1102,12 @@
  if user_message is not None:
  self.memory.add_chat_message(message=user_message)
  # Update the memory with the user message if needed
+ memory_content = f"user:{user_message.get_content_string()}\nassistant:{llm_response}"
  if self.update_memory_after_run:
- self.memory.update_memory(input_text=user_message.get_content_string())
+ if self.force_update_memory_after_run:
+ self.memory.update_memory(input_text=memory_content, force=True)
+ else:
+ self.memory.update_memory(input_text=memory_content)

  # Build the LLM response message to add to the memory - this is added to the chat_history
  llm_response_message = Message(role="assistant", content=llm_response)
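The Assistant gains several opt-in switches in 0.1.6: LLM message dumps are no longer written on every run, and memory updates can be forced. A hedged sketch using only field names that appear in the hunks above (the values are illustrative, and the `PromptTemplate` constructor is not shown in this diff, so templates are omitted):

```python
# Sketch of the new Assistant flags in 0.1.6 (field names from the diff above).
from agentica import Assistant, OpenAILLM

assistant = Assistant(
    llm=OpenAILLM(),
    create_memories=True,
    force_update_memory_after_run=True,  # also switches update_memory_after_run on
    save_llm_messages=True,              # writes outputs/output_<run_id>.json; previously always on
    output_dir="outputs",
)
```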
agentica-0.1.6/agentica/config.py

@@ -0,0 +1,29 @@
+ # -*- coding: utf-8 -*-
+ """
+ @author:XuMing(xuming624@qq.com)
+ @description:
+ """
+ import os
+ from datetime import datetime
+ from dotenv import load_dotenv # noqa
+ from loguru import logger # noqa, need to import logger here to avoid circular import
+
+ AGENTICA_HOME = os.getenv("AGENTICA_HOME", os.path.expanduser("~/.agentica"))
+
+ # Load environment variables from .env file
+ AGENTICA_DOTENV_PATH = os.getenv("AGENTICA_DOTENV_PATH", f"{AGENTICA_HOME}/.env")
+
+ if load_dotenv(AGENTICA_DOTENV_PATH, override=True):
+ logger.info(f"Loaded AGENTICA_DOTENV_PATH: {AGENTICA_DOTENV_PATH}")
+
+ AGENTICA_DATA_DIR = os.getenv("AGENTICA_DATA_DIR", f"{AGENTICA_HOME}/data")
+ AGENTICA_LOG_LEVEL = os.getenv("AGENTICA_LOG_LEVEL", "INFO")
+ AGENTICA_LOG_FILE = os.getenv("AGENTICA_LOG_FILE")
+ if AGENTICA_LOG_LEVEL.upper() == "DEBUG":
+ formatted_date = datetime.now().strftime("%Y%m%d")
+ default_log_file = f"{AGENTICA_HOME}/logs/{formatted_date}.log"
+ AGENTICA_LOG_FILE = os.getenv("AGENTICA_LOG_FILE", default_log_file)
+ logger.debug(f"AGENTICA_LOG_LEVEL: DEBUG, AGENTICA_LOG_FILE: {AGENTICA_LOG_FILE}")
+
+ SMART_LLM = os.getenv("SMART_LLM")
+ FAST_LLM = os.getenv("FAST_LLM")
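The new config module resolves everything at import time from environment variables, with `~/.agentica` as the default home and `~/.agentica/.env` as the default dotenv location. A minimal sketch of how the lookup behaves (variable names from the file above; the example values are assumptions):

```python
# Sketch: values already set in the environment win; otherwise the .env file at
# AGENTICA_DOTENV_PATH is loaded, and finally the hard-coded defaults apply.
import os

os.environ["AGENTICA_HOME"] = "/tmp/agentica-demo"   # example value, not required
os.environ["AGENTICA_LOG_LEVEL"] = "DEBUG"           # enables the dated log-file default

from agentica.config import AGENTICA_HOME, AGENTICA_DATA_DIR, SMART_LLM, FAST_LLM

print(AGENTICA_HOME)        # /tmp/agentica-demo
print(AGENTICA_DATA_DIR)    # /tmp/agentica-demo/data unless AGENTICA_DATA_DIR is set
print(SMART_LLM, FAST_LLM)  # None unless set in the environment or the .env file
```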
{agentica-0.1.4 → agentica-0.1.6}/agentica/document.py

@@ -35,6 +35,7 @@ class Document(BaseModel):
  self.embedding, self.usage = _embedder.get_embedding_and_usage(self.content)
  else:
  self.embedding = _embedder.get_embedding(self.content)
+ self.usage = None

  def to_dict(self) -> Dict[str, Any]:
  """Returns a dictionary representation of the document"""
agentica-0.1.4/agentica/emb/azure_emb.py → agentica-0.1.6/agentica/emb/azure_openai_emb.py

@@ -5,11 +5,10 @@
  part of the code from https://github.com/phidatahq/phidata
  """
  from os import getenv
- from typing import Optional, Dict, List, Tuple, Any
+ from typing import Optional, Dict, List, Tuple, Any, Literal

  from openai import AzureOpenAI as AzureOpenAIClient
  from openai.types.create_embedding_response import CreateEmbeddingResponse
- from typing_extensions import Literal

  from agentica.emb.base import Emb
  from agentica.utils.log import logger
{agentica-0.1.4 → agentica-0.1.6}/agentica/emb/base.py

@@ -6,12 +6,13 @@ from pydantic import BaseModel, ConfigDict
  class Emb(BaseModel):
  """Base class for managing embedders"""

- dimensions: int = 1536
+ dimensions: Optional[int] = 1536

  model_config = ConfigDict(arbitrary_types_allowed=True)

  def get_embedding(self, text: str) -> List[float]:
  raise NotImplementedError

- def get_embeddings(self, texts: List[str]) -> List[List[float]]:
- raise NotImplementedError
+ def get_embedding_and_usage(self, text: str) -> Tuple[List[float], Optional[Dict]]:
+ embedding = self.get_embedding(text)
+ return embedding, None
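`Emb.get_embedding_and_usage` now has a default implementation that falls back to `get_embedding` and reports no usage, so a custom embedder only has to implement one method. A small sketch; the `DummyEmb` class is hypothetical and exists only to illustrate the base-class behavior shown above:

```python
# Hypothetical subclass: only get_embedding is implemented;
# get_embedding_and_usage falls back to it and returns (embedding, None).
from typing import List
from agentica.emb.base import Emb

class DummyEmb(Emb):
    dimensions: int = 3

    def get_embedding(self, text: str) -> List[float]:
        return [0.0] * self.dimensions

emb, usage = DummyEmb().get_embedding_and_usage("hello")
print(len(emb), usage)  # 3 None
```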
agentica-0.1.6/agentica/emb/fireworks_emb.py

@@ -0,0 +1,16 @@
+ """
+ @author:XuMing(xuming624@qq.com)
+ @description:
+ part of the code from https://github.com/phidatahq/phidata
+ """
+ from os import getenv
+ from typing import Optional
+
+ from agentica.emb.openai_emb import OpenAIEmb
+
+
+ class FireworksEmb(OpenAIEmb):
+ model: str = "nomic-ai/nomic-embed-text-v1.5"
+ dimensions: int = 768
+ api_key: Optional[str] = getenv("FIREWORKS_API_KEY")
+ base_url: str = "https://api.fireworks.ai/inference/v1"
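`FireworksEmb` is a thin configuration of `OpenAIEmb` pointed at the Fireworks OpenAI-compatible endpoint. A usage sketch, assuming `FIREWORKS_API_KEY` is set in the environment:

```python
# Sketch: the model, dimensions and base_url defaults come from the new file above;
# get_embedding is inherited from OpenAIEmb.
from agentica.emb.fireworks_emb import FireworksEmb

emb = FireworksEmb()                        # or FireworksEmb(api_key="...", model="...")
vector = emb.get_embedding("hello world")   # calls the Fireworks embeddings API
print(len(vector))                          # expected 768 per the class default
```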
agentica-0.1.6/agentica/emb/genimi_emb.py

@@ -0,0 +1,60 @@
+ """
+ @author:XuMing(xuming624@qq.com)
+ @description:
+ part of the code from https://github.com/phidatahq/phidata
+ """
+ from typing import Optional, Dict, List, Tuple, Any, Union
+
+ from agentica.emb.base import Emb
+ from agentica.utils.log import logger
+
+ try:
+ import google.generativeai as genai
+ from google.generativeai.types.text_types import EmbeddingDict, BatchEmbeddingDict
+ except ImportError:
+ raise ImportError("`google-generativeai` not installed. Please install it using `pip install google-generativeai`")
+
+
+ class GeminiEmb(Emb):
+ model: str = "models/embedding-001"
+ task_type: str = "RETRIEVAL_QUERY"
+ title: Optional[str] = None
+ dimensions: Optional[int] = None
+ api_key: Optional[str] = None
+ request_params: Optional[Dict[str, Any]] = None
+ client_params: Optional[Dict[str, Any]] = None
+ gemini_client: Optional[genai.embed_content] = None
+
+ @property
+ def client(self):
+ if self.gemini_client:
+ return self.gemini_client
+ _client_params: Dict[str, Any] = {}
+ if self.api_key:
+ _client_params["api_key"] = self.api_key
+ if self.client_params:
+ _client_params.update(self.client_params)
+ self.gemini_client = genai
+ self.gemini_client.configure(**_client_params)
+ return self.gemini_client
+
+ def _response(self, text: str) -> Union[EmbeddingDict, BatchEmbeddingDict]:
+ _request_params: Dict[str, Any] = {
+ "content": text,
+ "model": self.model,
+ "output_dimensionality": self.dimensions,
+ "task_type": self.task_type,
+ "title": self.title,
+ }
+ if self.request_params:
+ _request_params.update(self.request_params)
+ return self.client.embed_content(**_request_params)
+
+ def get_embedding(self, text: str) -> List[float]:
+ response = self._response(text=text)
+ try:
+ return response.get("embedding", [])
+ except Exception as e:
+ logger.warning(e)
+ return []
+
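A usage sketch for the new Gemini embedder (note the module is named `genimi_emb.py` in the package). It requires the `google-generativeai` package; the API key is passed through the `api_key` field shown above, and the dimension in the comment is an assumption from Google's model docs, not from this diff:

```python
# Sketch: GeminiEmb wraps google.generativeai.embed_content; the api_key and
# task_type fields come from the class definition above.
from agentica.emb.genimi_emb import GeminiEmb

emb = GeminiEmb(api_key="your-google-api-key", task_type="RETRIEVAL_DOCUMENT")
vector = emb.get_embedding("agentica is an LLM agent library")
print(len(vector))  # 768 for models/embedding-001 (per Google's docs, not this diff)
```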
agentica-0.1.6/agentica/emb/huggingface_emb.py

@@ -0,0 +1,53 @@
+ """
+ @author:XuMing(xuming624@qq.com)
+ @description:
+ part of the code from https://github.com/phidatahq/phidata
+ """
+ from typing import Optional, Dict, List, Tuple, Any, Union
+
+ from agentica.emb.base import Emb
+ from agentica.utils.log import logger
+
+ try:
+ from huggingface_hub import InferenceClient, SentenceSimilarityInput
+ except ImportError:
+ raise ImportError("`huggingface-hub` not installed, please run `pip install huggingface-hub`")
+
+
+ class HuggingfaceEmb(Emb):
+ """Huggingface Custom Embedder"""
+
+ model: str = "jinaai/jina-embeddings-v2-base-code"
+ api_key: Optional[str] = None
+ client_params: Optional[Dict[str, Any]] = None
+ huggingface_client: Optional[InferenceClient] = None
+
+ @property
+ def client(self) -> InferenceClient:
+ if self.huggingface_client:
+ return self.huggingface_client
+ _client_params: Dict[str, Any] = {}
+ if self.api_key:
+ _client_params["api_key"] = self.api_key
+ if self.client_params:
+ _client_params.update(self.client_params)
+ self.huggingface_client = InferenceClient(**_client_params)
+ return self.huggingface_client
+
+ def _response(self, text: str):
+ _request_params: SentenceSimilarityInput = {
+ "json": {"inputs": text},
+ "model": self.model,
+ }
+ return self.client.post(**_request_params)
+
+ def get_embedding(self, text: str) -> List[float]:
+ resp = []
+ try:
+ resp = self._response(text=text)
+ except Exception as e:
+ logger.warning(e)
+ return resp
+
+ def get_embedding_and_usage(self, text: str) -> Tuple[List[float], Optional[Dict]]:
+ return super().get_embedding_and_usage(text)
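A usage sketch for `HuggingfaceEmb`, which posts the text to the Hugging Face Inference API through `huggingface_hub.InferenceClient`. The token value is illustrative, and note that this implementation returns the raw Inference API response unchanged:

```python
# Sketch: api_key is a Hugging Face token; the default model is the jina code
# embeddings model declared above.
from agentica.emb.huggingface_emb import HuggingfaceEmb

emb = HuggingfaceEmb(api_key="hf_xxx")  # token is illustrative
result = emb.get_embedding("def hello(): return 'world'")
print(type(result))  # whatever InferenceClient.post returns; not decoded by this class
```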
{agentica-0.1.4 → agentica-0.1.6}/agentica/emb/ollama_emb.py

@@ -4,16 +4,11 @@
  @description:
  part of the code from https://github.com/phidatahq/phidata
  """
- from typing import Optional, Dict, List, Any
+ from typing import Optional, Dict, List, Any, Tuple

  from agentica.emb.base import Emb
  from agentica.utils.log import logger

- try:
- from ollama import Client as OllamaClient
- except ImportError:
- raise ImportError("`ollama` not installed, please install it via `pip install ollama`")
-

  class OllamaEmb(Emb):
  model: str = "quentinz/bge-base-zh-v1.5"

@@ -22,10 +17,15 @@ class OllamaEmb(Emb):
  timeout: Optional[Any] = None
  options: Optional[Any] = None
  client_kwargs: Optional[Dict[str, Any]] = None
- ollama_client: Optional[OllamaClient] = None
+ ollama_client: Optional[Any] = None

  @property
- def client(self) -> OllamaClient:
+ def client(self) -> Any:
+ try:
+ from ollama import Client as OllamaClient
+ except ImportError:
+ raise ImportError("`ollama` not installed, please run `pip install ollama`")
+
  if self.ollama_client:
  return self.ollama_client

{agentica-0.1.4 → agentica-0.1.6}/agentica/emb/openai_emb.py

@@ -5,11 +5,10 @@
  part of the code from https://github.com/phidatahq/phidata
  """
  from os import getenv
- from typing import Optional, Dict, List, Tuple, Any
+ from typing import Optional, Dict, List, Tuple, Any, Literal

  from openai import OpenAI as OpenAIClient
  from openai.types.create_embedding_response import CreateEmbeddingResponse
- from typing_extensions import Literal

  from agentica.emb.base import Emb
  from agentica.utils.log import logger

@@ -41,7 +40,8 @@ class OpenAIEmb(Emb):
  _client_params["base_url"] = self.base_url
  if self.client_params:
  _client_params.update(self.client_params)
- return OpenAIClient(**_client_params)
+ self.openai_client = OpenAIClient(**_client_params)
+ return self.openai_client

  def _response(self, text: str) -> CreateEmbeddingResponse:
  _request_params: Dict[str, Any] = {

@@ -70,7 +70,9 @@ class OpenAIEmb(Emb):

  embedding = response.data[0].embedding
  usage = response.usage
- return embedding, usage.model_dump()
+ if usage:
+ return embedding, usage.model_dump()
+ return embedding, None

  def get_embeddings(self, texts: List[str]) -> List[List[float]]:
- return [self.get_embedding(text) for text in texts]
+ return [self.get_embedding(text) for text in texts]
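`OpenAIEmb` now caches the client on first use and tolerates a missing usage payload. A usage sketch; the assumption is that the key comes from `OPENAI_API_KEY` or is passed explicitly, and that `base_url` can point at any OpenAI-compatible endpoint (the `base_url` field is visible in the hunk above):

```python
# Sketch: get_embedding_and_usage returns (embedding, usage-dict-or-None);
# get_embeddings is a simple per-text loop, per the last hunk above.
from agentica.emb.openai_emb import OpenAIEmb

emb = OpenAIEmb()                                     # or OpenAIEmb(api_key="...", base_url="...")
vector, usage = emb.get_embedding_and_usage("hello")
vectors = emb.get_embeddings(["a", "b", "c"])
print(len(vectors), usage)                            # usage is None if the API omits it
```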