versionhq 1.1.10.8__py3-none-any.whl → 1.1.10.9__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- versionhq/__init__.py +1 -1
- versionhq/_utils/vars.py +2 -0
- versionhq/agent/TEMPLATES/Backstory.py +2 -2
- versionhq/agent/default_agents.py +10 -0
- versionhq/agent/model.py +127 -39
- versionhq/agent/parser.py +3 -20
- versionhq/{_utils → agent}/rpm_controller.py +22 -15
- versionhq/knowledge/__init__.py +0 -0
- versionhq/knowledge/_utils.py +11 -0
- versionhq/knowledge/embedding.py +192 -0
- versionhq/knowledge/model.py +54 -0
- versionhq/knowledge/source.py +413 -0
- versionhq/knowledge/source_docling.py +129 -0
- versionhq/knowledge/storage.py +177 -0
- versionhq/llm/model.py +76 -62
- versionhq/memory/__init__.py +0 -0
- versionhq/memory/contextual_memory.py +96 -0
- versionhq/memory/model.py +174 -0
- versionhq/storage/base.py +14 -0
- versionhq/storage/ltm_sqlite_storage.py +131 -0
- versionhq/storage/mem0_storage.py +109 -0
- versionhq/storage/rag_storage.py +231 -0
- versionhq/storage/task_output_storage.py +18 -29
- versionhq/storage/utils.py +26 -0
- versionhq/task/TEMPLATES/Description.py +5 -0
- versionhq/task/evaluate.py +122 -0
- versionhq/task/model.py +134 -43
- versionhq/team/team_planner.py +1 -1
- versionhq/tool/model.py +44 -46
- {versionhq-1.1.10.8.dist-info → versionhq-1.1.10.9.dist-info}/METADATA +44 -34
- versionhq-1.1.10.9.dist-info/RECORD +64 -0
- versionhq-1.1.10.8.dist-info/RECORD +0 -45
- {versionhq-1.1.10.8.dist-info → versionhq-1.1.10.9.dist-info}/LICENSE +0 -0
- {versionhq-1.1.10.8.dist-info → versionhq-1.1.10.9.dist-info}/WHEEL +0 -0
- {versionhq-1.1.10.8.dist-info → versionhq-1.1.10.9.dist-info}/top_level.txt +0 -0
versionhq/knowledge/storage.py
ADDED
@@ -0,0 +1,177 @@
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional
+
+import contextlib
+import hashlib
+import io
+import logging
+import os
+import shutil
+
+import chromadb
+import chromadb.errors
+from chromadb.api import ClientAPI
+from chromadb.api.types import OneOrMany
+from chromadb.config import Settings
+
+from versionhq._utils.logger import Logger
+from versionhq._utils.vars import KNOWLEDGE_DIRECTORY
+from versionhq.storage.utils import fetch_db_storage_path
+from versionhq.knowledge.embedding import EmbeddingConfigurator
+
+
+@contextlib.contextmanager
+def suppress_logging(
+    logger_name="chromadb.segment.impl.vector.local_persistent_hnsw",
+    level=logging.ERROR,
+):
+    logger = logging.getLogger(logger_name)
+    original_level = logger.getEffectiveLevel()
+    logger.setLevel(level)
+    with (
+        contextlib.redirect_stdout(io.StringIO()),
+        contextlib.redirect_stderr(io.StringIO()),
+        contextlib.suppress(UserWarning),
+    ):
+        yield
+    logger.setLevel(original_level)
+
+
+
+class BaseKnowledgeStorage(ABC):
+    """
+    Abstract base class for knowledge storage implementations.
+    """
+
+    @abstractmethod
+    def search(self, query: List[str], limit: int = 3, filter: Optional[dict] = None, score_threshold: float = 0.35) -> List[Dict[str, Any]]:
+        """Search for documents in the knowledge base."""
+        pass
+
+    @abstractmethod
+    def save(self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]) -> None:
+        """Save documents to the knowledge base."""
+        pass
+
+    @abstractmethod
+    def reset(self) -> None:
+        """Reset the knowledge base."""
+        pass
+
+
+
+class KnowledgeStorage(BaseKnowledgeStorage):
+    """
+    Extends Storage to handle embeddings for memory entries, improving search efficiency.
+    """
+
+    collection: Optional[chromadb.Collection] = None
+    collection_name: Optional[str] = "knowledge"
+    app: Optional[ClientAPI] = None
+
+    def __init__(self, embedder_config: Optional[Dict[str, Any]] = None, collection_name: Optional[str] = None):
+        self.collection_name = collection_name
+        self._set_embedder_config(embedder_config)
+
+    def search(self, query: List[str], limit: int = 3, filter: Optional[dict] = None, score_threshold: float = 0.35) -> List[Dict[str, Any]]:
+        with suppress_logging():
+            if self.collection:
+                fetched = self.collection.query(query_texts=query, n_results=limit, where=filter)
+                results = []
+                for i in range(len(fetched["ids"][0])):
+                    result = {
+                        "id": fetched["ids"][0][i],
+                        "metadata": fetched["metadatas"][0][i],
+                        "context": fetched["documents"][0][i],
+                        "score": fetched["distances"][0][i],
+                    }
+                    if result["score"] >= score_threshold:
+                        results.append(result)
+                return results
+            else:
+                raise Exception("Collection not initialized")
+
+
+    def initialize_knowledge_storage(self):
+        base_path = os.path.join(fetch_db_storage_path(), "knowledge")
+        chroma_client = chromadb.PersistentClient(path=base_path, settings=Settings(allow_reset=True))
+        self.app = chroma_client
+
+        try:
+            collection_name = f"knowledge_{self.collection_name}" if self.collection_name else "knowledge"
+            if self.app:
+                self.collection = self.app.get_or_create_collection(name=collection_name, embedding_function=self.embedder_config)
+            else:
+                raise Exception("Vector Database Client not initialized")
+        except Exception:
+            raise Exception("Failed to create or get collection")
+
+
+    def reset(self):
+        base_path = os.path.join(fetch_db_storage_path(), KNOWLEDGE_DIRECTORY)
+        if not self.app:
+            self.app = chromadb.PersistentClient(path=base_path, settings=Settings(allow_reset=True))
+        self.app.reset()
+        shutil.rmtree(base_path)
+        self.app = None
+        self.collection = None
+
+
+    def save(self, documents: List[str], metadata: Optional[Dict[str, Any] | List[Dict[str, Any]]] = None) -> None:
+        if not self.collection:
+            raise Exception("Collection not initialized")
+
+        try:
+            unique_docs = {}
+            for i, doc in enumerate(documents):
+                doc_id = hashlib.sha256(doc.encode("utf-8")).hexdigest()
+                doc_metadata = None
+                if metadata is not None:
+                    if isinstance(metadata, list):
+                        doc_metadata = metadata[i]
+                    else:
+                        doc_metadata = metadata
+                unique_docs[doc_id] = (doc, doc_metadata)
+
+            filtered_docs = []
+            filtered_metadata = []
+            filtered_ids = []
+
+            for doc_id, (doc, meta) in unique_docs.items():
+                filtered_docs.append(doc)
+                filtered_metadata.append(meta)
+                filtered_ids.append(doc_id)
+
+            final_metadata: Optional[OneOrMany[chromadb.Metadata]] = (
+                None if all(m is None for m in filtered_metadata) else filtered_metadata
+            )
+            self.collection.upsert(documents=filtered_docs, metadatas=final_metadata, ids=filtered_ids)
+
+        except chromadb.errors.InvalidDimensionException as e:
+            Logger(verbose=True).log(
+                level="error",
+                message="Embedding dimension mismatch. This usually happens when mixing different embedding models.",
+                color="red",
+            )
+            raise ValueError("Embedding dimension mismatch. Make sure you're using the same embedding model across all operations with this collection.") from e
+
+        except Exception as e:
+            Logger(verbose=True).log(level="error", message=f"Failed to upsert documents: {str(e)}", color="red")
+            raise
+
+
+    def _create_default_embedding_function(self) -> Any:
+        from chromadb.utils.embedding_functions.openai_embedding_function import (
+            OpenAIEmbeddingFunction,
+        )
+
+        return OpenAIEmbeddingFunction(
+            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
+        )
+
+
+    def _set_embedder_config(self, embedder_config: Optional[Dict[str, Any]] = None) -> None:
+        """
+        Set the embedding configuration for the knowledge storage.
+        """
+        self.embedder_config = EmbeddingConfigurator().configure_embedder(embedder_config) if embedder_config else self._create_default_embedding_function()
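For orientation, a minimal, hypothetical usage sketch of the new KnowledgeStorage class (not part of the diff; it assumes chromadb is installed and OPENAI_API_KEY is set so the default OpenAI embedding function can be built):

# Hypothetical usage only; names follow the class added above.
from versionhq.knowledge.storage import KnowledgeStorage

storage = KnowledgeStorage(collection_name="docs")
storage.initialize_knowledge_storage()   # opens or creates the "knowledge_docs" chroma collection
storage.save(documents=["versionhq orchestrates agent networks."], metadata={"source": "readme"})
for hit in storage.search(query=["What does versionhq do?"], limit=3):
    print(hit["id"], hit["score"], hit["context"])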
versionhq/llm/model.py
CHANGED
@@ -82,6 +82,7 @@ class LLM(BaseModel):
 
     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
     _init_model_name: str = PrivateAttr(default=None)
+    _tokens: int = PrivateAttr(default=0) # accumulate total tokens used for the call
     model_config = ConfigDict(extra="allow")
 
     model: str = Field(default=DEFAULT_MODEL_NAME)
@@ -136,6 +137,7 @@ class LLM(BaseModel):
 
         self._init_model_name = self.model
         self.model = None
+        self._tokens = 0
 
         if self.provider and MODELS.get(self.provider):
             provider_model_list = MODELS.get(self.provider)
@@ -147,7 +149,7 @@ class LLM(BaseModel):
                     self.model = item
                 else:
                     temp_model = provider_model_list[0]
-                    self._logger.log(level="
+                    self._logger.log(level="warning", message=f"The provided model: {self._init_model_name} is not in the list. We'll assign a model: {temp_model} from the selected model provider: {self.provider}.", color="yellow")
                     self.model = temp_model
 
         else:
@@ -163,7 +165,7 @@ class LLM(BaseModel):
                 self.provider = k
 
         if self.model is None:
-            self._logger.log(level="
+            self._logger.log(level="warning", message=f"The provided model \'{self.model}\' is not in the list. We'll assign a default model.", color="yellow")
             self.model = DEFAULT_MODEL_NAME
             self.provider = "openai"
 
@@ -185,17 +187,36 @@ class LLM(BaseModel):
         return self
 
 
+    def _create_valid_params(self, config: Dict[str, Any], provider: str = None) -> Dict[str, Any]:
+        params = dict()
+        valid_keys = list()
+
+        if not provider:
+            valid_keys = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(self.provider) if self.provider else PARAMS.get("litellm") + PARAMS.get("common")
+        else:
+            valid_keys = PARAMS.get("common") + PARAMS.get(self.provider)
+
+        for item in valid_keys:
+            if hasattr(self, item) and getattr(self, item):
+                params[item] = getattr(self, item)
+            elif item in config:
+                params[item] = config[item]
+
+        return params
+
+
     def call(
         self,
         messages: List[Dict[str, str]],
         response_format: Optional[Dict[str, Any]] = None,
-        tools: Optional[List[Tool | ToolSet |
+        tools: Optional[List[Tool | ToolSet | Any ]] = None,
         config: Optional[Dict[str, Any]] = {}, # any other conditions to pass on to the model.
         tool_res_as_final: bool = False
     ) -> str:
         """
         Execute LLM based on the agent's params and model params.
         """
+
         litellm.drop_params = True
 
         with suppress_warnings():
@@ -203,80 +224,73 @@ class LLM(BaseModel):
             self._set_callbacks(self.callbacks) # passed by agent
 
             try:
-                if tools:
-                    self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
-
-                if response_format:
-                    self.response_format = { "type": "json_object" } if tool_res_as_final else response_format
-
                 provider = self.provider if self.provider else "openai"
+                self.response_format = { "type": "json_object" } if tool_res_as_final == True else response_format
 
-
-
-
-
-
-                        if hasattr(self, item) and getattr(self, item):
-                            params[item] = getattr(self, item)
-                        elif item in config:
-                            params[item] = config[item]
-                        else:
-                            continue
-                    else:
-                        continue
-
-                res = litellm.completion(messages=messages, stream=False, **params)
-
-                if self.tools:
-                    messages.append(res["choices"][0]["message"])
-                    tool_calls = res["choices"][0]["message"]["tool_calls"]
-                    tool_res = ""
-
-                    for item in tool_calls:
-                        func_name = item.function.name
-                        func_args = item.function.arguments
-
-                        if not isinstance(func_args, dict):
-                            func_args = json.loads(json.dumps(eval(str(func_args))))
+                if not tools:
+                    params = self._create_valid_params(config=config)
+                    res = litellm.completion(messages=messages, stream=False, **params)
+                    self._tokens += int(res["usage"]["total_tokens"])
+                    return res["choices"][0]["message"]["content"]
 
-
-
-                            tool_instance = tool.tool
-                            args = tool.kwargs
-                            res = tool_instance.run(params=args)
+                else:
+                    self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
 
-
-
-
-
+                    if provider == "openai":
+                        params = self._create_valid_params(config=config, provider=provider)
+                        res = openai_client.chat.completions.create(messages=messages, model=self.model, tools=self.tools)
+                        tool_calls = res.choices[0].message.tool_calls
+                        tool_res = ""
+
+                        for item in tool_calls:
+                            func_name = item.function.name
+                            func_args = item.function.arguments
+
+                            if not isinstance(func_args, dict):
+                                try:
+                                    func_args = json.loads(json.dumps(eval(str(func_args))))
+                                except:
+                                    pass
+
+                            for tool in tools:
+                                if isinstance(tool, ToolSet) and (tool.tool.name == func_name or tool.tool.func.__name__ == func_name or func_name == "random_func"):
+                                    tool_instance = tool.tool
+                                    args = tool.kwargs
+                                    tool_res_to_add = tool_instance.run(params=args)
+
+                                    if tool_res_as_final:
+                                        tool_res += str(tool_res_to_add)
+                                    else:
+                                        messages.append(res.choices[0].message)
+                                        messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
 
-                        elif (isinstance(tool, Tool) or type(tool) == Tool) and (tool.name.replace(" ", "_") == func_name or tool.func.__name__ == func_name):
-                            res = tool.run(params=func_args)
-                            if tool_res_as_final:
-                                tool_res += str(res)
                                 else:
-
-
-
-
-
-
-
-
-
-
+                                    try:
+                                        tool_res_to_add = tool.run(params=func_args)
+                                        if tool_res_as_final:
+                                            tool_res += str(tool_res_to_add)
+                                        else:
+                                            messages.append(res.choices[0].message)
+                                            messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
+                                    except:
+                                        pass
+
+                        if tool_res_as_final:
+                            return tool_res
+                        else:
+                            res = openai_client.chat.completions.create(messages=messages, model=self.model, tools=self.tools)
+                            self._tokens += int(res["usage"]["total_tokens"])
+                            return res.choices[0].message.content
 
             except JSONSchemaValidationError as e:
                 self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
-
+                raise e
 
             except Exception as e:
                 self._logger.log(level="error", message=f"{self.model} failed to execute: {str(e)}", color="red")
                 if "litellm.RateLimitError" in str(e):
                     raise e
 
-        return None
-
 
     def _supports_function_calling(self) -> bool:
         try:
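Behaviorally, call() now routes tool-free requests through litellm with a parameter whitelist built by the new _create_valid_params helper, and accumulates usage on _tokens. A standalone sketch of the whitelist-merge rule (the PARAMS table here is invented for illustration and is not the library's):

# Sketch only: mirrors the merge logic of _create_valid_params under an assumed PARAMS table.
PARAMS = {"litellm": ["api_key"], "common": ["temperature", "max_tokens"], "openai": ["response_format"]}

class FakeLLM:
    temperature = 0.2
    max_tokens = None  # unset attributes fall through to the call-site config

    def create_valid_params(self, config, provider=None):
        keys = PARAMS["common"] + PARAMS.get(provider, []) if provider else PARAMS["litellm"] + PARAMS["common"]
        return {k: getattr(self, k, None) or config.get(k) for k in keys if getattr(self, k, None) or k in config}

print(FakeLLM().create_valid_params({"max_tokens": 256}))    # {'temperature': 0.2, 'max_tokens': 256}
print(FakeLLM().create_valid_params({}, provider="openai"))  # {'temperature': 0.2}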
versionhq/memory/__init__.py
File without changes
versionhq/memory/contextual_memory.py
ADDED
@@ -0,0 +1,96 @@
+from typing import Any, Dict, Optional, List
+
+from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
+
+
+class ContextualMemory:
+    """
+    A class to construct context from memories (ShortTermMemory, UserMemory).
+    The context will be added to the prompt when the agent executes the task.
+    """
+
+    def __init__(
+        self,
+        memory_config: Optional[Dict[str, Any]],
+        stm: ShortTermMemory,
+        ltm: LongTermMemory,
+        um: UserMemory,
+        # em: EntityMemory,
+    ):
+        self.memory_provider = memory_config.get("provider") if memory_config is not None else None
+        self.stm = stm
+        self.ltm = ltm
+        self.um = um
+
+
+    def build_context_for_task(self, task, context: List[Any] | str) -> str:
+        """
+        Automatically builds a minimal, highly relevant set of contextual information for a given task.
+        """
+
+        query = f"{task.description} {context}".strip()
+
+        if query == "":
+            return ""
+
+        context = []
+        context.append(self._fetch_stm_context(query))
+        if self.memory_provider == "mem0":
+            context.append(self._fetch_user_context(query))
+        return "\n".join(filter(None, context))
+
+
+    def _fetch_stm_context(self, query) -> str:
+        """
+        Fetches recent relevant insights from STM related to the task's description and expected_output, formatted as bullet points.
+        """
+        stm_results = self.stm.search(query)
+        formatted_results = "\n".join(
+            [
+                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
+                for result in stm_results
+            ]
+        )
+        return f"Recent Insights:\n{formatted_results}" if stm_results else ""
+
+
+    def _fetch_ltm_context(self, task) -> Optional[str]:
+        """
+        Fetches historical data or insights from LTM that are relevant to the task's description and expected_output, formatted as bullet points.
+        """
+        ltm_results = self.ltm.search(task, latest_n=2)
+        if not ltm_results:
+            return None
+
+        formatted_results = [suggestion for result in ltm_results for suggestion in result["metadata"]["suggestions"]]
+        formatted_results = list(dict.fromkeys(formatted_results))
+        formatted_results = "\n".join([f"- {result}" for result in formatted_results])
+        return f"Historical Data:\n{formatted_results}" if ltm_results else ""
+
+
+    def _fetch_user_context(self, query: str) -> str:
+        """
+        Fetches and formats relevant user information from User Memory.
+        """
+
+        user_memories = self.um.search(query)
+        if not user_memories:
+            return ""
+
+        formatted_memories = "\n".join(f"- {result['memory']}" for result in user_memories)
+        return f"User memories/preferences:\n{formatted_memories}"
+
+
+    # def _fetch_entity_context(self, query) -> str:
+    #     """
+    #     Fetches relevant entity information from Entity Memory related to the task's description and expected_output,
+    #     formatted as bullet points.
+    #     """
+    #     em_results = self.em.search(query)
+    #     formatted_results = "\n".join(
+    #         [
+    #             f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
+    #             for result in em_results
+    #         ] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
+    #     )
+    #     return f"Entities:\n{formatted_results}" if em_results else ""
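A runnable, hypothetical sketch of how ContextualMemory assembles prompt context; the task and memory objects are stubbed here and are not part of the diff:

# Illustrative stubs so the flow is visible end to end.
class StubMemory:
    def search(self, query, **kwargs):
        return [{"context": "Customers ask for shorter summaries."}]

class StubTask:
    description = "Summarize customer feedback"

cm = ContextualMemory(memory_config=None, stm=StubMemory(), ltm=StubMemory(), um=StubMemory())
print(cm.build_context_for_task(StubTask(), context="Q3 survey results"))
# -> Recent Insights:
#    - Customers ask for shorter summaries.
# User memories are appended only when memory_config == {"provider": "mem0"}.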
versionhq/memory/model.py
ADDED
@@ -0,0 +1,174 @@
+from typing import Any, Dict, List, Optional
+
+from versionhq.storage.rag_storage import RAGStorage
+from versionhq.storage.ltm_sqlite_storage import LTMSQLiteStorage
+
+
+class Memory:
+    """
+    Base class for memory.
+    """
+
+    def __init__(self, storage: RAGStorage):
+        self.storage = storage
+
+
+    def save(self, value: Any, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None) -> None:
+        metadata = metadata or {}
+
+        if agent:
+            metadata["agent"] = agent
+        self.storage.save(value, metadata)
+
+
+    def search(self, query: str, limit: int = 3, score_threshold: float = 0.35) -> List[Any]:
+        return self.storage.search(query=query, limit=limit, score_threshold=score_threshold)
+
+
+
+class ShortTermMemoryItem:
+    def __init__(
+        self,
+        data: Any,
+        agent: Optional[str] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+    ):
+        self.data = data
+        self.agent = agent
+        self.metadata = metadata if metadata is not None else {}
+
+
+class ShortTermMemory(Memory):
+    """
+    A class for managing transient data related to immediate tasks and interactions.
+    - Type: stm
+    - Storage: Mem0Storage | RAGStorage
+    """
+
+    def __init__(self, agent = None, embedder_config: Dict[str, Any] = None, storage=None, path=None):
+        if hasattr(agent, "memory_config") and agent.memory_config is not None:
+            self.memory_provider = agent.memory_config.get("provider")
+        else:
+            self.memory_provider = None
+
+        if self.memory_provider == "mem0":
+            try:
+                from versionhq.storage.mem0_storage import Mem0Storage
+            except ImportError:
+                raise ImportError("Mem0 is not installed. Please install it with `uv pip install mem0ai`.")
+
+            storage = Mem0Storage(type="stm", agent=agent)
+        else:
+            storage = storage if storage else RAGStorage(type="stm", embedder_config=embedder_config, agents=[agent,], path=path)
+
+        super().__init__(storage)
+
+
+    def save(self, value: Any, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None) -> None:
+        item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
+        if self.memory_provider == "mem0":
+            item.data = f"Remember the following insights from Agent run: {item.data}"
+
+        super().save(value=item.data, metadata=item.metadata, agent=item.agent)
+
+
+    def search(self, query: str, limit: int = 3, score_threshold: float = 0.35,):
+        return self.storage.search(query=query, limit=limit, score_threshold=score_threshold)
+
+
+    def reset(self) -> None:
+        try:
+            self.storage.reset()
+        except Exception as e:
+            raise Exception(f"An error occurred while resetting the short-term memory: {str(e)}")
+
+
+
+class LongTermMemoryItem:
+    def __init__(
+        self,
+        agent: str,
+        task: str,
+        datetime: str,
+        quality: Optional[int | float] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+    ):
+        self.task = task
+        self.agent = agent
+        self.quality = quality
+        self.datetime = datetime
+        self.metadata = metadata if metadata is not None else {}
+
+
+
+class LongTermMemory(Memory):
+    """
+    A class for managing cross runs data related to overall task executions.
+    - Type: ltm
+    - Storage: LTMSQLiteStorage
+    """
+
+    def __init__(self, storage=None, path=None):
+        if not storage:
+            storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
+
+        super().__init__(storage)
+
+
+    def save(self, item: LongTermMemoryItem) -> None:
+        metadata = item.metadata
+        metadata.update({ "agent": item.agent })
+        self.storage.save(
+            task_description=item.task,
+            score=metadata["quality"],
+            metadata=metadata,
+            datetime=item.datetime,
+        )
+
+
+    def search(self, task: str, latest_n: int = 3) -> List[Dict[str, Any]]:
+        return self.storage.load(task, latest_n)
+
+
+    def reset(self) -> None:
+        self.storage.reset()
+
+
+
+class UserMemoryItem:
+    def __init__(self, data: Any, user: str, metadata: Optional[Dict[str, Any]] = None):
+        self.data = data
+        self.user = user
+        self.metadata = metadata if metadata is not None else {} # can be stored last purchased item, comm related to the user
+
+
+class UserMemory(Memory):
+    """
+    UserMemory class for handling user memory storage and retrieval.
+    - Type: user
+    - Storage: Mem0Storage
+    - Requirements: `user_id` in metadata
+    """
+
+    def __init__(self, agent=None, user_id=None):
+        try:
+            from versionhq.storage.mem0_storage import Mem0Storage
+        except ImportError:
+            raise ImportError("Mem0 is not installed. Please install it with `uv pip install mem0ai`.")
+
+        if not user_id:
+            raise ValueError("Need User Id to create UserMemory.")
+
+        else:
+            storage = Mem0Storage(type="user", agent=agent, user_id=user_id)
+            super().__init__(storage)
+
+
+    def save(self, value: str, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None) -> None:
+        data = f"Remember the details about the user: {value}"
+        super().save(value=data, metadata=metadata, agent=agent)
+
+
+    def search(self, query: str, limit: int = 3, score_threshold: float = 0.35):
+        results = self.storage.search(query=query, limit=limit, score_threshold=score_threshold)
+        return results
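A hypothetical end-to-end sketch of the new memory classes (agent names, values, and the datetime string are invented; mem0 is only required when the provider is "mem0", otherwise STM falls back to RAGStorage):

from versionhq.memory.model import ShortTermMemory, LongTermMemory, LongTermMemoryItem

stm = ShortTermMemory()   # no mem0 provider configured -> RAGStorage backend
stm.save(value="User prefers concise answers.", agent="support_agent")
print(stm.search("answer style", limit=1))

ltm = LongTermMemory()    # LTMSQLiteStorage under the default db path
ltm.save(LongTermMemoryItem(
    agent="support_agent",
    task="summarize feedback",
    datetime="2025-01-26 00:00:00",
    quality=0.9,
    metadata={"quality": 0.9, "suggestions": []},   # save() reads metadata["quality"]
))
print(ltm.search("summarize feedback", latest_n=2))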
versionhq/storage/base.py
ADDED
@@ -0,0 +1,14 @@
+from typing import Any, Dict, List
+
+
+class Storage:
+    """Abstract base class defining the storage interface"""
+
+    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+        pass
+
+    def search(self, query: str, limit: int, score_threshold: float) -> Dict[str, Any] | List[Any]:
+        return {}
+
+    def reset(self) -> None:
+        pass
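The new Storage base is a plain duck-typed interface rather than an abc; concrete backends such as RAGStorage, LTMSQLiteStorage, and Mem0Storage override these no-op methods. A trivial hypothetical in-memory implementation for illustration:

# Hypothetical backend implementing the Storage interface above.
class InMemoryStorage(Storage):
    def __init__(self):
        self.items = []

    def save(self, value, metadata):
        self.items.append((value, metadata))

    def search(self, query, limit, score_threshold):
        # naive substring match; real backends rank by embedding distance
        return [v for v, _ in self.items if str(query).lower() in str(v).lower()][:limit]

    def reset(self):
        self.items.clear()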