versionhq 1.1.12.1__py3-none-any.whl → 1.1.12.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -8,25 +8,64 @@ from versionhq.agent.model import Agent
  from versionhq.clients.customer.model import Customer
  from versionhq.clients.product.model import Product, ProductProvider
  from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent
- from versionhq.task.model import Task, TaskOutput
- from versionhq.team.model import Team, TeamOutput
- from versionhq.tool.model import Tool
+ from versionhq.knowledge.model import Knowledge, KnowledgeStorage
+ from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
+ from versionhq.knowledge.source_docling import DoclingSource
+ from versionhq.task.model import Task, TaskOutput, ConditionalTask, ResponseField
+ from versionhq.task.evaluate import Evaluation, EvaluationItem
+ from versionhq.team.model import Team, TeamOutput, Formation, TeamMember, TaskHandlingProcess
+ from versionhq.tool.model import Tool, ToolSet
+ from versionhq.tool.cache_handler import CacheHandler
+ from versionhq.tool.tool_handler import ToolHandler
  from versionhq.tool.composio_tool import ComposioHandler
+ from versionhq.memory.contextual_memory import ContextualMemory
+ from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory, MemoryItem


- __version__ = "1.1.12.1"
+
+ __version__ = "1.1.12.2"
  __all__ = [
  "Agent",
+
  "Customer",
  "Product",
  "ProductProvider",
  "MessagingWorkflow",
  "MessagingComponent",
- "LLM",
+
+ "Knowledge",
+ "KnowledgeStorage",
+ "PDFKnowledgeSource",
+ "CSVKnowledgeSource",
+ "JSONKnowledgeSource",
+ "TextFileKnowledgeSource",
+ "ExcelKnowledgeSource",
+ "StringKnowledgeSource",
+ "DoclingSource",
+
  "Task",
  "TaskOutput",
+ "ConditionalTask",
+ "ResponseField",
+
+ "Evaluation",
+ "EvaluationItem",
+
  "Team",
  "TeamOutput",
+ "Formation",
+ "TeamMember",
+ "TaskHandlingProcess",
+
  "Tool",
- "ComposioHandler"
+ "ToolSet",
+ "CacheHandler",
+ "ToolHandler",
+ "ComposioHandler",
+
+ "ContextualMemory",
+ "ShortTermMemory",
+ "LongTermMemory",
+ "UserMemory",
+ "MemoryItem"
  ]
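
To make the expanded public API concrete, here is a minimal import check against the names exported above; it assumes only that the wheel installs as the `versionhq` package and that every name in `__all__` resolves.

```python
# Hypothetical smoke test for the 1.1.12.2 public API; names are taken from __all__ above.
import versionhq
from versionhq import (
    Agent, Customer, Product, ProductProvider, MessagingWorkflow, MessagingComponent,
    Knowledge, KnowledgeStorage, PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource,
    TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource, DoclingSource,
    Task, TaskOutput, ConditionalTask, ResponseField, Evaluation, EvaluationItem,
    Team, TeamOutput, Formation, TeamMember, TaskHandlingProcess,
    Tool, ToolSet, CacheHandler, ToolHandler, ComposioHandler,
    ContextualMemory, ShortTermMemory, LongTermMemory, UserMemory, MemoryItem,
)

assert versionhq.__version__ == "1.1.12.2"
```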
@@ -0,0 +1,3 @@
+ from versionhq._utils.logger import Logger
+ from versionhq._utils.process_config import process_config
+ from versionhq._utils.usage_metrics import UsageMetrics
@@ -3,29 +3,39 @@ from versionhq.llm.model import DEFAULT_MODEL_NAME

  """
  In-house agents to be called across the project.
- [Rules] agents' names and roles start with `vhq_`.
+ [Rules] In-house agents have names and roles that start with `vhq_`. No customization is allowed by the client.
  """

  vhq_client_manager = Agent(
  role="vhq-Client Manager",
  goal="Efficiently communicate with the client on the task progress",
- llm=DEFAULT_MODEL_NAME
+ llm=DEFAULT_MODEL_NAME,
+ use_memory=True,
  )

+
  vhq_task_evaluator = Agent(
  role="vhq-Task Evaluator",
  goal="score the output according to the given evaluation criteria.",
  llm=DEFAULT_MODEL_NAME,
  llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
  maxit=1,
- max_retry_limit=1
+ max_retry_limit=1,
+ use_memory=True # refer to past evaluation records of similar tasks
  )

+
  vhq_formation_planner = Agent(
  role="vhq-Formation Planner",
  goal="Plan a formation of agents based on the given task description.",
  llm="gemini/gemini-2.0-flash-exp",
  llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
  maxit=1,
- max_retry_limit=1
+ max_retry_limit=1,
+ knowledge_sources=[
+ "Solo is a formation where a single agent with tools, knowledge, and memory handles tasks individually. When self-learning mode is on, it turns into the Random formation. A typical use case is an email agent drafting a promo message for the given audience using its own knowledge.",
+ "Supervising is a formation where the leader agent gives directions while sharing its knowledge and memory with subordinates. Subordinates can be solo agents or networks. A typical use case is the leader agent strategizing an outbound campaign plan and assigning components such as media mix or message creation to subordinate agents.",
+ "Network is a formation where multiple agents can share tasks, knowledge, and memory among network members without hierarchy. A typical use case is an email agent and a social media agent sharing product knowledge and deploying a multi-channel outbound campaign.",
+ "Random is a formation where a single agent handles tasks, asking for help from other agents without sharing its memory or knowledge. A typical use case is an email agent drafting a promo message for the given audience while asking other email agents that oversee other customer clusters for insights on tone, or an agent calling an external, third-party agent to deploy the campaign.",
+ ]
  )
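
For orientation, a sketch of a client-side agent configured with the same constructor fields the in-house agents use above (role, goal, llm, llm_config, maxit, max_retry_limit, use_memory, knowledge_sources); the role, goal, and knowledge string are illustrative, and runtime defaults are assumptions.

```python
# Sketch of a client-defined agent using the fields shown in this diff; values are illustrative.
from versionhq.agent.model import Agent
from versionhq.llm.model import DEFAULT_MODEL_NAME

campaign_planner = Agent(
    role="Campaign Planner",  # client agents should not use the reserved vhq_ prefix
    goal="Plan a multi-channel outbound campaign for the given product.",
    llm=DEFAULT_MODEL_NAME,
    llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
    maxit=1,
    max_retry_limit=1,
    use_memory=True,  # enables the memories set up in Agent.set_up_memory()
    knowledge_sources=["Acme targets mid-market SaaS companies in EMEA."],
)
```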
versionhq/agent/model.py CHANGED
@@ -1,6 +1,5 @@
  import os
  import uuid
- import datetime
  from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
  from typing_extensions import Self
  from dotenv import load_dotenv
@@ -353,7 +352,7 @@ class Agent(BaseModel):
  @model_validator(mode="after")
  def set_up_memory(self) -> Self:
  """
- Set up memories: stm, um
+ Set up memories: stm, ltm, and um
  """

  if self.use_memory == True:
@@ -5,7 +5,7 @@ from typing_extensions import Self

  from pydantic import BaseModel, Field, PrivateAttr, model_validator

- from versionhq._utils.logger import Logger
+ from versionhq._utils import Logger


  class RPMController(BaseModel):
@@ -56,7 +56,6 @@ class ProductProvider(ABC, BaseModel):
  return self


-
  class Product(BaseModel):
  """
  A class to store product information used to create outbound
@@ -0,0 +1,22 @@
+ from versionhq.knowledge.model import Knowledge, KnowledgeStorage
+ from versionhq.knowledge.source import (
+ CSVKnowledgeSource,
+ ExcelKnowledgeSource,
+ PDFKnowledgeSource,
+ TextFileKnowledgeSource,
+ JSONKnowledgeSource,
+ StringKnowledgeSource
+ )
+ from versionhq.knowledge.source_docling import DoclingSource
+
+ __all__ = [
+ "Knowledge",
+ "KnowledgeStorage",
+ "DoclingSource",
+ "CSVKnowledgeSource",
+ "ExcelKnowledgeSource",
+ "PDFKnowledgeSource",
+ "TextFileKnowledgeSource",
+ "JSONKnowledgeSource",
+ "StringKnowledgeSource"
+ ]
@@ -1,5 +1,3 @@
- import os
- from abc import ABC, abstractmethod
  from typing import Any, Dict, List, Optional
  from pydantic import BaseModel, ConfigDict, Field

@@ -280,7 +280,6 @@ class PDFKnowledgeSource(BaseFileKnowledgeSource):



-
  class CSVKnowledgeSource(BaseFileKnowledgeSource):
  """
  A knowledge source class that stores and queries CSV file content using embeddings.
versionhq/memory/model.py CHANGED
@@ -1,7 +1,53 @@
+ import datetime
  from typing import Any, Dict, List, Optional

  from versionhq.storage.rag_storage import RAGStorage
  from versionhq.storage.ltm_sqlite_storage import LTMSQLiteStorage
+ from versionhq._utils.logger import Logger
+
+
+ class MemoryData:
+ """
+ A class to store structured data in memory.
+ """
+ def __init__(
+ self,
+ agent: Optional[str] = None, # task execution agent (core)
+ task_description: Optional[str] = None,
+ task_output: Optional[str] = None,
+ config: Optional[Dict[str, Any]] = None
+ ):
+ self.agent = agent
+ self.task_description = task_description
+ self.task_output = task_output
+
+ if config:
+ for k, v in config.items():
+ setattr(self, k, str(v))
+
+
+
+ class MemoryMetadata:
+ """
+ A class to store structured metadata in memory.
+ """
+
+ def __init__(
+ self,
+ eval_criteria: Optional[str] = None,
+ score: Optional[int | float] = None,
+ suggestion: Optional[str] = None,
+ eval_by: Optional[str] = None, # task evaluator agent
+ config: Optional[Dict[str, Any]] = None
+ ):
+ self.eval_criteria = eval_criteria
+ self.score = score
+ self.suggestion = suggestion
+ self.eval_by = eval_by
+
+ if config:
+ for k, v in config.items():
+ setattr(self, k, str(v))


  class Memory:
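
A short sketch of how the new MemoryData and MemoryMetadata records might be built; the field names mirror the `__init__` signatures above, while the concrete values are illustrative.

```python
# Illustrative construction of the new memory records introduced in this hunk.
from versionhq.memory.model import MemoryData, MemoryMetadata

data = MemoryData(
    agent="vhq-Client Manager",
    task_description="Summarize the latest campaign results for the client.",
    task_output="Open rate improved 12% week over week.",
)
metadata = MemoryMetadata(
    eval_criteria="accuracy of the summary",
    score=0.9,
    suggestion="Cite the underlying report next time.",
    eval_by="vhq-Task Evaluator",
)
# extra keys passed via config are stringified and attached as attributes
tagged = MemoryData(task_description="draft promo email", config=dict(channel="email"))
```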
@@ -13,28 +59,46 @@ class Memory:
  self.storage = storage


- def save(self, value: Any, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None) -> None:
- metadata = metadata or {}
+ def save(
+ self,
+ data: MemoryData | Dict[str, Any],
+ metadata: Optional[MemoryMetadata | Dict[str, Any]] = None,
+ agent: Optional[str] = None
+ ) -> None:
+
+ """
+ Build dicts for data and metadata, dropping empty values, before storing them in the given storage.
+ """
+
+ if not data:
+ Logger(verbose=True).log(level="error", message="Missing data to store. Add either a dict or a MemoryData object.", color="red")
+ return None

- if agent:
- metadata["agent"] = agent
- self.storage.save(value, metadata)
+ metadata_dict = metadata if isinstance(metadata, dict) else metadata.__dict__ if isinstance(metadata, MemoryMetadata) else dict()
+ metadata_dict = {k: v for k, v in metadata_dict.items() if v} # remove empty values
+ data_dict = data if isinstance(data, dict) else data.__dict__ if isinstance(data, MemoryData) else dict()
+ data_dict = {k: v for k, v in data_dict.items() if v}
+
+ if agent and data_dict.get("agent") is None:
+ data_dict["agent"] = agent
+
+ if metadata_dict:
+ self.storage.save(data=data_dict, metadata=metadata_dict)
+ else:
+ self.storage.save(data=data_dict)


  def search(self, query: str, limit: int = 3, score_threshold: float = 0.35) -> List[Any]:
  return self.storage.search(query=query, limit=limit, score_threshold=score_threshold)


+ class MemoryItem:
+ """
+ A class to store an item to be saved in either long-term or short-term memory.
+ """

- class ShortTermMemoryItem:
- def __init__(
- self,
- data: Any,
- agent: Optional[str] = None,
- metadata: Optional[Dict[str, Any]] = None,
- ):
+ def __init__(self, data: MemoryData = None, metadata: Optional[MemoryMetadata] = None):
  self.data = data
- self.agent = agent
  self.metadata = metadata if metadata is not None else {}


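A sketch of calling the reworked `Memory.save()`, which now accepts either a dict or the record objects and strips empty values before delegating to storage; the storage construction here is an assumption.

```python
# Hedged usage sketch for Memory.save(); RAGStorage constructor arguments are assumed.
from versionhq.memory.model import Memory, MemoryData
from versionhq.storage.rag_storage import RAGStorage

memory = Memory(storage=RAGStorage(type="stm"))  # storage wiring is illustrative
memory.save(
    data=MemoryData(agent="vhq-Client Manager", task_output="Drafted the promo email."),
    metadata={"score": 8},
)
# plain dicts work too; falsy fields are dropped before the storage call
memory.save(data={"task_description": "draft promo email", "task_output": ""})
```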
@@ -70,12 +134,22 @@ class ShortTermMemory(Memory):
  super().__init__(storage)


- def save(self, value: Any, metadata: Optional[Dict[str, Any]] = None, agent: Optional[str] = None) -> None:
- item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
+ def save(
+ self,
+ task_description: str = None,
+ task_output: str = None,
+ agent: Optional[str] = None,
+ data: Optional[MemoryData] = None,
+ metadata: Optional[MemoryMetadata] = None
+ ) -> None:
+
+ data = data if data else MemoryData(task_description=task_description, task_output=task_output, agent=agent)
+ item = MemoryItem(data=data, metadata=metadata)
+
  if self.memory_provider == "mem0":
- item.data = f"Remember the following insights from Agent run: {item.data}"
+ item.data.task_output = f"Remember the following insights from Agent run: {item.data.task_output}"

- super().save(value=item.data, metadata=item.metadata, agent=item.agent)
+ super().save(data=item.data.__dict__, metadata=item.metadata.__dict__ if item.metadata else {})


  def search(self, query: str, limit: int = 3, score_threshold: float = 0.35,):
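
An illustrative call into the new `ShortTermMemory.save()` signature; the keyword names come from the hunk above, while the default storage selection and embedding backend are assumptions.

```python
# Sketch of the reworked ShortTermMemory interface; storage defaults are assumed.
from versionhq.memory.model import ShortTermMemory

stm = ShortTermMemory()
stm.save(
    task_description="Draft a promo email for the EMEA audience.",
    task_output="Subject: Meet your new analytics copilot ...",
    agent="vhq-Client Manager",
)
hits = stm.search(query="promo email", limit=3, score_threshold=0.35)
```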
@@ -89,29 +163,11 @@ class ShortTermMemory(Memory):
  raise Exception(f"An error occurred while resetting the short-term memory: {str(e)}")


-
- class LongTermMemoryItem:
- def __init__(
- self,
- agent: str,
- task: str,
- datetime: str,
- quality: Optional[int | float] = None,
- metadata: Optional[Dict[str, Any]] = None,
- ):
- self.task = task
- self.agent = agent
- self.quality = quality
- self.datetime = datetime
- self.metadata = metadata if metadata is not None else {}
-
-
-
  class LongTermMemory(Memory):
  """
  A class for managing cross runs data related to overall task executions.
  - Type: ltm
- - Storage: LTMSQLiteStorage
+ - Storage: LTMSQLiteStorage | RAGStorage
  """

  def __init__(self, storage=None, path=None):
@@ -121,19 +177,25 @@ class LongTermMemory(Memory):
  super().__init__(storage)


- def save(self, item: LongTermMemoryItem) -> None:
- metadata = item.metadata
- metadata.update({ "agent": item.agent })
- self.storage.save(
- task_description=item.task,
- score=metadata["quality"],
- metadata=metadata,
- datetime=item.datetime,
- )
+ def save(
+ self,
+ task_description: str = None,
+ task_output: str = None,
+ agent: Optional[str] = None,
+ data: Optional[MemoryData] = None,
+ metadata: Optional[MemoryMetadata] = None
+ ) -> None:
+
+ data = data if data else MemoryData(task_description=task_description, task_output=task_output, agent=agent)
+ item = MemoryItem(data=data, metadata=metadata)
+ super().save(data=item.data, metadata=item.metadata)


- def search(self, task: str, latest_n: int = 3) -> List[Dict[str, Any]]:
- return self.storage.load(task, latest_n)
+ def search(self, query: str, latest_n: int = 3) -> List[Dict[str, Any]]:
+ """
+ Query the storage and return the results up to latest_n.
+ """
+ return self.storage.load(query=query, latest_n=latest_n)


  def reset(self) -> None:
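
A similar sketch for `LongTermMemory`, whose `save()` now builds a MemoryData/MemoryItem pair and whose `search()` takes a query string; the default storage path is an assumption.

```python
# Sketch of LongTermMemory usage with the new save()/search() interface.
from versionhq.memory.model import LongTermMemory, MemoryMetadata

ltm = LongTermMemory()  # backed by LTMSQLiteStorage or RAGStorage per the docstring
ltm.save(
    task_description="Score the Q3 campaign recap.",
    task_output="Recap scored 8/10; missing citation for churn figures.",
    agent="vhq-Task Evaluator",
    metadata=MemoryMetadata(eval_criteria="completeness", score=8),
)
past_evals = ltm.search(query="campaign recap", latest_n=3)
```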
@@ -1,4 +1,5 @@
  import json
+ import datetime
  import sqlite3
  from pathlib import Path
  from typing import Any, Dict, List, Optional
@@ -27,88 +28,77 @@ class LTMSQLiteStorage:
  """
  Initializes the SQLite database and creates LTM table
  """
+
  try:
  with sqlite3.connect(self.db_path) as conn:
  cursor = conn.cursor()
  cursor.execute(
- """
+ """
  CREATE TABLE IF NOT EXISTS long_term_memories (
  id INTEGER PRIMARY KEY AUTOINCREMENT,
- task_description TEXT,
- metadata TEXT,
- datetime TEXT,
- score REAL
+ datetime REAL,
+ data TEXT,
+ metadata TEXT
  )
  """
  )
-
  conn.commit()

  except sqlite3.Error as e:
- self._logger.log(
- level="error",
- message=f"MEMORY ERROR: An error occurred during database initialization: {str(e)}",
- color="red",
- )
+ self._logger.log(level="error", message=f"MEMORY ERROR: An error occurred during database initialization: {str(e)}", color="red")
+

- def save(self, task_description: str, metadata: Dict[str, Any], datetime: str, score: int | float) -> None:
+ def save(self, data: Dict[str, Any] | str, metadata: Optional[Dict[str, Any]] = {}) -> None:
  """
  Saves data to the LTM table with error handling.
  """
+ data = data if isinstance(data, dict) else dict(data=data)
+
  try:
  with sqlite3.connect(self.db_path) as conn:
  cursor = conn.cursor()
  cursor.execute(
- """
- INSERT INTO long_term_memories (task_description, metadata, datetime, score)
- VALUES (?, ?, ?, ?)
+ """
+ INSERT INTO long_term_memories (datetime, data, metadata)
+ VALUES (?, ?, ?)
  """,
- (task_description, json.dumps(metadata), datetime, score),
+ (datetime.datetime.now().timestamp(), json.dumps(data), json.dumps(metadata)),
  )
  conn.commit()
  except sqlite3.Error as e:
- self._logger.log(
- level="error",
- message=f"MEMORY ERROR: An error occurred while saving to LTM: {str(e)}",
- color="red",
- )
+ self._logger.log(level="error", message=f"MEMORY ERROR: An error occurred while saving to LTM: {str(e)}", color="red")


- def load(self, task_description: str, latest_n: int) -> Optional[List[Dict[str, Any]]]:
+ def load(self, query: str, latest_n: int) -> Optional[List[Dict[str, Any]]]:
  """
- Queries the LTM table by task description with error handling.
+ Queries data rows in the storage with error handling.
  """
  try:
  with sqlite3.connect(self.db_path) as conn:
  cursor = conn.cursor()
  cursor.execute(
- f"""
- SELECT metadata, datetime, score
+ f"""
+ SELECT datetime, data, metadata
  FROM long_term_memories
- WHERE task_description = ?
- ORDER BY datetime DESC, score ASC
+ WHERE data LIKE '%{query}%'
+ ORDER BY datetime
  LIMIT {latest_n}
- """,
- (task_description,),
+ """
  )
  rows = cursor.fetchall()
  if rows:
  return [
  {
- "metadata": json.loads(row[0]),
- "datetime": row[1],
- "score": row[2],
+ "datetime": row[0],
+ "data": json.loads(row[1]),
+ "metadata": json.loads(row[2]),
  }
  for row in rows
  ]

  except sqlite3.Error as e:
- self._logger.log(
- level="error",
- message=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
- color="red",
- )
- return None
+ self._logger.log(level="error", message=f"MEMORY ERROR: An error occurred while querying LTM: {e}", color="red")
+ return None


  def reset(self) -> None:
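
To summarize the schema change, a minimal sketch of the reworked `LTMSQLiteStorage`: rows now carry a numeric timestamp plus JSON-encoded `data` and `metadata`, and `load()` filters with a LIKE match on the data column; the `db_path` argument is an assumption.

```python
# Hedged sketch of the new LTMSQLiteStorage schema (datetime REAL, data TEXT, metadata TEXT).
from versionhq.storage.ltm_sqlite_storage import LTMSQLiteStorage

storage = LTMSQLiteStorage(db_path="./ltm.db")  # constructor argument assumed
storage.save(
    data={"agent": "vhq-Task Evaluator", "task_description": "score the campaign recap"},
    metadata={"score": 8},
)
rows = storage.load(query="campaign recap", latest_n=3)
# each row: {"datetime": <unix timestamp>, "data": {...}, "metadata": {...}}
```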
@@ -123,9 +113,5 @@ class LTMSQLiteStorage:
  conn.commit()

  except sqlite3.Error as e:
- self._logger.log(
- level="error",
- message=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {str(e)}",
- color="red",
- )
+ self._logger.log(level="error", message=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {str(e)}", color="red")
  return None
@@ -47,7 +47,7 @@ class Mem0Storage(Storage):
  return role.replace("\n", "").replace(" ", "_").replace("/", "_")


- def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+ def save(self, value: Dict[str, Any] | str, metadata: Dict[str, Any]) -> None:
  user_id = self._get_user_id()
  agent_name = self._get_agent_name()

@@ -163,11 +163,32 @@ class RAGStorage(BaseRAGStorage):
  return f"{base_path}/{file_name}"


- def save(self, value: Any, metadata: Dict[str, Any]) -> None:
+ def _create_default_embedding_function(self):
+ from chromadb.utils.embedding_functions.openai_embedding_function import (
+ OpenAIEmbeddingFunction,
+ )
+
+ return OpenAIEmbeddingFunction(
+ api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
+ )
+
+
+ def _generate_embedding(self, text: str, metadata: Optional[Dict[str, Any]]) -> None:
+ if not hasattr(self, "app") or not hasattr(self, "collection"):
+ self._initialize_app()
+
+ if metadata:
+ self.collection.add(documents=[text], metadatas=[metadata, ], ids=[str(uuid.uuid4())])
+
+ else:
+ self.collection.add(documents=[text], ids=[str(uuid.uuid4())])
+
+
+ def save(self, data: Dict[str, Any] | str, metadata: Optional[Dict[str, Any]] = dict()) -> None:
  if not hasattr(self, "app") or not hasattr(self, "collection"):
  self._initialize_app()
  try:
- self._generate_embedding(value, metadata)
+ self._generate_embedding(text=str(data), metadata=metadata)
  except Exception as e:
  logging.error(f"Error during {self.type} save: {str(e)}")

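A brief sketch of the relocated RAGStorage embedding path: `save()` stringifies the payload and hands it to `_generate_embedding()`, which only attaches `metadatas` when metadata is non-empty; the constructor arguments are assumptions.

```python
# Illustrative RAGStorage save flow; constructor arguments are assumed.
from versionhq.storage.rag_storage import RAGStorage

storage = RAGStorage(type="stm")
storage.save(
    data={"task_description": "draft promo email", "task_output": "Subject: ..."},
    metadata={"agent": "vhq-Client Manager"},
)
# internally the document is embedded via the Chroma collection:
# collection.add(documents=[str(data)], metadatas=[metadata], ids=[uuid4()])
```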
@@ -197,17 +218,6 @@ class RAGStorage(BaseRAGStorage):
  return []


- def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> None:
- if not hasattr(self, "app") or not hasattr(self, "collection"):
- self._initialize_app()
-
- self.collection.add(
- documents=[text],
- metadatas=[metadata or {}],
- ids=[str(uuid.uuid4())],
- )
-
-
  def reset(self) -> None:
  try:
  if self.app:
@@ -220,12 +230,3 @@ class RAGStorage(BaseRAGStorage):
  pass
  else:
  raise Exception(f"An error occurred while resetting the {self.type} memory: {e}")
-
- def _create_default_embedding_function(self):
- from chromadb.utils.embedding_functions.openai_embedding_function import (
- OpenAIEmbeddingFunction,
- )
-
- return OpenAIEmbeddingFunction(
- api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
- )
@@ -7,7 +7,7 @@ from versionhq._utils.logger import Logger
  from versionhq.storage.utils import fetch_db_storage_path

  storage_path = fetch_db_storage_path()
- default_db_name = "task_outputs"
+ default_db_name = "task_output"


  class TaskOutputSQLiteStorage:
@@ -31,7 +31,7 @@
  cursor = conn.cursor()
  cursor.execute(
  """
- CREATE TABLE IF NOT EXISTS task_outputs (
+ CREATE TABLE IF NOT EXISTS task_output (
  task_id TEXT PRIMARY KEY,
  output JSON,
  task_index INTEGER,
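
Since the remaining hunks only rename `task_outputs` to `task_output`, here is a quick hedged check against the renamed table; the database file name is illustrative and the columns come from the CREATE TABLE statement above.

```python
# Hypothetical inspection of the renamed task_output table; file name is illustrative.
import sqlite3

with sqlite3.connect("task_output.db") as conn:
    rows = conn.execute(
        "SELECT task_id, output, task_index FROM task_output ORDER BY task_index"
    ).fetchall()
```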
@@ -52,7 +52,7 @@
  with sqlite3.connect(self.db_path) as conn:
  cursor = conn.cursor()
  cursor.execute(
- """INSERT OR REPLACE INTO task_outputs
+ """INSERT OR REPLACE INTO task_output
  (task_id, output, task_index, inputs, was_replayed, timestamp)
  VALUES (?, ?, ?, ?, ?, ?)
  """,
@@ -73,7 +73,7 @@
  fields.append(f"{k} = ?")
  values.append(json.dumps(v) if isinstance(v, dict) else v)

- query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"
+ query = f"UPDATE latest_kickoff_task_output SET {', '.join(fields)} WHERE task_index = ?"
  values.append(task_index)
  cursor.execute(query, tuple(values))
  conn.commit()
@@ -93,7 +93,7 @@
  cursor = conn.cursor()
  cursor.execute("""
  SELECT *
- FROM task_outputs
+ FROM task_output
  ORDER BY task_index
  """)

@@ -120,7 +120,7 @@
  try:
  with sqlite3.connect(self.db_path) as conn:
  cursor = conn.cursor()
- cursor.execute("DELETE FROM task_outputs")
+ cursor.execute("DELETE FROM task_output")
  conn.commit()

  except sqlite3.Error as e: