versionhq-1.2.2.3-py3-none-any.whl → versionhq-1.2.2.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +3 -1
- versionhq/_utils/llm_as_a_judge.py +65 -0
- versionhq/agent/model.py +43 -23
- versionhq/knowledge/source_docling.py +2 -1
- versionhq/knowledge/storage.py +2 -1
- versionhq/tool/model.py +1 -5
- versionhq/tool/rag_tool.py +112 -0
- {versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/METADATA +8 -3
- {versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/RECORD +12 -10
- {versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/LICENSE +0 -0
- {versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/WHEEL +0 -0
- {versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -21,6 +21,7 @@ from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, Depend
 from versionhq.task.model import Task, TaskOutput, ResponseField, TaskExecutionType
 from versionhq.task.evaluation import Evaluation, EvaluationItem
 from versionhq.tool.model import Tool, ToolSet
+from versionhq.tool.rag_tool import RagTool
 from versionhq.tool.cache_handler import CacheHandler
 from versionhq.tool.tool_handler import ToolHandler
 from versionhq.tool.composio_tool import ComposioHandler
@@ -31,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow
 
 
-__version__ = "1.2.2.3"
+__version__ = "1.2.2.4"
 __all__ = [
     "Agent",
@@ -80,6 +81,7 @@ __all__ = [
     "Tool",
     "ToolSet",
+    "RagTool",
     "CacheHandler",
     "ToolHandler",
     "ComposioHandler",
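With the export in place, RagTool is importable straight from the package root. A minimal sketch, assuming versionhq 1.2.2.4 is installed; the URL and query are placeholders:

    import versionhq

    print(versionhq.__version__)   # "1.2.2.4"

    # RagTool now sits alongside Tool and ToolSet in the public namespace.
    rag = versionhq.RagTool(url="https://example.com", query="what does the page cover?")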
versionhq/_utils/llm_as_a_judge.py
ADDED
@@ -0,0 +1,65 @@
+import json
+import numpy as np
+from sklearn.metrics import precision_score, recall_score, roc_auc_score, cohen_kappa_score
+from typing import List, Tuple, Dict, Any
+from pathlib import Path
+
+
+class LLMJudge:
+
+    class MockLLM:
+        def _generate(self, prompt: str) -> str:
+            return str(np.random.random())
+
+
+    def __init__(self, model: MockLLM = None):
+        self.model = model if model else self.MockLLM()
+
+
+    def judge_summary(self, original_text: str, summary: str) -> float:
+        prompt = f"""Evaluate the quality of the following summary on a scale of 0 to 1, where 0 is poor and 1 is excellent.
+Consider accuracy, completeness, and conciseness.
+Original text: {original_text}
+Summary: {summary}
+Quality score:"""
+        response = self.model._generate(prompt)
+        score = float(response.strip())
+        return score
+
+
+def generate_summaries(file_path: str, data: List[Dict[str, Any]] = None, summarizer: Any = None) -> List[Tuple[str, str, float]]:
+    """Generates a list of tuple with an original text, summary text, and human judge score."""
+    if not data:
+        with open(file_path, 'r') as file:
+            data = json.load(file)
+    summaries = []
+    for item in data:
+        original_text = item['text']
+        summary = summarizer.summarize(original_text)
+        human_score = item['human_score']
+        summaries.append((original_text, summary, human_score))
+
+    return summaries
+
+
+def validate(judge: LLMJudge, data: List[Tuple[str, str, float]], threshold: float = 0.5):
+    human_scores = []
+    predicted_scores = []
+
+    for original_text, summary, human_score in data:
+        predicted_score = judge.judge_summary(original_text=original_text, summary=summary)
+        human_scores.append(human_score)
+        predicted_scores.append(predicted_score)
+
+    human_binary = [1 if score >= threshold else 0 for score in human_scores]
+    pred_binary = [1 if score >= threshold else 0 for score in predicted_scores]
+    precision = precision_score(human_binary, pred_binary, zero_division=0)
+    recall = recall_score(human_binary, pred_binary, zero_division=0)
+    auroc = roc_auc_score(human_binary, pred_binary, average='weighted')
+    kappa = cohen_kappa_score(human_binary, pred_binary)
+    return {
+        "precision": precision,
+        "recall": recall,
+        "auroc": auroc,
+        "cohen_kappa": kappa
+    }
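The module ships a MockLLM stand-in, so the judge can be exercised without a real model call. A minimal usage sketch for the new validate helper; the (original, summary, human_score) triples below are illustrative, not from the package:

    from versionhq._utils.llm_as_a_judge import LLMJudge, validate

    # Hand-built records instead of loading them via generate_summaries().
    data = [
        ("The cat sat on the mat.", "A cat sat on a mat.", 0.9),
        ("Revenue rose 12% on strong cloud demand.", "Revenue fell sharply.", 0.1),
    ]

    judge = LLMJudge()   # defaults to the built-in MockLLM, which returns random scores
    metrics = validate(judge=judge, data=data, threshold=0.5)
    print(metrics)       # {"precision": ..., "recall": ..., "auroc": ..., "cohen_kappa": ...}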
versionhq/agent/model.py
CHANGED
@@ -8,7 +8,7 @@ from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_val
 from pydantic_core import PydanticCustomError
 
 from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME, PROVIDERS
-from versionhq.tool.model import Tool, ToolSet
+from versionhq.tool.model import Tool, ToolSet, BaseTool
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
 from versionhq.memory.contextual_memory import ContextualMemory
 from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
@@ -39,7 +39,7 @@ class Agent(BaseModel):
     goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
     backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
     skillsets: Optional[List[str]] = Field(default_factory=list)
-    tools: Optional[List[
+    tools: Optional[List[Any]] = Field(default_factory=list)
 
     # knowledge
     knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
@@ -122,29 +122,41 @@ class Agent(BaseModel):
         """
         Similar to the LLM set up, when the agent has tools, we will declare them using the Tool class.
         """
-
-        pass
+        from versionhq.tool.rag_tool import RagTool
 
-
-
+        if not self.tools:
+            return self
 
-
-
+        tool_list = []
+        for item in self.tools:
+            match item:
+                case RagTool() | BaseTool():
                     tool_list.append(item)
 
-
-
-
+                case Tool():
+                    if item.func is not None:
+                        tool_list.append(item)
 
-
-
+                case ToolSet():
+                    if item.tool and item.tool.func is not None:
+                        tool_list.append(item)
 
-
-
-
+                case dict():
+                    if "func" in item:
+                        tool = Tool(func=item["func"])
+                        for k, v in item.items():
+                            if k in Tool.model_fields.keys() and k != "func" and v is not None:
+                                setattr(tool, k, v)
+                        tool_list.append(tool)
 
-
+                case _:
+                    if item.__base__ == BaseTool or item.__base__ == RagTool or item.__base__ == Tool:
+                        tool_list.append(item)
+                    else:
+                        Logger(**self._logger_config, filename=self.key).log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
+                        raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})
 
+        self.tools = tool_list
         return self
 
 
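Per the validator above, an agent's tools list may now mix RagTool or BaseTool instances, Tool objects, ToolSet objects, and plain dicts that carry a func key; anything else without a function raises invalid_tool. A hedged sketch of the accepted shapes (the role, goal, and helper function are made up for illustration):

    import versionhq as vhq

    def fetch_fx_rate(pair: str) -> str:
        # hypothetical helper used only for this example
        return f"rate for {pair}"

    agent = vhq.Agent(
        role="Currency Analyst",
        goal="answer FX questions with fresh data",
        tools=[
            vhq.Tool(func=fetch_fx_rate),            # Tool carrying a func -> kept
            {"func": fetch_fx_rate, "name": "fx"},   # dict with "func" -> coerced into a Tool
            vhq.RagTool(url="https://example.com"),  # RagTool instance -> kept as-is
        ],
    )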
@@ -158,7 +170,7 @@ class Agent(BaseModel):
         from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_FULL, BACKSTORY_SHORT
         backstory = ""
         skills = ", ".join([item for item in self.skillsets]) if self.skillsets else ""
-        tools = ", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else ""
+        tools = ", ".join([item.name for item in self.tools if hasattr(item, "name") and item.name is not None]) if self.tools else ""
         role = self.role.lower()
         goal = self.goal.lower()
 
@@ -199,7 +211,7 @@ class Agent(BaseModel):
                 if isinstance(item, BaseKnowledgeSource):
                     knowledge_sources.append(item)
 
-                elif isinstance(item, str) and "http" in item:
+                elif isinstance(item, str) and "http" in item and DoclingSource._validate_url(url=item) == True:
                     docling_fp.append(item)
 
                 elif isinstance(item, str):
@@ -223,8 +235,8 @@ class Agent(BaseModel):
 
             self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)
 
-        except:
-            Logger(**self._logger_config, filename=self.key).log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")
+        except Exception as e:
+            Logger(**self._logger_config, filename=self.key).log(level="warning", message=f"We cannot find the format for the source. Add BaseKnowledgeSource objects instead. {str(e)}", color="yellow")
 
         return self
 
@@ -506,10 +518,13 @@ class Agent(BaseModel):
         """
 
         from versionhq.task.model import Task
+        from versionhq.tool.rag_tool import RagTool
        from versionhq.knowledge._utils import extract_knowledge_context
 
         task: InstanceOf[Task] = task
-
+        all_tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
+        rag_tools: Optional[List[RagTool]] = [item for item in all_tools if isinstance(item, RagTool)] if all_tools else None
+        tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = [item for item in all_tools if not isinstance(item, RagTool)] if all_tools else None
 
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()
@@ -523,6 +538,12 @@ class Agent(BaseModel):
         if agent_knowledge_context:
             task_prompt += agent_knowledge_context
 
+        if rag_tools:
+            for item in rag_tools:
+                rag_tool_context = item.run(agent=self, query=task.description)
+                if rag_tool_context:
+                    task_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
+
         if self.with_memory == True:
             contextual_memory = ContextualMemory(
                 memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
@@ -533,7 +554,6 @@ class Agent(BaseModel):
             if memory.strip() != "":
                 task_prompt += memory.strip()
 
-
        ## comment out for now
        # if self.networks and self.networks._train:
        #     task_prompt = self._training_handler(task_prompt=task_prompt)
versionhq/knowledge/storage.py
CHANGED
@@ -125,7 +125,8 @@ class KnowledgeStorage(BaseKnowledgeStorage):
     def search(self, query: List[str], limit: int = 3, filter: Optional[dict] = None, score_threshold: float = 0.35) -> List[Dict[str, Any]]:
         with suppress_logging():
             if self.collection:
-
+                query_texts = ", ".join(query) if isinstance(query, list) else str(query)
+                fetched = self.collection.query(query_texts=query_texts, n_results=limit, where=filter)
                 results = []
                 for i in range(len(fetched["ids"][0])):
                     result = {
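The added lines normalize the query before it reaches the underlying collection: a list collapses into one comma-separated string, anything else is cast to str. Roughly equivalent to this standalone snippet (inputs are hypothetical):

    query = ["pricing page", "enterprise tier"]
    query_texts = ", ".join(query) if isinstance(query, list) else str(query)
    # -> "pricing page, enterprise tier", passed as query_texts to collection.query()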
versionhq/tool/model.py
CHANGED
@@ -43,7 +43,6 @@ class BaseTool(ABC, BaseModel):
             { "__annotations__": { k: v for k, v in cls._run.__annotations__.items() if k != "return" }},
         )
 
-
     @field_validator("properties", mode="before")
     @classmethod
     def _default_properties(cls, v: Dict[str, Any]) -> Dict[str, Any]:
@@ -81,10 +80,9 @@ class BaseTool(ABC, BaseModel):
         return self
 
     @abstractmethod
-    def _run(self, *args: Any, **kwargs: Any
+    def _run(self, *args: Any, **kwargs: Any) -> Any:
         """any handling"""
 
-
     @staticmethod
     def _get_arg_annotations(annotation: type[Any] | None) -> str:
         if annotation is None:
@@ -143,11 +141,9 @@ class BaseTool(ABC, BaseModel):
         return create_model(schema_name, **fields)
 
 
-
 class Tool(BaseTool):
     func: Callable = Field(default=None)
 
-
     @model_validator(mode="after")
     def validate_func(self) -> Self:
         if not self.func and not self._run:
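BaseTool stays the abstract base (subclasses implement _run), while Tool additionally accepts a plain func. A minimal subclass sketch; the class name and body are illustrative, and it assumes BaseTool's remaining fields all have defaults:

    from versionhq.tool.model import BaseTool

    class EchoTool(BaseTool):
        """Illustrative subclass: _run just echoes its arguments back."""

        def _run(self, *args, **kwargs):
            return {"args": args, "kwargs": kwargs}

    tool = EchoTool()
    print(tool._run("ping", mode="test"))   # {'args': ('ping',), 'kwargs': {'mode': 'test'}}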
versionhq/tool/rag_tool.py
ADDED
@@ -0,0 +1,112 @@
+import re
+import requests
+import html2text
+import gzip
+import http.client
+import urllib.request
+from urllib.request import Request
+from textwrap import dedent
+from typing import Any, Optional, List, Dict
+
+from pydantic import Field
+
+from versionhq.agent.model import Agent
+from versionhq.tool.model import BaseTool
+from versionhq._utils.logger import Logger
+
+
+
+class RagTool(BaseTool):
+    """A Pydantic class to store a RAG tool object. Inherited from BaseTool"""
+
+    api_key_name: str = Field(default=None)
+    api_endpoint: Optional[str] = Field(default=None)
+
+    url: Optional[str] = Field(default=None, description="url to scrape")
+    headers: Optional[Dict[str, Any]] = Field(default_factory=dict, description="request headers")
+
+    sources: Optional[List[Any]] = Field(default_factory=list, description="indexed data sources")
+    query: Optional[str] = Field(default=None)
+    text: Optional[str] = Field(default=None, description="text data source")
+
+
+    def _sanitize_source_code(self, source_code: str | bytes = None) -> str | None:
+        if not source_code:
+            return None
+
+        if isinstance(source_code, bytes):
+            source_code = source_code.decode('utf-8')
+
+        h = html2text.HTML2Text()
+        h.ignore_links = False
+        text = h.handle(source_code)
+        text = re.sub(r"[^a-zA-Z$0-9\s\n]", "", text)
+        return dedent(text)
+
+
+    def _scrape_url(self, url: str = None) -> str | None:
+        url = url if url else self.url
+
+        if not url:
+            return None
+
+        http.client.HTTPConnection.debuglevel = 1
+
+        try:
+            req = Request(url=url, headers=self.headers, origin_req_host=url, method="GET")
+            res = ""
+
+            with urllib.request.urlopen(req) as url:
+                if url.info().get("Content-Encoding") == "gzip":
+                    res = gzip.decompress(url.read())
+                else:
+                    res = url.read()
+
+            text = self._sanitize_source_code(source_code=res)
+            return text
+
+        except requests.exceptions.HTTPError as e:
+            Logger().log(level="error", message=f"HTTP error occurred: {str(e)}", color="red")
+            return None
+
+        except Exception as e:
+            Logger().log(level="error", message=f"Error fetching URL {self.api_endpoint}: {str(e)}", color="red")
+            return None
+
+
+    def store_data(self, agent: Agent = None) -> None:
+        """Stores retrieved data in the storage"""
+        if not agent:
+            return
+
+        text = self.text if self.text else self._scrape_url(self.url)
+        self.text = text
+        knowledge_sources = [*agent.knowledge_sources, str(text), ] if agent.knowledge_sources else [str(text),]
+        agent.update(knowledge_sources=knowledge_sources)
+
+
+    def _run(self, agent: Agent = None, query: str = None) -> List[str]:
+        query = query if query else self.query
+
+        if not query or not agent:
+            text = self.text if self.text else self._scrape_url(self.url)
+            self.text = text
+            return [text,]
+
+        else:
+            results, res = [], []
+            if agent._knowledge:
+                res = agent._knowledge.query(query=[query], limit=5)
+            else:
+                self.store_data(agent=agent)
+                res = agent._knowledge.query(query=[query], limit=5)
+
+            for item in res:
+                if isinstance(item, dict):
+                    results.append(item["context"])
+                else:
+                    results.append(str(item))
+            return results
+
+    def run(self, *args, **kwargs):
+        return self._run(*args, **kwargs)
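Tied to the Agent changes above, a RagTool attached to an agent is executed while the task prompt is built: the agent calls item.run(agent=self, query=task.description) and folds the returned snippets into the prompt. A hedged end-to-end sketch; the URL, role, goal, and query are placeholders, and network access plus a configured LLM/embedder are assumed:

    import versionhq as vhq

    rag = vhq.RagTool(url="https://example.com/docs")

    # Standalone: with no agent or query, run() scrapes the URL and returns the sanitized text.
    pages = rag.run()

    # With an agent: store_data() indexes the scraped text as a knowledge source on first use,
    # then the agent's knowledge store is queried for the top matches.
    agent = vhq.Agent(
        role="Doc Assistant",
        goal="answer questions from the scraped docs",
        tools=[rag],
    )
    snippets = rag.run(agent=agent, query="What does the quickstart cover?")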
{versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.2.3
+Version: 1.2.2.4
 Summary: An agentic orchestration framework for building agent networks that handle task automation.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -50,7 +50,7 @@ Requires-Dist: werkzeug>=3.1.3
 Requires-Dist: typing
 Requires-Dist: json-repair
 Requires-Dist: litellm>=1.55.8
-Requires-Dist: openai>=1.
+Requires-Dist: openai>=1.64.0
 Requires-Dist: composio-openai>=0.6.9
 Requires-Dist: composio>=0.1.0
 Requires-Dist: setuptools>=75.6.0
@@ -69,7 +69,7 @@ Requires-Dist: matplotlib>=3.10.0
 Provides-Extra: docling
 Requires-Dist: docling>=2.17.0; extra == "docling"
 Provides-Extra: mem0ai
-Requires-Dist: mem0ai>=0.1.
+Requires-Dist: mem0ai>=0.1.55; extra == "mem0ai"
 Provides-Extra: pdfplumber
 Requires-Dist: pdfplumber>=0.11.5; extra == "pdfplumber"
 Provides-Extra: pandas
@@ -78,6 +78,11 @@ Provides-Extra: numpy
 Requires-Dist: numpy>=1.26.4; extra == "numpy"
 Provides-Extra: pygraphviz
 Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
+Provides-Extra: tools
+Requires-Dist: html2text>=2024.2.26; extra == "tools"
+Requires-Dist: sec-api>=1.0.28; extra == "tools"
+Provides-Extra: eval
+Requires-Dist: scikit-learn>=1.6.1; extra == "eval"
 
 # Overview
 
{versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/RECORD
CHANGED
@@ -1,13 +1,14 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=PqOgmog9JVNxGm2LP1RV5x3D-U7PClfcKFWVAE6dXCM,2980
 versionhq/_utils/__init__.py,sha256=d-vYVcORZKG-kkLe_fzE8VbViDpAk9DDOKe2fVK25ew,178
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
+versionhq/_utils/llm_as_a_judge.py,sha256=RM0oYfoeanuUyUL3Ewl6_8Xn1F5Axd285UMH46kxG1I,2378
 versionhq/_utils/logger.py,sha256=iHxGjm3BvUo5dHKLU88_pc0Z45wzSHOjyJGQkb7OADk,3255
 versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7noD2E,782
 versionhq/_utils/usage_metrics.py,sha256=xgYGRW3OTuK9EJyi3QYJeYcJl7dL27olcWaLo_7B3JE,2246
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/inhouse_agents.py,sha256=WAbyog-6pKwa8ru9u_KJgD_ViTLv4ZRECks1Znch47E,2638
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=xebClhCdx0Xt-_or3Ne_fTh8eOWSDEbi4r3WvGSOYcI,26595
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -28,8 +29,8 @@ versionhq/knowledge/_utils.py,sha256=YWRF8U533cfZes_gZqUvdj-K24MD2ri1R0gjc_aPYyc
 versionhq/knowledge/embedding.py,sha256=KfHc__1THxb5jrg1EMrF-v944RDuIr2hE0l-MtM3Bp0,6826
 versionhq/knowledge/model.py,sha256=ixH8n5kLtJEp1nPAFYA0piYm-n0nnFDtWFp0r9YEVAs,1787
 versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08,13630
-versionhq/knowledge/source_docling.py,sha256=
-versionhq/knowledge/storage.py,sha256=
+versionhq/knowledge/source_docling.py,sha256=dcu1ITqPXwWZ_lK-6tykEKhhC82eNRTMoWRpxK9Kzls,5441
+versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/llm/llm_vars.py,sha256=wjQK20cKvph6Vq1v71o4d16zBGcHlwq0bzOT_zWno7w,7041
 versionhq/llm/model.py,sha256=HIBmf8FYV6-cDbZK1ZBu6z3dmF0ZUbKbCelfwxMlgyY,17177
@@ -58,10 +59,11 @@ versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-
 versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
 versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtgpqOzKZQ,1843
 versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
-versionhq/tool/model.py,sha256=
+versionhq/tool/model.py,sha256=Nc2f9frTK5tH4kh6EeEAk1Fi1w19kEXLOcsBwHCS1a4,12189
+versionhq/tool/rag_tool.py,sha256=qm_nDWs-WyDvrxZeZAL2AkswfUWGPZS4zybz0o6wOFI,3653
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.2.2.
-versionhq-1.2.2.
-versionhq-1.2.2.
-versionhq-1.2.2.
-versionhq-1.2.2.
+versionhq-1.2.2.4.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.2.4.dist-info/METADATA,sha256=fX-azucXgCg-Fm05snnLT02ttDz5u10xMOaBjGmpkp4,22213
+versionhq-1.2.2.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+versionhq-1.2.2.4.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.2.4.dist-info/RECORD,,

{versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/LICENSE
File without changes
{versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/WHEEL
File without changes
{versionhq-1.2.2.3.dist-info → versionhq-1.2.2.4.dist-info}/top_level.txt
File without changes