versionhq 1.2.2.2__py3-none-any.whl → 1.2.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -17,10 +17,11 @@ from versionhq.clients.workflow.model import MessagingWorkflow, MessagingCompone
17
17
  from versionhq.knowledge.model import Knowledge, KnowledgeStorage
18
18
  from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
19
19
  from versionhq.knowledge.source_docling import DoclingSource
20
- from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType
20
+ from versionhq.task_graph.model import TaskStatus, TaskGraph, Node, Edge, DependencyType, Condition, ConditionType
21
21
  from versionhq.task.model import Task, TaskOutput, ResponseField, TaskExecutionType
22
22
  from versionhq.task.evaluation import Evaluation, EvaluationItem
23
23
  from versionhq.tool.model import Tool, ToolSet
24
+ from versionhq.tool.rag_tool import RagTool
24
25
  from versionhq.tool.cache_handler import CacheHandler
25
26
  from versionhq.tool.tool_handler import ToolHandler
26
27
  from versionhq.tool.composio_tool import ComposioHandler
@@ -31,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
31
32
  from versionhq.task_graph.draft import workflow
32
33
 
33
34
 
34
- __version__ = "1.2.2.2"
35
+ __version__ = "1.2.2.4"
35
36
  __all__ = [
36
37
  "Agent",
37
38
 
@@ -67,6 +68,8 @@ __all__ = [
67
68
  "Node",
68
69
  "Edge",
69
70
  "DependencyType",
71
+ "Condition",
72
+ "ConditionType",
70
73
 
71
74
  "Task",
72
75
  "TaskOutput",
@@ -78,6 +81,7 @@ __all__ = [
78
81
 
79
82
  "Tool",
80
83
  "ToolSet",
84
+ "RagTool",
81
85
  "CacheHandler",
82
86
  "ToolHandler",
83
87
  "ComposioHandler",
versionhq/_utils/llm_as_a_judge.py ADDED
@@ -0,0 +1,65 @@
1
+ import json
2
+ import numpy as np
3
+ from sklearn.metrics import precision_score, recall_score, roc_auc_score, cohen_kappa_score
4
+ from typing import List, Tuple, Dict, Any
5
+ from pathlib import Path
6
+
7
+
8
+ class LLMJudge:
9
+
10
+ class MockLLM:
11
+ def _generate(self, prompt: str) -> str:
12
+ return str(np.random.random())
13
+
14
+
15
+ def __init__(self, model: MockLLM = None):
16
+ self.model = model if model else self.MockLLM()
17
+
18
+
19
+ def judge_summary(self, original_text: str, summary: str) -> float:
20
+ prompt = f"""Evaluate the quality of the following summary on a scale of 0 to 1, where 0 is poor and 1 is excellent.
21
+ Consider accuracy, completeness, and conciseness.
22
+ Original text: {original_text}
23
+ Summary: {summary}
24
+ Quality score:"""
25
+ response = self.model._generate(prompt)
26
+ score = float(response.strip())
27
+ return score
28
+
29
+
30
+ def generate_summaries(file_path: str, data: List[Dict[str, Any]] = None, summarizer: Any = None) -> List[Tuple[str, str, float]]:
31
+ """Generates a list of tuple with an original text, summary text, and human judge score."""
32
+ if not data:
33
+ with open(file_path, 'r') as file:
34
+ data = json.load(file)
35
+ summaries = []
36
+ for item in data:
37
+ original_text = item['text']
38
+ summary = summarizer.summarize(original_text)
39
+ human_score = item['human_score']
40
+ summaries.append((original_text, summary, human_score))
41
+
42
+ return summaries
43
+
44
+
45
+ def validate(judge: LLMJudge, data: List[Tuple[str, str, float]], threshold: float = 0.5):
46
+ human_scores = []
47
+ predicted_scores = []
48
+
49
+ for original_text, summary, human_score in data:
50
+ predicted_score = judge.judge_summary(original_text=original_text, summary=summary)
51
+ human_scores.append(human_score)
52
+ predicted_scores.append(predicted_score)
53
+
54
+ human_binary = [1 if score >= threshold else 0 for score in human_scores]
55
+ pred_binary = [1 if score >= threshold else 0 for score in predicted_scores]
56
+ precision = precision_score(human_binary, pred_binary, zero_division=0)
57
+ recall = recall_score(human_binary, pred_binary, zero_division=0)
58
+ auroc = roc_auc_score(human_binary, pred_binary, average='weighted')
59
+ kappa = cohen_kappa_score(human_binary, pred_binary)
60
+ return {
61
+ "precision": precision,
62
+ "recall": recall,
63
+ "auroc": auroc,
64
+ "cohen_kappa": kappa
65
+ }
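
The new _utils/llm_as_a_judge.py helper scores generated summaries and compares them with human judgments via precision, recall, AUROC, and Cohen's kappa. A rough usage sketch, assuming the eval extra (scikit-learn) is installed; the NaiveSummarizer class and the judge_samples.json file of {"text", "human_score"} records are illustrative placeholders, not part of the package:

    from versionhq._utils.llm_as_a_judge import LLMJudge, generate_summaries, validate

    class NaiveSummarizer:
        # illustrative stand-in for a real summarizer object
        def summarize(self, text: str) -> str:
            return text.split(".")[0]

    # hypothetical data file; each record needs "text" and "human_score" keys
    data = generate_summaries(file_path="judge_samples.json", summarizer=NaiveSummarizer())
    metrics = validate(judge=LLMJudge(), data=data, threshold=0.5)
    print(metrics)  # {"precision": ..., "recall": ..., "auroc": ..., "cohen_kappa": ...}
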
versionhq/agent/model.py CHANGED
@@ -8,7 +8,7 @@ from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_val
8
8
  from pydantic_core import PydanticCustomError
9
9
 
10
10
  from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME, PROVIDERS
11
- from versionhq.tool.model import Tool, ToolSet
11
+ from versionhq.tool.model import Tool, ToolSet, BaseTool
12
12
  from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
13
13
  from versionhq.memory.contextual_memory import ContextualMemory
14
14
  from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
@@ -39,7 +39,7 @@ class Agent(BaseModel):
39
39
  goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
40
40
  backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
41
41
  skillsets: Optional[List[str]] = Field(default_factory=list)
42
- tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool] | Any]] = Field(default_factory=list)
42
+ tools: Optional[List[Any]] = Field(default_factory=list)
43
43
 
44
44
  # knowledge
45
45
  knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
@@ -122,29 +122,41 @@ class Agent(BaseModel):
122
122
  """
123
123
  Similar to the LLM set up, when the agent has tools, we will declare them using the Tool class.
124
124
  """
125
- if not self.tools:
126
- pass
125
+ from versionhq.tool.rag_tool import RagTool
127
126
 
128
- else:
129
- tool_list = []
127
+ if not self.tools:
128
+ return self
130
129
 
131
- for item in self.tools:
132
- if isinstance(item, Tool) or isinstance(item, ToolSet):
130
+ tool_list = []
131
+ for item in self.tools:
132
+ match item:
133
+ case RagTool() | BaseTool():
133
134
  tool_list.append(item)
134
135
 
135
- elif isinstance(item, dict) and "func" in item:
136
- tool = Tool(**item)
137
- tool_list.append(tool)
136
+ case Tool():
137
+ if item.func is not None:
138
+ tool_list.append(item)
138
139
 
139
- elif type(item) is Tool and hasattr(item, "func"):
140
- tool_list.append(item)
140
+ case ToolSet():
141
+ if item.tool and item.tool.func is not None:
142
+ tool_list.append(item)
141
143
 
142
- else:
143
- Logger(**self.loger_config, filename=self.key).log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
144
- raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})
144
+ case dict():
145
+ if "func" in item:
146
+ tool = Tool(func=item["func"])
147
+ for k, v in item.items():
148
+ if k in Tool.model_fields.keys() and k != "func" and v is not None:
149
+ setattr(tool, k, v)
150
+ tool_list.append(tool)
145
151
 
146
- self.tools = tool_list
152
+ case _:
153
+ if item.__base__ == BaseTool or item.__base__ == RagTool or item.__base__ == Tool:
154
+ tool_list.append(item)
155
+ else:
156
+ Logger(**self._logger_config, filename=self.key).log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
157
+ raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})
147
158
 
159
+ self.tools = tool_list
148
160
  return self
149
161
 
150
162
 
@@ -158,7 +170,7 @@ class Agent(BaseModel):
158
170
  from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_FULL, BACKSTORY_SHORT
159
171
  backstory = ""
160
172
  skills = ", ".join([item for item in self.skillsets]) if self.skillsets else ""
161
- tools = ", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else ""
173
+ tools = ", ".join([item.name for item in self.tools if hasattr(item, "name") and item.name is not None]) if self.tools else ""
162
174
  role = self.role.lower()
163
175
  goal = self.goal.lower()
164
176
 
@@ -199,7 +211,7 @@ class Agent(BaseModel):
199
211
  if isinstance(item, BaseKnowledgeSource):
200
212
  knowledge_sources.append(item)
201
213
 
202
- elif isinstance(item, str) and "http" in item:
214
+ elif isinstance(item, str) and "http" in item and DoclingSource._validate_url(url=item) == True:
203
215
  docling_fp.append(item)
204
216
 
205
217
  elif isinstance(item, str):
@@ -223,8 +235,8 @@ class Agent(BaseModel):
223
235
 
224
236
  self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)
225
237
 
226
- except:
227
- Logger(**self._logger_config, filename=self.key).log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")
238
+ except Exception as e:
239
+ Logger(**self._logger_config, filename=self.key).log(level="warning", message=f"We cannot find the format for the source. Add BaseKnowledgeSource objects instead. {str(e)}", color="yellow")
228
240
 
229
241
  return self
230
242
 
@@ -506,10 +518,13 @@ class Agent(BaseModel):
506
518
  """
507
519
 
508
520
  from versionhq.task.model import Task
521
+ from versionhq.tool.rag_tool import RagTool
509
522
  from versionhq.knowledge._utils import extract_knowledge_context
510
523
 
511
524
  task: InstanceOf[Task] = task
512
- tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
525
+ all_tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
526
+ rag_tools: Optional[List[RagTool]] = [item for item in all_tools if isinstance(item, RagTool)] if all_tools else None
527
+ tools: Optional[List[Tool | ToolSet | RagTool | Type[BaseTool]]] = [item for item in all_tools if not isinstance(item, RagTool)] if all_tools else None
513
528
 
514
529
  if self.max_rpm and self._rpm_controller:
515
530
  self._rpm_controller._reset_request_count()
@@ -523,6 +538,12 @@ class Agent(BaseModel):
523
538
  if agent_knowledge_context:
524
539
  task_prompt += agent_knowledge_context
525
540
 
541
+ if rag_tools:
542
+ for item in rag_tools:
543
+ rag_tool_context = item.run(agent=self, query=task.description)
544
+ if rag_tool_context:
545
+ task_prompt += ",".join(rag_tool_context) if isinstance(rag_tool_context, list) else str(rag_tool_context)
546
+
526
547
  if self.with_memory == True:
527
548
  contextual_memory = ContextualMemory(
528
549
  memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
@@ -533,7 +554,6 @@ class Agent(BaseModel):
533
554
  if memory.strip() != "":
534
555
  task_prompt += memory.strip()
535
556
 
536
-
537
557
  ## comment out for now
538
558
  # if self.networks and self.networks._train:
539
559
  # task_prompt = self._training_handler(task_prompt=task_prompt)
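
With the reworked validator, Agent.tools accepts Tool and ToolSet instances, RagTool/BaseTool objects and subclasses, and plain dicts carrying a "func" key. A small sketch of the dict form, assuming default model settings; the function and field values are illustrative:

    import versionhq as vhq

    def fetch_rates(currency: str = "USD") -> str:
        # illustrative tool function
        return f"latest rates for {currency}"

    agent = vhq.Agent(
        role="fx analyst",
        goal="report exchange rates",
        tools=[{"func": fetch_rates, "name": "rate fetcher"}],
    )
    print(agent.tools)  # the dict is converted into a Tool instance by the validator
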
versionhq/agent_network/model.py CHANGED
@@ -13,7 +13,7 @@ from pydantic_core import PydanticCustomError, core_schema
13
13
 
14
14
  from versionhq.agent.model import Agent
15
15
  from versionhq.task.model import Task, TaskOutput, TaskExecutionType, ResponseField
16
- from versionhq.task_graph.model import TaskGraph, Node, Edge, TaskStatus, DependencyType
16
+ from versionhq.task_graph.model import TaskGraph, Node, Edge, TaskStatus, DependencyType, Condition
17
17
  from versionhq._utils.logger import Logger
18
18
  # from versionhq.recording.usage_metrics import UsageMetrics
19
19
 
@@ -81,7 +81,7 @@ class AgentNetwork(BaseModel):
81
81
  # task execution rules
82
82
  prompt_file: str = Field(default="", description="absolute file path to the prompt file that stores jsonified prompts")
83
83
  process: TaskHandlingProcess = Field(default=TaskHandlingProcess.SEQUENTIAL)
84
- consent_trigger: Optional[Callable] = Field(default=None, description="returns bool")
84
+ consent_trigger: Optional[Callable | Condition] = Field(default=None, description="returns bool")
85
85
 
86
86
  # callbacks
87
87
  pre_launch_callbacks: List[Callable[..., Any]]= Field(default_factory=list, description="list of callback funcs called before the network launch")
@@ -111,6 +111,9 @@ class AgentNetwork(BaseModel):
111
111
  Logger().log(level="error", message="Need to define the consent trigger function that returns bool", color="red")
112
112
  raise PydanticCustomError("invalid_process", "Need to define the consent trigger function that returns bool", {})
113
113
 
114
+
115
+ if self.consent_trigger and isinstance(self.consent_trigger, Callable):
116
+ self.consent_trigger = Condition(methods={"0": self.consent_trigger})
114
117
  return self
115
118
 
116
119
 
@@ -277,6 +280,7 @@ class AgentNetwork(BaseModel):
277
280
  task_graph = TaskGraph(nodes={node.identifier: node for node in nodes})
278
281
 
279
282
  for i in range(0, len(nodes) - 1):
283
+ condition = self.consent_trigger if isinstance(self.consent_trigger, Condition) else Condition(methods={"0": self.consent_trigger }) if self.consent_trigger else None
280
284
  task_graph.add_edge(
281
285
  source=nodes[i].identifier,
282
286
  target=nodes[i+1].identifier,
@@ -284,7 +288,7 @@ class AgentNetwork(BaseModel):
284
288
  weight=3 if nodes[i].task in self.manager_tasks else 1,
285
289
  dependency_type=DependencyType.FINISH_TO_START if self.process == TaskHandlingProcess.HIERARCHY else DependencyType.START_TO_START,
286
290
  required=bool(self.process == TaskHandlingProcess.CONSENSUAL),
287
- condition=self.consent_trigger,
291
+ condition=condition,
288
292
  data_transfer=bool(self.process == TaskHandlingProcess.HIERARCHY),
289
293
  )
290
294
  )
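
consent_trigger now accepts either a bare callable or a Condition; a callable is wrapped during validation before being attached to each generated edge. A short sketch of the equivalent wrapping, with an illustrative check function:

    from versionhq.task_graph.model import Condition

    def consent(**kwargs) -> bool:
        # illustrative consent check
        return True

    # roughly what the validator builds from a bare callable consent_trigger
    condition = Condition(methods={"0": consent})
    print(condition.condition_met())  # True
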
versionhq/knowledge/source_docling.py CHANGED
@@ -83,7 +83,8 @@ class DoclingSource(BaseKnowledgeSource):
83
83
  yield chunk.text
84
84
 
85
85
 
86
- def _validate_url(self, url: str) -> bool:
86
+ @staticmethod
87
+ def _validate_url(url: str) -> bool:
87
88
  try:
88
89
  result = urlparse(url)
89
90
  return all(
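
Since _validate_url is now a @staticmethod, the agent-side knowledge setup can call it without constructing a DoclingSource. A one-line sketch, assuming the docling extra is installed; the URL is a placeholder:

    from versionhq.knowledge.source_docling import DoclingSource

    # callable on the class itself now, no instance required
    DoclingSource._validate_url(url="https://example.com/guide.html")
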
versionhq/knowledge/storage.py CHANGED
@@ -125,7 +125,8 @@ class KnowledgeStorage(BaseKnowledgeStorage):
125
125
  def search(self, query: List[str], limit: int = 3, filter: Optional[dict] = None, score_threshold: float = 0.35) -> List[Dict[str, Any]]:
126
126
  with suppress_logging():
127
127
  if self.collection:
128
- fetched = self.collection.query(query_texts=query, n_results=limit, where=filter)
128
+ query_texts = ", ".join(query) if isinstance(query, list) else str(query)
129
+ fetched = self.collection.query(query_texts=query_texts, n_results=limit, where=filter)
129
130
  results = []
130
131
  for i in range(len(fetched["ids"][0])):
131
132
  result = {
versionhq/task/evaluation.py CHANGED
@@ -103,7 +103,7 @@ class Evaluation(BaseModel):
103
103
  new_res = filter(lambda x: "score" in x["metadata"], res)
104
104
  new_res = list(new_res)
105
105
  new_res.sort(key=lambda x: x["metadata"]["score"], reverse=True)
106
- if new_res[0]['data']:
106
+ if new_res and new_res[0]['data']:
107
107
  c = new_res[0]['data']['task_output']
108
108
  w = new_res[len(new_res)-1]['data']['task_output'] if new_res[len(new_res)-1]['metadata']['score'] < new_res[0]['metadata']['score'] else ""
109
109
  shot_prompt = SHOTS.format(c=c, w=w)
versionhq/task_graph/model.py CHANGED
@@ -8,14 +8,63 @@ import matplotlib.pyplot as plt
8
8
  from abc import ABC
9
9
  from concurrent.futures import Future
10
10
  from typing import List, Any, Optional, Callable, Dict, Type, Tuple
11
+ from typing_extensions import Self
11
12
 
12
- from pydantic import BaseModel, InstanceOf, Field, UUID4, field_validator
13
+ from pydantic import BaseModel, InstanceOf, Field, UUID4, field_validator, model_validator
13
14
  from pydantic_core import PydanticCustomError
14
15
 
15
16
  from versionhq.agent.model import Agent
16
- from versionhq.task.model import Task, TaskOutput
17
+ from versionhq.task.model import Task, TaskOutput, Evaluation
17
18
  from versionhq._utils.logger import Logger
18
19
 
20
+ class ConditionType(enum.Enum):
21
+ AND = 1
22
+ OR = 2
23
+
24
+
25
+ class Condition(BaseModel):
26
+ """
27
+ A Pydantic class to store edge conditions and their args and types.
28
+ """
29
+ # edge_id: UUID4 = uuid.uuid4()
30
+ methods: Dict[str, Callable | "Condition"] = dict()
31
+ args: Dict[str, Dict[str, Any]] = dict()
32
+ type: ConditionType = None
33
+
34
+ @model_validator(mode="after")
35
+ def validate_type(self) -> Self:
36
+ if len(self.methods.keys()) > 1 and self.type is None:
37
+ raise PydanticCustomError("missing_type", "Missing type", {})
38
+ return self
39
+
40
+ def _execute_method(self, key: str, method: Callable | "Condition") -> bool:
41
+ match method:
42
+ case Condition():
43
+ return method.condition_met()
44
+ case _:
45
+ args = self.args[key] if key in self.args else None
46
+ res = method(**args) if args else method()
47
+ return res
48
+
49
+
50
+ def condition_met(self) -> bool:
51
+ if not self.methods:
52
+ return True
53
+
54
+ if len(self.methods) == 1:
55
+ for k, v in self.methods.items():
56
+ return self._execute_method(key=k, method=v)
57
+
58
+ else:
59
+ cond_list = []
60
+ for k, v in self.methods.items():
61
+ res = self._execute_method(key=k, method=v)
62
+ if self.type == ConditionType.OR and res == True:
63
+ return True
64
+ elif self.type == ConditionType.AND and res == False:
65
+ return False
66
+ return bool(len([item for item in cond_list if item == True]) == len(cond_list))
67
+
19
68
 
20
69
  class TaskStatus(enum.Enum):
21
70
  """
@@ -52,7 +101,6 @@ class Node(BaseModel):
52
101
  assigned_to: InstanceOf[Agent] = Field(default=None)
53
102
  status: TaskStatus = Field(default=TaskStatus.NOT_STARTED)
54
103
 
55
-
56
104
  @field_validator("id", mode="before")
57
105
  @classmethod
58
106
  def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
@@ -83,7 +131,6 @@ class Node(BaseModel):
83
131
  self.status = TaskStatus.COMPLETED if res else TaskStatus.ERROR
84
132
  return res
85
133
 
86
-
87
134
  @property
88
135
  def in_degrees(self) -> int:
89
136
  return len(self.in_degree_nodes) if self.in_degree_nodes else 0
@@ -101,6 +148,11 @@ class Node(BaseModel):
101
148
  """Unique identifier of the node"""
102
149
  return f"{str(self.id)}"
103
150
 
151
+ @property
152
+ def label(self) -> str:
153
+ """Human friendly label for visualization"""
154
+ return self.task.name if self.task.name else self.task.description[0: 8]
155
+
104
156
  def __str__(self):
105
157
  if self.task:
106
158
  return f"{self.identifier}: {self.task.name if self.task.name else self.task.description[0: 12]}"
@@ -110,19 +162,17 @@ class Node(BaseModel):
110
162
 
111
163
  class Edge(BaseModel):
112
164
  """
113
- A class to store an edge object that connects source and target nodes.
165
+ A Pydantic class to store an edge object that connects source and target nodes.
114
166
  """
115
167
 
116
168
  source: Node = Field(default=None)
117
169
  target: Node = Field(default=None)
118
170
 
119
171
  description: Optional[str] = Field(default=None)
120
- weight: Optional[float | int] = Field(default=1, description="est. duration for the task execution or respective weight of the target node (1 low - 10 high priority)")
121
-
172
+ weight: Optional[float | int] = Field(default=1, description="est. duration of the task execution or respective weight of the target node at any scale i.e., 1 low - 10 high priority")
122
173
  dependency_type: DependencyType = Field(default=DependencyType.FINISH_TO_START)
123
174
  required: bool = Field(default=True, description="whether to consider the source's status")
124
- condition: Optional[Callable] = Field(default=None, description="conditional function to start executing the dependency")
125
- condition_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict)
175
+ condition: Optional[Condition] = Field(default=None)
126
176
 
127
177
  lag: Optional[float | int] = Field(default=None, description="lag time (sec) from the dependency met to the task execution")
128
178
  data_transfer: bool = Field(default=True, description="whether the data transfer is required. by default transfer plane text output from in-degree nodes as context")
@@ -150,35 +200,36 @@ class Edge(BaseModel):
150
200
  False Given Condition True (predecessor status irrelevant) Yes
151
201
  """
152
202
 
153
- if not self.required:
154
- return self.condition(**self.condition_kwargs) if self.condition else True
203
+ if self.required == False:
204
+ return self.condition.condition_met() if self.condition else True
205
+ # return self.condition(**self.condition_kwargs) if self.condition else True
155
206
 
156
207
  match self.dependency_type:
157
208
  case DependencyType.FINISH_TO_START:
158
209
  """target starts after source finishes"""
159
210
  if not self.source or self.source.status == TaskStatus.COMPLETED:
160
- return self.condition(**self.conditon_kwargs) if self.condition else True
211
+ return self.condition.condition_met() if self.condition else True
161
212
  else:
162
213
  return False
163
214
 
164
215
  case DependencyType.START_TO_START:
165
216
  """target starts when source starts"""
166
217
  if not self.source or self.source.status != TaskStatus.NOT_STARTED:
167
- return self.condition(**self.conditon_kwargs) if self.condition else True
218
+ return self.condition.condition_met() if self.condition else True
168
219
  else:
169
220
  return False
170
221
 
171
222
  case DependencyType.FINISH_TO_FINISH:
172
223
  """target finish when source start"""
173
224
  if not self.source or self.source.status != TaskStatus.COMPLETED:
174
- return self.condition(**self.conditon_kwargs) if self.condition else True
225
+ return self.condition.condition_met() if self.condition else True
175
226
  else:
176
227
  return False
177
228
 
178
229
  case DependencyType.START_TO_FINISH:
179
230
  """target finishes when source start"""
180
231
  if not self.source or self.source.status == TaskStatus.IN_PROGRESS:
181
- return self.condition(**self.conditon_kwargs) if self.condition else True
232
+ return self.condition.condition_met() if self.condition else True
182
233
  else:
183
234
  return False
184
235
 
@@ -204,6 +255,11 @@ class Edge(BaseModel):
204
255
  res = self.target.handle_task_execution(context=context, response_format=response_format)
205
256
  return res
206
257
 
258
+ @property
259
+ def label(self):
260
+ """Human friendly label for visualization."""
261
+ return f"e{self.source.label}-{self.target.label}"
262
+
207
263
 
208
264
  class Graph(ABC, BaseModel):
209
265
  """
@@ -383,7 +439,7 @@ class TaskGraph(Graph):
383
439
  edge = Edge()
384
440
  for k in Edge.model_fields.keys():
385
441
  v = edge_attributes.get(k, None)
386
- if v:
442
+ if v is not None:
387
443
  setattr(edge, k, v)
388
444
  else:
389
445
  pass
@@ -564,7 +620,7 @@ class TaskGraph(Graph):
564
620
  return res, self.outputs
565
621
 
566
622
 
567
- def evaluate(self, eval_criteria: List[str] = None):
623
+ def evaluate(self, eval_criteria: List[str] = None) -> Evaluation | None:
568
624
  """Evaluates the conclusion based on the given eval criteria."""
569
625
 
570
626
  if not isinstance(self.concl, TaskOutput):
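
Edges now carry a Condition object instead of a raw callable plus condition_kwargs, and multiple checks can be combined with ConditionType.AND or OR. A hedged sketch of a two-method AND condition; the check functions and their args are illustrative:

    from versionhq.task_graph.model import Condition, ConditionType

    def budget_ok(limit: int = 100) -> bool:
        return limit > 0            # illustrative check

    def reviewer_signed_off() -> bool:
        return True                 # illustrative check

    condition = Condition(
        methods={"0": budget_ok, "1": reviewer_signed_off},
        args={"0": {"limit": 50}},
        type=ConditionType.AND,
    )
    print(condition.condition_met())  # True only when every method returns True
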
versionhq/tool/model.py CHANGED
@@ -43,7 +43,6 @@ class BaseTool(ABC, BaseModel):
43
43
  { "__annotations__": { k: v for k, v in cls._run.__annotations__.items() if k != "return" }},
44
44
  )
45
45
 
46
-
47
46
  @field_validator("properties", mode="before")
48
47
  @classmethod
49
48
  def _default_properties(cls, v: Dict[str, Any]) -> Dict[str, Any]:
@@ -81,10 +80,9 @@ class BaseTool(ABC, BaseModel):
81
80
  return self
82
81
 
83
82
  @abstractmethod
84
- def _run(self, *args: Any, **kwargs: Any,) -> Any:
83
+ def _run(self, *args: Any, **kwargs: Any) -> Any:
85
84
  """any handling"""
86
85
 
87
-
88
86
  @staticmethod
89
87
  def _get_arg_annotations(annotation: type[Any] | None) -> str:
90
88
  if annotation is None:
@@ -143,11 +141,9 @@ class BaseTool(ABC, BaseModel):
143
141
  return create_model(schema_name, **fields)
144
142
 
145
143
 
146
-
147
144
  class Tool(BaseTool):
148
145
  func: Callable = Field(default=None)
149
146
 
150
-
151
147
  @model_validator(mode="after")
152
148
  def validate_func(self) -> Self:
153
149
  if not self.func and not self._run:
versionhq/tool/rag_tool.py ADDED
@@ -0,0 +1,112 @@
1
+ import re
2
+ import requests
3
+ import html2text
4
+ import gzip
5
+ import http.client
6
+ import urllib.request
7
+ from urllib.request import Request
8
+ from textwrap import dedent
9
+ from typing import Any, Optional, List, Dict
10
+
11
+ from pydantic import Field
12
+
13
+ from versionhq.agent.model import Agent
14
+ from versionhq.tool.model import BaseTool
15
+ from versionhq._utils.logger import Logger
16
+
17
+
18
+
19
+ class RagTool(BaseTool):
20
+ """A Pydantic class to store a RAG tool object. Inherited from BaseTool"""
21
+
22
+ api_key_name: str = Field(default=None)
23
+ api_endpoint: Optional[str] = Field(default=None)
24
+
25
+ url: Optional[str] = Field(default=None, description="url to scrape")
26
+ headers: Optional[Dict[str, Any]] = Field(default_factory=dict, description="request headers")
27
+
28
+ sources: Optional[List[Any]] = Field(default_factory=list, description="indexed data sources")
29
+ query: Optional[str] = Field(default=None)
30
+ text: Optional[str] = Field(default=None, description="text data source")
31
+
32
+
33
+ def _sanitize_source_code(self, source_code: str | bytes = None) -> str | None:
34
+ if not source_code:
35
+ return None
36
+
37
+ if isinstance(source_code, bytes):
38
+ source_code = source_code.decode('utf-8')
39
+
40
+ h = html2text.HTML2Text()
41
+ h.ignore_links = False
42
+ text = h.handle(source_code)
43
+ text = re.sub(r"[^a-zA-Z$0-9\s\n]", "", text)
44
+ return dedent(text)
45
+
46
+
47
+ def _scrape_url(self, url: str = None) -> str | None:
48
+ url = url if url else self.url
49
+
50
+ if not url:
51
+ return None
52
+
53
+ http.client.HTTPConnection.debuglevel = 1
54
+
55
+ try:
56
+ req = Request(url=url, headers=self.headers, origin_req_host=url, method="GET")
57
+ res = ""
58
+
59
+ with urllib.request.urlopen(req) as url:
60
+ if url.info().get("Content-Encoding") == "gzip":
61
+ res = gzip.decompress(url.read())
62
+ else:
63
+ res = url.read()
64
+
65
+ text = self._sanitize_source_code(source_code=res)
66
+ return text
67
+
68
+ except requests.exceptions.HTTPError as e:
69
+ Logger().log(level="error", message=f"HTTP error occurred: {str(e)}", color="red")
70
+ return None
71
+
72
+ except Exception as e:
73
+ Logger().log(level="error", message=f"Error fetching URL {self.api_endpoint}: {str(e)}", color="red")
74
+ return None
75
+
76
+
77
+ def store_data(self, agent: Agent = None) -> None:
78
+ """Stores retrieved data in the storage"""
79
+ if not agent:
80
+ return
81
+
82
+ text = self.text if self.text else self._scrape_url(self.url)
83
+ self.text = text
84
+ knowledge_sources = [*agent.knowledge_sources, str(text), ] if agent.knowledge_sources else [str(text),]
85
+ agent.update(knowledge_sources=knowledge_sources)
86
+
87
+
88
+ def _run(self, agent: Agent = None, query: str = None) -> List[str]:
89
+ query = query if query else self.query
90
+
91
+ if not query or not agent:
92
+ text = self.text if self.text else self._scrape_url(self.url)
93
+ self.text = text
94
+ return [text,]
95
+
96
+ else:
97
+ results, res = [], []
98
+ if agent._knowledge:
99
+ res = agent._knowledge.query(query=[query], limit=5)
100
+ else:
101
+ self.store_data(agent=agent)
102
+ res = agent._knowledge.query(query=[query], limit=5)
103
+
104
+ for item in res:
105
+ if isinstance(item, dict):
106
+ results.append(item["context"])
107
+ else:
108
+ results.append(str(item))
109
+ return results
110
+
111
+ def run(self, *args, **kwargs):
112
+ return self._run(*args, **kwargs)
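
RagTool scrapes a URL (or takes raw text), folds the result into the agent's knowledge sources, and returns the chunks that match a query. A rough sketch, assuming the tools extra (html2text) is installed; the URL, agent fields, and query are placeholders and a real run needs network access:

    import versionhq as vhq

    tool = vhq.RagTool(url="https://example.com/docs", headers={"User-Agent": "Mozilla/5.0"})
    agent = vhq.Agent(role="researcher", goal="answer questions from the docs", tools=[tool])

    snippets = tool.run(agent=agent, query="how do I form an agent network")
    print(snippets)  # list of matching context strings pulled from the stored page text
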
versionhq-1.2.2.2.dist-info/METADATA → versionhq-1.2.2.4.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: versionhq
3
- Version: 1.2.2.2
3
+ Version: 1.2.2.4
4
4
  Summary: An agentic orchestration framework for building agent networks that handle task automation.
5
5
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
6
6
  License: MIT License
@@ -50,7 +50,7 @@ Requires-Dist: werkzeug>=3.1.3
50
50
  Requires-Dist: typing
51
51
  Requires-Dist: json-repair
52
52
  Requires-Dist: litellm>=1.55.8
53
- Requires-Dist: openai>=1.57.0
53
+ Requires-Dist: openai>=1.64.0
54
54
  Requires-Dist: composio-openai>=0.6.9
55
55
  Requires-Dist: composio>=0.1.0
56
56
  Requires-Dist: setuptools>=75.6.0
@@ -69,7 +69,7 @@ Requires-Dist: matplotlib>=3.10.0
69
69
  Provides-Extra: docling
70
70
  Requires-Dist: docling>=2.17.0; extra == "docling"
71
71
  Provides-Extra: mem0ai
72
- Requires-Dist: mem0ai>=0.1.48; extra == "mem0ai"
72
+ Requires-Dist: mem0ai>=0.1.55; extra == "mem0ai"
73
73
  Provides-Extra: pdfplumber
74
74
  Requires-Dist: pdfplumber>=0.11.5; extra == "pdfplumber"
75
75
  Provides-Extra: pandas
@@ -78,6 +78,11 @@ Provides-Extra: numpy
78
78
  Requires-Dist: numpy>=1.26.4; extra == "numpy"
79
79
  Provides-Extra: pygraphviz
80
80
  Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
81
+ Provides-Extra: tools
82
+ Requires-Dist: html2text>=2024.2.26; extra == "tools"
83
+ Requires-Dist: sec-api>=1.0.28; extra == "tools"
84
+ Provides-Extra: eval
85
+ Requires-Dist: scikit-learn>=1.6.1; extra == "eval"
81
86
 
82
87
  # Overview
83
88
 
versionhq-1.2.2.2.dist-info/RECORD → versionhq-1.2.2.4.dist-info/RECORD RENAMED
@@ -1,20 +1,21 @@
1
- versionhq/__init__.py,sha256=vGvxfqX6PuindBNVOX_OoewD27MqktESYpdzvLvkX_U,2857
1
+ versionhq/__init__.py,sha256=PqOgmog9JVNxGm2LP1RV5x3D-U7PClfcKFWVAE6dXCM,2980
2
2
  versionhq/_utils/__init__.py,sha256=d-vYVcORZKG-kkLe_fzE8VbViDpAk9DDOKe2fVK25ew,178
3
3
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
4
+ versionhq/_utils/llm_as_a_judge.py,sha256=RM0oYfoeanuUyUL3Ewl6_8Xn1F5Axd285UMH46kxG1I,2378
4
5
  versionhq/_utils/logger.py,sha256=iHxGjm3BvUo5dHKLU88_pc0Z45wzSHOjyJGQkb7OADk,3255
5
6
  versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7noD2E,782
6
7
  versionhq/_utils/usage_metrics.py,sha256=xgYGRW3OTuK9EJyi3QYJeYcJl7dL27olcWaLo_7B3JE,2246
7
8
  versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
8
9
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
10
  versionhq/agent/inhouse_agents.py,sha256=WAbyog-6pKwa8ru9u_KJgD_ViTLv4ZRECks1Znch47E,2638
10
- versionhq/agent/model.py,sha256=cYplMaQXHocFeZGZ1WIZ5npZ_Edokj3qFPBE9PiBxHw,25285
11
+ versionhq/agent/model.py,sha256=xebClhCdx0Xt-_or3Ne_fTh8eOWSDEbi4r3WvGSOYcI,26595
11
12
  versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
12
13
  versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
13
14
  versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
14
15
  versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
15
16
  versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
16
17
  versionhq/agent_network/formation.py,sha256=CYKNUeKC392wW3leIDIAaGiKADSsumC_vTe0VOnNwRs,7901
17
- versionhq/agent_network/model.py,sha256=lr63cmH7ecQrfVAtnN44mtijrnOsfKe-8xOVNBXv-3Q,15696
18
+ versionhq/agent_network/model.py,sha256=aSGZf-ZLi4I2Da9-RW8CfZ32hk9MN_RIT9Cl4xvHlE8,16050
18
19
  versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
20
  versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
20
21
  versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -28,8 +29,8 @@ versionhq/knowledge/_utils.py,sha256=YWRF8U533cfZes_gZqUvdj-K24MD2ri1R0gjc_aPYyc
28
29
  versionhq/knowledge/embedding.py,sha256=KfHc__1THxb5jrg1EMrF-v944RDuIr2hE0l-MtM3Bp0,6826
29
30
  versionhq/knowledge/model.py,sha256=ixH8n5kLtJEp1nPAFYA0piYm-n0nnFDtWFp0r9YEVAs,1787
30
31
  versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08,13630
31
- versionhq/knowledge/source_docling.py,sha256=OUdWUZ6CCaddvmOKNYpYVzXCzqy9kwuTqsl0supT6GI,5429
32
- versionhq/knowledge/storage.py,sha256=7oxCg3W9mFjYH1YmuH9kFtTbNxquzYFjuUjd_TlsB9E,8170
32
+ versionhq/knowledge/source_docling.py,sha256=dcu1ITqPXwWZ_lK-6tykEKhhC82eNRTMoWRpxK9Kzls,5441
33
+ versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
33
34
  versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
35
  versionhq/llm/llm_vars.py,sha256=wjQK20cKvph6Vq1v71o4d16zBGcHlwq0bzOT_zWno7w,7041
35
36
  versionhq/llm/model.py,sha256=HIBmf8FYV6-cDbZK1ZBu6z3dmF0ZUbKbCelfwxMlgyY,17177
@@ -44,7 +45,7 @@ versionhq/storage/rag_storage.py,sha256=bS2eE874obarYl-4hT6ZWYWTRsqtfuGpKgKzERmM
44
45
  versionhq/storage/task_output_storage.py,sha256=M8vInLJ5idGAq17w1juHKXtyPyF-B-rK_P8UcqD-Px8,5357
45
46
  versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,748
46
47
  versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
47
- versionhq/task/evaluation.py,sha256=mHLNabuvSbi9FGfblomr41vlc3quTHsugH3-3qQmcDw,4461
48
+ versionhq/task/evaluation.py,sha256=iRLzppqwKaiGpbsr9gMbf6T7NQe6rxTA6OBcWhmiCKs,4473
48
49
  versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
49
50
  versionhq/task/model.py,sha256=8qBxRgqMs_b8IZA45_gAU1uc5S5w9hdnyRG-Vwm8bVM,28901
50
51
  versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
@@ -52,16 +53,17 @@ versionhq/task/TEMPLATES/Description.py,sha256=EkwJHc65G32MjWyn3rcp0ATmMaVPHuYKa
52
53
  versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
53
54
  versionhq/task_graph/colors.py,sha256=naJCx4Vho4iuJtbW8USUXb-M5uYvd5ds2p8qbjUfRus,669
54
55
  versionhq/task_graph/draft.py,sha256=l18XacRsbDhAv6CvKMnUMI26IDuizA1UNWHbL1q5gn4,5099
55
- versionhq/task_graph/model.py,sha256=53D_Bd-Uz6fv5Y1iVVDX1wzqcabE1BmLzqEO0oL8wrs,24628
56
+ versionhq/task_graph/model.py,sha256=3DzZXP4SSJP3xdgZfkJDgFXUjQ05CSYFoisShfmSdcs,26582
56
57
  versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
57
58
  versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
58
59
  versionhq/tool/composio_tool.py,sha256=IATfsEnF_1RPJyGtPBmAtEJh5XPcgDHpyG3SUR461Og,8572
59
60
  versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtgpqOzKZQ,1843
60
61
  versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
61
- versionhq/tool/model.py,sha256=PO4zNWBZcJhYVur381YL1dy6zqurio2jWjtbxOxZMGI,12194
62
+ versionhq/tool/model.py,sha256=Nc2f9frTK5tH4kh6EeEAk1Fi1w19kEXLOcsBwHCS1a4,12189
63
+ versionhq/tool/rag_tool.py,sha256=qm_nDWs-WyDvrxZeZAL2AkswfUWGPZS4zybz0o6wOFI,3653
62
64
  versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
63
- versionhq-1.2.2.2.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
64
- versionhq-1.2.2.2.dist-info/METADATA,sha256=XZ5QDEnQP1G4U-VFHBsGKBPvVc4vqZMafEp6rZvj89s,22015
65
- versionhq-1.2.2.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
66
- versionhq-1.2.2.2.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
67
- versionhq-1.2.2.2.dist-info/RECORD,,
65
+ versionhq-1.2.2.4.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
66
+ versionhq-1.2.2.4.dist-info/METADATA,sha256=fX-azucXgCg-Fm05snnLT02ttDz5u10xMOaBjGmpkp4,22213
67
+ versionhq-1.2.2.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
68
+ versionhq-1.2.2.4.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
69
+ versionhq-1.2.2.4.dist-info/RECORD,,