versionhq 1.1.13.0__py3-none-any.whl → 1.1.13.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -8,12 +8,15 @@ from dotenv import load_dotenv
8
8
  load_dotenv(override=True)
9
9
 
10
10
  from versionhq.agent.model import Agent
11
+ from versionhq.llm.model import LLM
12
+ from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, PARAMS, PROVIDERS, MODELS
11
13
  from versionhq.clients.customer.model import Customer
12
14
  from versionhq.clients.product.model import Product, ProductProvider
13
15
  from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent
14
16
  from versionhq.knowledge.model import Knowledge, KnowledgeStorage
15
17
  from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
16
18
  from versionhq.knowledge.source_docling import DoclingSource
19
+ from versionhq.network.model import TaskStatus, TaskGraph, Node, Edge, DependencyType
17
20
  from versionhq.task.model import Task, TaskOutput, ConditionalTask, ResponseField
18
21
  from versionhq.task.evaluate import Evaluation, EvaluationItem
19
22
  from versionhq.team.model import Team, TeamOutput, Formation, Member, TaskHandlingProcess
@@ -27,10 +30,16 @@ from versionhq.memory.model import ShortTermMemory,LongTermMemory, UserMemory, M
27
30
  from versionhq.task.formation import form_agent_network
28
31
 
29
32
 
30
- __version__ = "1.1.13.0"
33
+ __version__ = "1.1.13.1"
31
34
  __all__ = [
32
35
  "Agent",
33
36
 
37
+ "LLM",
38
+ "LLM_CONTEXT_WINDOW_SIZES",
39
+ "PARAMS",
40
+ "PROVIDERS",
41
+ "MODELS",
42
+
34
43
  "Customer",
35
44
  "Product",
36
45
  "ProductProvider",
@@ -47,6 +56,12 @@ __all__ = [
47
56
  "StringKnowledgeSource",
48
57
  "DoclingSource",
49
58
 
59
+ "TaskStatus",
60
+ "TaskGraph",
61
+ "Node",
62
+ "Edge",
63
+ "DependencyType",
64
+
50
65
  "Task",
51
66
  "TaskOutput",
52
67
  "ConditionalTask",
versionhq/agent/model.py CHANGED
@@ -23,25 +23,6 @@ load_dotenv(override=True)
23
23
  T = TypeVar("T", bound="Agent")
24
24
 
25
25
 
26
- # def mock_agent_ops_provider():
27
- # def track_agent(*args, **kwargs):
28
- # def noop(f):
29
- # return f
30
- # return noop
31
- # return track_agent
32
-
33
- # track_agent = mock_agent_ops_provider()
34
-
35
- # agentops = None
36
- # if os.environ.get("AGENTOPS_API_KEY"):
37
- # try:
38
- # from agentops import track_agent
39
- # except ImportError:
40
- # track_agent = mock_agent_ops_provider()
41
- # else:
42
- # track_agent = mock_agent_ops_provider()
43
-
44
-
45
26
  class TokenProcess:
46
27
  total_tokens: int = 0
47
28
  prompt_tokens: int = 0
@@ -76,10 +57,9 @@ class TokenProcess:
76
57
  # @track_agent()
77
58
  class Agent(BaseModel):
78
59
  """
79
- Agent class that run on LLM.
80
- Agents execute tasks alone or in the team, using RAG tools and knowledge base if any.
81
- Agents will prioritize team tasks when they belong to the team.
82
- * (Temp) Comment out all the optional fields except for Team and LLM settings for convenience.
60
+ A class to store agent information.
61
+ Agents must have `role` and `goal`; `llm` defaults to DEFAULT_MODEL_NAME.
62
+ Validation then runs on `backstory`, `llm`, `tools`, `rpm` (requests per min), `knowledge`, and `memory`.
83
63
  """
84
64
 
85
65
  __hash__ = object.__hash__
@@ -132,8 +112,6 @@ class Agent(BaseModel):
132
112
 
133
113
  # cache, error, ops handling
134
114
  formatting_errors: int = Field(default=0, description="number of formatting errors.")
135
- agent_ops_agent_name: str = None
136
- agent_ops_agent_id: str = None
137
115
 
138
116
 
139
117
  @field_validator("id", mode="before")
@@ -161,9 +139,8 @@ class Agent(BaseModel):
161
139
  @model_validator(mode="after")
162
140
  def set_up_llm(self) -> Self:
163
141
  """
164
- Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
142
+ Set up `llm` and `function_calling_llm` as valid LLM objects using the given kwargs.
165
143
  """
166
- self.agent_ops_agent_name = self.role
167
144
  self.llm = self._convert_to_llm_object(llm=self.llm)
168
145
 
169
146
  function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
@@ -177,12 +154,15 @@ class Agent(BaseModel):
177
154
  return self
178
155
 
179
156
 
180
- def _convert_to_llm_object(self, llm: Any | None) -> LLM:
157
+ def _convert_to_llm_object(self, llm: Any = None) -> LLM:
181
158
  """
182
159
  Convert the given value to LLM object.
183
160
  When `llm` is dict or self.llm_config is not None, add these values to the LLM object after validating them.
184
161
  """
185
- llm = llm if llm is not None else DEFAULT_MODEL_NAME
162
+ llm = llm if llm else self.llm if self.llm else DEFAULT_MODEL_NAME
163
+
164
+ if not llm:
165
+ pass
186
166
 
187
167
  match llm:
188
168
  case LLM():
@@ -380,16 +360,16 @@ class Agent(BaseModel):
380
360
  Set up memories: stm, ltm, and um
381
361
  """
382
362
 
383
- if self.use_memory == True:
384
- self.long_term_memory = self.long_term_memory if self.long_term_memory else LongTermMemory()
385
- self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config)
363
+ # if self.use_memory == True:
364
+ self.long_term_memory = self.long_term_memory if self.long_term_memory else LongTermMemory()
365
+ self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config)
386
366
 
387
- if hasattr(self, "memory_config") and self.memory_config is not None:
388
- user_id = self.memory_config.get("user_id", None)
389
- if user_id:
390
- self.user_memory = self.user_memory if self.user_memory else UserMemory(agent=self, user_id=user_id)
391
- else:
392
- self.user_memory = None
367
+ if hasattr(self, "memory_config") and self.memory_config is not None:
368
+ user_id = self.memory_config.get("user_id", None)
369
+ if user_id:
370
+ self.user_memory = self.user_memory if self.user_memory else UserMemory(agent=self, user_id=user_id)
371
+ else:
372
+ self.user_memory = None
393
373
 
394
374
  return self
395
375
 
@@ -402,6 +382,78 @@ class Agent(BaseModel):
402
382
  pass
403
383
 
404
384
 
385
+ def update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
386
+ """
387
+ Update llm and llm_config of the existing agent. (Other conditions will remain the same.)
388
+ """
389
+
390
+ if not llm and not llm_config:
391
+ self._logger.log(level="error", message="Missing llm or llm_config values to update", color="red")
392
+ return self
393
+
394
+ self.llm = llm
395
+ if llm_config:
396
+ if self.llm_config:
397
+ self.llm_config.update(llm_config)
398
+ else:
399
+ self.llm_config = llm_config
400
+
401
+ return self.set_up_llm()
402
+
403
+
404
+ def update(self, **kwargs) -> Self:
405
+ """
406
+ Update the existing agent. Address variables that require running set_up_x methods first, then update the remaining variables.
407
+ """
408
+
409
+ if not kwargs:
410
+ self._logger.log(level="error", message="Missing values to update", color="red")
411
+ return self
412
+
413
+ for k, v in kwargs.items():
414
+ match k:
415
+ case "tools":
416
+ self.tools = kwargs.get(k, self.tools)
417
+ self.set_up_tools()
418
+
419
+ case "role" | "goal":
420
+ self.role = kwargs.get("role", self.role)
421
+ self.goal = kwargs.get("goal", self.goal)
422
+ if not self.backstory:
423
+ self.set_up_backstory()
424
+
425
+ if self.backstory:
426
+ self.backstory += f"new role: {self.role}, new goal: {self.goal}"
427
+
428
+ case "max_rpm":
429
+ self.max_rpm = kwargs.get(k, self.max_rpm)
430
+ self.set_up_rpm()
431
+
432
+ case "knowledge_sources":
433
+ self.knowledge_sources = kwargs.get("knowledge_sources", self.knowledge_sources)
434
+ self.set_up_knowledge()
435
+
436
+ case "use_memory" | "memory_config":
437
+ self.use_memory = kwargs.get("use_memory", self.use_memory)
438
+ self.memory_config = kwargs.get("memory_config", self.memory_config)
439
+ self.set_up_memory()
440
+
441
+ case "llm" | "llm_config":
442
+ self.llm = kwargs.get("llm", self.llm)
443
+ self.llm_config = kwargs.get("llm_config", self.llm_config)
444
+ self.update_llm(llm=self.llm, llm_config=self.llm_config)
445
+
446
+ case _:
447
+ try:
448
+ setattr(self, k, v)
449
+ except Exception as e:
450
+ self._logger.log(level="error", message=f"Failed to update the key: {k} We'll skip. Error: {str(e)}", color="red")
451
+ pass
452
+
453
+ return self
454
+
455
+
456
+
405
457
  def invoke(
406
458
  self,
407
459
  prompts: str,
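The `update_llm()` and `update()` methods added above reconfigure an existing agent in place. A minimal sketch of their intended use; the role, goal, model name, and `llm_config` values are placeholders:

```python
import versionhq as vhq

agent = vhq.Agent(role="Researcher", goal="Summarize weekly findings")

# Swap the model and merge extra kwargs; set_up_llm() re-validates the result.
agent.update_llm(llm="gpt-4o", llm_config=dict(temperature=0.3))

# Bulk-update several fields. Keys that need a set_up_* step trigger it;
# unknown keys fall through to setattr and are logged and skipped on failure.
agent.update(max_rpm=3, use_memory=True, dummy="ignored")
```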
versionhq/llm/llm_vars.py CHANGED
@@ -1,4 +1,3 @@
1
- from enum import Enum
2
1
  from typing import Type
3
2
 
4
3
  JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
versionhq/llm/model.py CHANGED
@@ -111,7 +111,7 @@ class LLM(BaseModel):
111
111
 
112
112
 
113
113
  @model_validator(mode="after")
114
- def validate_model_providers(self) -> Self:
114
+ def validate_model_and_provider(self) -> Self:
115
115
  """
116
116
  Validate the given model, provider, interface provider.
117
117
  """
File without changes
versionhq/network/model.py ADDED
@@ -0,0 +1,394 @@
1
+ import enum
2
+ import uuid
3
+ from abc import ABC
4
+ from typing import List, Any, Optional, Callable, Dict, Type
5
+
6
+ from pydantic import BaseModel, InstanceOf, Field, UUID4, PrivateAttr, field_validator
7
+ from pydantic_core import PydanticCustomError
8
+
9
+ from versionhq.task.model import Task
10
+ from versionhq.agent.model import Agent
11
+ from versionhq._utils.logger import Logger
12
+
13
+
14
+ try:
15
+ import networkx as ntx
16
+ except ImportError:
17
+ try:
18
+ import os
19
+ os.system("uv add networkx --optional networkx")
20
+ import networkx as ntx
21
+ except:
22
+ raise ImportError("networkx is not installed. Please install it with: uv add networkx --optional networkx")
23
+
24
+
25
+ try:
26
+ import matplotlib.pyplot as plt
27
+ except ImportError:
28
+ try:
29
+ import os
30
+ os.system("uv add matplotlib --optional matplotlib")
31
+ import matplotlib.pyplot as plt
32
+ except:
33
+ raise ImportError("matplotlib is not installed. Please install it with: uv add matplotlib --optional matplotlib")
34
+
35
+ import networkx as nx
36
+ import matplotlib.pyplot as plt
37
+
38
+
39
+
40
+ class TaskStatus(enum.Enum):
41
+ """
42
+ Enum to track the task execution status
43
+ """
44
+ NOT_STARTED = 1
45
+ IN_PROGRESS = 2
46
+ BLOCKED = 3 # task is waiting for the tasks it depends on to complete. resumption set as AUTO.
47
+ COMPLETED = 4
48
+ DELAYED = 5 # task has begun but is taking longer than expected and is behind schedule.
49
+ ON_HOLD = 6 # task is temporarily & intentionally paused due to external factors and/or decisions. resumption set as DECISION.
50
+
51
+
52
+
53
+ class DependencyType(enum.Enum):
54
+ """
55
+ Concise enumeration of the edge type.
56
+ """
57
+
58
+ FINISH_TO_START = "FS" # Task B starts after Task A finishes
59
+ START_TO_START = "SS" # Task B starts when Task A starts
60
+ FINISH_TO_FINISH = "FF" # Task B finishes when Task A finishes
61
+ START_TO_FINISH = "SF" # Task B finishes when Task A starts
62
+
63
+
64
+
65
+ class TriggerEvent(enum.Enum):
66
+ """
67
+ Concise enumeration of key trigger events for task execution.
68
+ """
69
+ IMMEDIATE = 0 # execute immediately
70
+ DEPENDENCIES_MET = 1 # All/required dependencies are satisfied
71
+ RESOURCES_AVAILABLE = 2 # Necessary resources are available
72
+ SCHEDULED_TIME = 3 # Scheduled start time or time window reached
73
+ EXTERNAL_EVENT = 4 # Triggered by an external event/message
74
+ DATA_AVAILABLE = 5 # Required data is available both internal/external
75
+ APPROVAL_RECEIVED = 6 # Necessary approvals have been granted
76
+ STATUS_CHANGED = 7 # Relevant task/system status has changed
77
+ RULE_MET = 8 # A predefined rule or condition has been met
78
+ MANUAL_TRIGGER = 9 # Manually initiated by a user
79
+ ERROR_HANDLED = 10 # A previous error/exception has been handled
80
+
81
+
82
+
83
+ class Node(BaseModel):
84
+ """
85
+ A class to store a node object.
86
+ """
87
+ id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
88
+ task: InstanceOf[Task] = Field(default=None)
89
+ trigger_event: TriggerEvent = Field(default=TriggerEvent.IMMEDIATE, description="store trigger event to execute the task")
90
+ in_degree_nodes: List[Any] = Field(default=None, description="list of Node objects")
91
+ out_degree_nodes: List[Any] = Field(default=None, description="list of Node objects")
92
+ assigned_to: InstanceOf[Agent] = Field(default=None)
93
+ status: TaskStatus = Field(default=TaskStatus.NOT_STARTED)
94
+
95
+
96
+ @field_validator("id", mode="before")
97
+ @classmethod
98
+ def _deny_id(cls, v: Optional[UUID4]) -> None:
99
+ if v:
100
+ raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})
101
+
102
+ def is_independent(self) -> bool:
103
+ return not self.in_degree_nodes and not self.out_degree_nodes
104
+
105
+ @property
106
+ def in_degrees(self) -> int:
107
+ return len(self.in_degree_nodes) if self.in_degree_nodes else 0
108
+
109
+ @property
110
+ def out_degrees(self) -> int:
111
+ return len(self.out_degree_nodes) if self.out_degree_nodes else 0
112
+
113
+ @property
114
+ def degrees(self) -> int:
115
+ return self.in_degrees + self.out_degrees
116
+
117
+ @property
118
+ def identifier(self) -> str:
119
+ """Unique identifier for the node"""
120
+ return f"{str(self.id)}"
121
+
122
+ def __str__(self):
123
+ return self.identifier
124
+
125
+
126
+ class Edge(BaseModel):
127
+ """
128
+ A class to store an edge object that connects multiple nodes as dependencies.
129
+ """
130
+ description: Optional[str] = Field(default=None)
131
+
132
+ type: DependencyType = Field(default=DependencyType.FINISH_TO_START)
133
+ weight: Optional[float] = Field(default=None, description="duration or weight of the dependency: 1 light 10 heavy")
134
+ lag: Optional[float] = Field(default=None, description="lag time for the dependency to be executed")
135
+ constraint: Optional[str] = Field(default=None, description="constraint to consider executing the dependency")
136
+ priority: Optional[int] = Field(default=None, description="priority of the dependency if multiple depencencies are given")
137
+
138
+ data_transfer: bool = Field(True, description="whether the data transfer is required")
139
+ data_format: Optional[str] = Field(default=None, description="Format of data transfer")
140
+
141
+ # execution logic
142
+ required: bool = Field(True, description="whether the execution of the dependency is required - False = conditional, optional task")
143
+ condition: Optional[Callable] = Field(default=None, description="conditional function to start executing the dependency")
144
+
145
+
146
+ def is_dependency_met(self, predecessor_node: Node = None) -> bool:
147
+ """
148
+ Determine whether the dependency is satisfied so that the dependent task can start:
149
+
150
+ required | condition | Dependency Met?                                   | Dependent Task Can Start?
151
+ True     | Not Given | Predecessor task finished                         | Yes (if other deps met)
152
+ True     | Given     | Predecessor task finished and condition is True   | Yes (if other deps met)
153
+ False    | Not Given | Always (regardless of predecessor status)         | Yes (if other deps met)
154
+ False    | Given     | Condition is True (predecessor status irrelevant) | Yes (if other deps met)
155
+ """
156
+
157
+ if self.required:
158
+ if predecessor_node.status == TaskStatus.COMPLETED:
159
+ return self.condition() if self.condition else True
160
+ else:
161
+ return False
162
+ else:
163
+ return self.condition() if self.condition else True
164
+
165
+
166
+
167
+ class Graph(ABC, BaseModel):
168
+ """
169
+ An abstract class to store a graph object using the NetworkX library.
170
+ """
171
+
172
+ _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
173
+ directed: bool = Field(default=False, description="Whether the graph is directed")
174
+ graph: Type[nx.Graph] = Field(default=None)
175
+ nodes: Dict[str, InstanceOf[Node]] = Field(default_factory=dict, description="map of node identifier to Node object")
176
+ edges: Dict[str, InstanceOf[Edge]] = Field(default_factory=dict)
177
+
178
+ def __init__(self, directed: bool = False, **kwargs):
179
+
180
+ super().__init__(directed=directed, **kwargs)
181
+ self.graph = nx.DiGraph() if self.directed else nx.Graph()
182
+
183
+ def add_node(self, node: Node) -> None:
184
+ self.graph.add_node(node.identifier, **node.model_dump())
185
+ self.nodes[node.identifier] = node
186
+
187
+ def add_edge(self, source: str, target: str, edge: Edge) -> None:
188
+ self.graph.add_edge(source, target, **edge.model_dump())
189
+ self.edges[(source, target)] = edge
190
+
191
+ def add_weighted_edges_from(self, edges):
192
+ self.graph.add_weighted_edges_from(edges)
193
+
194
+ def get_neighbors(self, node: Node) -> List[Node]:
195
+ return list(self.graph.neighbors(node))
196
+
197
+ def get_in_degree(self, node: Node) -> int:
198
+ return self.graph.in_degree(node)
199
+
200
+ def get_out_degree(self, node: Node) -> int:
201
+ return self.graph.out_degree(node)
202
+
203
+ def find_path(self, source: str, target: str, weight: Any) -> Any:
204
+ try:
205
+ return nx.shortest_path(self.graph, source=source, target=target, weight=weight)
206
+ except nx.NetworkXNoPath:
207
+ return None
208
+
209
+ def find_all_paths(self, source: str, target: str) -> List[Any]:
210
+ return list(nx.all_simple_paths(self.graph, source=source, target=target))
211
+
212
+
213
+ def find_critical_path(self) -> tuple[List[Any], int, Dict[str, int]]:
214
+ """
215
+ Finds the critical path in the graph.
216
+ Returns:
217
+ A tuple containing:
218
+ - The critical path (a list of task names).
219
+ - The duration of the critical path.
220
+ - A dictionary of all paths and their durations.
221
+ """
222
+
223
+ all_paths = {}
224
+ for start_node in (v for k, v in self.nodes.items() if v.in_degrees == 0): # Start from nodes with 0 in-degree
225
+ for end_node in (v for k, v in self.nodes.items() if v.out_degrees == 0): # End at nodes with 0 out-degree
226
+ for edge in nx.all_simple_paths(self.graph, source=start_node.identifier, target=end_node.identifier):
227
+ edge_weight = sum((self.edges.get((u, v)).weight or 0) if self.edges.get((u, v)) else 0 for u, v in zip(edge, edge[1:])) # self.edges is keyed by (source, target) identifier pairs
228
+ all_paths[tuple(edge)] = edge_weight
229
+
230
+ if not all_paths:
231
+ return [], 0, all_paths
232
+
233
+ critical_path = max(all_paths, key=all_paths.get)
234
+ critical_duration = all_paths[critical_path]
235
+
236
+ return list(critical_path), critical_duration, all_paths
237
+
238
+
239
+ def is_circled(self, node: Node) -> bool:
240
+ """Check if there's a path from the node to itself and return bool."""
241
+ try:
242
+ path = nx.shortest_path(self.graph, source=node, target=node)
243
+ return True if path else False
244
+ except nx.NetworkXNoPath:
245
+ return False
246
+
247
+
248
+ def visualize(self, title: str = "Graph Visualization", pos: Any = None, **graph_config):
249
+ pos = pos if pos else nx.spring_layout(self.graph, seed=42)
250
+ nx.draw(
251
+ self.graph,
252
+ pos,
253
+ with_labels=True, node_size=700, node_color="skyblue", font_size=10, font_color="black", arrowstyle='-|>', arrowsize=20, arrows=True,
254
+ **graph_config
255
+ )
256
+ edge_labels = {}
257
+ for u, v, data in self.graph.edges(data=True):
258
+ edge = self.edges.get((u,v))
259
+ if edge:
260
+ label_parts = []
261
+ if edge.type:
262
+ label_parts.append(f"Type: {edge.type}")
263
+ if edge.weight is not None: # Edge stores this as `weight` (documented as duration or weight)
264
+ label_parts.append(f"Duration: {edge.weight}")
265
+ if edge.lag is not None:
266
+ label_parts.append(f"Lag: {edge.lag}")
267
+ edge_labels[(u, v)] = "\n".join(label_parts) # Combine labels with newlines
268
+ nx.draw_networkx_edge_labels(self.graph, pos, edge_labels=edge_labels)
269
+ plt.title(title)
270
+ plt.show()
271
+
272
+
273
+
274
+ class TaskGraph(Graph):
275
+ id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
276
+ should_reform: bool = Field(default=False)
277
+ status: Dict[str, TaskStatus] = Field(default_factory=dict, description="store identifier (str) and TaskStatus of all task_nodes")
278
+
279
+
280
+ def add_task(self, task: Node | Task) -> Node:
281
+ """Convert `task` to a Node object and add it to G"""
282
+ task_node = task if isinstance(task, Node) else Node(task=task)
283
+ self.add_node(task_node)
284
+ self.status[task_node.identifier] = TaskStatus.NOT_STARTED
285
+ return task_node
286
+
287
+
288
+ def add_dependency(
289
+ self, source_task_node_identifier: str, target_task_node_identifier: str, **edge_attributes
290
+ ) -> None:
291
+ """
292
+ Add an edge that connects task 1 (source) and task 2 (target), using each task node's identifier.
293
+ """
294
+
295
+ if not edge_attributes:
296
+ self._logger.log(level="error", message="Edge attributes are missing.", color="red")
297
+
298
+ edge = Edge()
299
+ for k in Edge.model_fields.keys():
300
+ v = edge_attributes.get(k, None)
301
+ if v:
302
+ setattr(edge, k, v)
303
+ else:
304
+ pass
305
+
306
+ self.add_edge(source_task_node_identifier, target_task_node_identifier, edge)
307
+
308
+
309
+ def set_task_status(self, identifier: str, status: TaskStatus) -> None:
310
+ if identifier in self.status:
311
+ self.status[identifier] = status
312
+ else:
313
+ self._logger.log(level="warning", message=f"Task '{identifier}' not found in the graph.", color="yellow")
314
+ pass
315
+
316
+ def get_task_status(self, identifier):
317
+ if identifier in self.status:
318
+ return self.status[identifier]
319
+ else:
320
+ self._logger.log(level="warning", message=f"Task '{identifier}' not found in the graph.", color="yellow")
321
+ return None
322
+
323
+
324
+ def visualize(self, layout: str = None):
325
+ try:
326
+ pos = nx.drawing.nx_agraph.graphviz_layout(self.graph, prog='dot') # 'dot', 'neato', 'fdp', 'sfdp'
327
+ except ImportError:
328
+ pos = nx.spring_layout(self.graph, seed=42) # REFINEME - layout
329
+
330
+ node_colors = list()
331
+ for k, v in self.graph.nodes.items():
332
+ status = self.get_task_status(identifier=k)
333
+ if status == TaskStatus.NOT_STARTED:
334
+ node_colors.append("skyblue")
335
+ elif status == TaskStatus.IN_PROGRESS:
336
+ node_colors.append("lightgreen")
337
+ elif status == TaskStatus.BLOCKED:
338
+ node_colors.append("lightcoral")
339
+ elif status == TaskStatus.COMPLETED:
340
+ node_colors.append("black")
341
+ elif status == TaskStatus.DELAYED:
342
+ node_colors.append("orange")
343
+ elif status == TaskStatus.ON_HOLD:
344
+ node_colors.append("yellow")
345
+ else:
346
+ node_colors.append("grey")
347
+
348
+ critical_paths, duration, paths = self.find_critical_path()
349
+ edge_colors = ['red' if (u, v) in zip(critical_paths, critical_paths[1:]) else 'black' for u, v in self.graph.edges()]
350
+ edge_widths = []
351
+
352
+ for k, v in self.edges.items():
353
+ # edge_weights = nx.get_edge_attributes(self.graph, 'weight')
354
+ # edge_colors.append(plt.cm.viridis(v.weight / max(edge_weights.values())))
355
+ edge_widths.append((v.weight if v.weight is not None else 1) * 0.5) # Width proportional to weight (defaults to 1 when the weight is unset)
356
+
357
+ nx.draw(
358
+ self.graph, pos,
359
+ with_labels=True,
360
+ node_size=700,
361
+ node_color=node_colors,
362
+ font_size=10,
363
+ font_color="black",
364
+ edge_color=edge_colors,
365
+ width=edge_widths,
366
+ arrows=True,
367
+ arrowsize=20,
368
+ arrowstyle='-|>'
369
+ )
370
+
371
+ edge_labels = nx.get_edge_attributes(G=self.graph, name="edges")
372
+ nx.draw_networkx_edge_labels(self.graph, pos, edge_labels=edge_labels)
373
+
374
+ plt.title("Project Network Diagram")
375
+ self._save()
376
+ plt.show()
377
+
378
+
379
+ def _save(self, abs_file_path: str = None) -> None:
380
+ """
381
+ Save the graph image in the local directory.
382
+ """
383
+
384
+ try:
385
+ import os
386
+ project_root = os.path.abspath(os.getcwd())
387
+ abs_file_path = abs_file_path if abs_file_path else f"{project_root}/uploads"
388
+
389
+ os.makedirs(abs_file_path, exist_ok=True)
390
+
391
+ plt.savefig(f"{abs_file_path}/{str(self.id)}.png")
392
+
393
+ except Exception as e:
394
+ self._logger.log(level="error", message=f"Failed to save the graph {str(self.id)}: {str(e)}", color="red")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: versionhq
3
- Version: 1.1.13.0
3
+ Version: 1.1.13.1
4
4
  Summary: An agentic orchestration framework for building agent networks that handle task automation.
5
5
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
6
6
  License: MIT License
@@ -74,6 +74,12 @@ Provides-Extra: pandas
74
74
  Requires-Dist: pandas>=2.2.3; extra == "pandas"
75
75
  Provides-Extra: numpy
76
76
  Requires-Dist: numpy>=1.26.4; extra == "numpy"
77
+ Provides-Extra: pygraphviz
78
+ Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
79
+ Provides-Extra: networkx
80
+ Requires-Dist: networkx>=3.4.2; extra == "networkx"
81
+ Provides-Extra: matplotlib
82
+ Requires-Dist: matplotlib>=3.10.0; extra == "matplotlib"
77
83
 
78
84
  # Overview
79
85
 
@@ -103,15 +109,21 @@ A Python framework for agentic orchestration that handles complex task automatio
103
109
 
104
110
  - [Key Features](#key-features)
105
111
  - [Agent formation](#agent-formation)
112
+ - [Graph Theory Concept](#graph-theory-concept)
113
+ - [Agent optimization](#agent-optimization)
106
114
  - [Quick Start](#quick-start)
107
115
  - [Package installation](#package-installation)
108
116
  - [Forming an agent network](#forming-a-agent-network)
109
- - [Customizing AI agents](#customizing-ai-agents)
117
+ - [Executing tasks](#executing-tasks)
110
118
  - [Supervising](#supervising)
111
119
  - [Technologies Used](#technologies-used)
112
120
  - [Project Structure](#project-structure)
113
- - [Setting Up](#setting-up)
121
+ - [Setting Up a Project](#setting-up-a-project)
122
+ - [1. Installing package manager](#1-installing-package-manager)
123
+ - [2. Installing dependencies](#2-installing-dependencies)
124
+ - [3. Adding env secrets to .env file](#3-adding-env-secrets-to-env-file)
114
125
  - [Contributing](#contributing)
126
+ - [Steps](#steps)
115
127
  - [Package Management with uv](#package-management-with-uv)
116
128
  - [Pre-Commit Hooks](#pre-commit-hooks)
117
129
  - [Documentation](#documentation)
@@ -144,6 +156,73 @@ You can specify a desired formation or allow the agents to determine it autonomo
144
156
 
145
157
  <hr />
146
158
 
159
+ ### Graph Theory Concept
160
+
161
+ To completely automate task workflows, agents will build a `task-oriented network` by generating `nodes` that represent tasks and connecting them with dependency-defining `edges`.
162
+
163
+ Each node is triggered by specific events and executed by an assigned agent once all dependencies are met.
164
+
165
+ While the network automatically reconfigures itself, you retain the ability to direct the agents using the `should_reform` variable.
166
+
167
+
168
+ The following code snippet demonstrates the `TaskGraph` and its visualization, saving the diagram to the `uploads` directory.
169
+
170
+ ```python
171
+ import versionhq as vhq
172
+
173
+ task_graph = vhq.TaskGraph(directed=False, should_reform=True) # triggering auto formation
174
+
175
+ task_a = vhq.Task(description="Research Topic")
176
+ task_b = vhq.Task(description="Outline Post")
177
+ task_c = vhq.Task(description="Write First Draft")
178
+
179
+ node_a = task_graph.add_task(task=task_a)
180
+ node_b = task_graph.add_task(task=task_b)
181
+ node_c = task_graph.add_task(task=task_c)
182
+
183
+ task_graph.add_dependency(
184
+ node_a.identifier, node_b.identifier,
185
+ type=vhq.DependencyType.FINISH_TO_START, weight=5, description="B depends on A"
186
+ )
187
+ task_graph.add_dependency(
188
+ node_a.identifier, node_c.identifier,
189
+ type=vhq.DependencyType.FINISH_TO_FINISH, lag=1, required=False, weight=3
190
+ )
191
+
192
+ task_graph.visualize()
193
+ ```
194
+
195
+ <hr />
196
+
197
+ ### Agent optimization
198
+
199
+ Agents are model-agnostic and can handle multiple tasks, leveraging their own and their peers' knowledge sources, memories, and tools.
200
+
201
+ Agents are optimized during network formation, but customization is possible before or after.
202
+
203
+ The following code snippet demonstrates agent customization:
204
+
205
+ ```python
206
+ import versionhq as vhq
207
+
208
+ agent = vhq.Agent(
209
+ role="Marketing Analyst",
210
+ goal="my amazing goal"
211
+ ) # assuming this agent was created during the network formation
212
+
213
+ # update the agent
214
+ agent.update(
215
+ llm="gemini-2.0", # updating LLM (Valid llm_config will be inherited to the new LLM.)
216
+ tools=[vhq.Tool(func=lambda x: x)], # adding tools
217
+ max_rpm=3,
218
+ knowledge_sources=["<KC1>", "<KS2>"], # adding knowledge sources. This will trigger the storage creation.
219
+ memory_config={"user_id": "0001"}, # adding memories
220
+ dummy="I am dummy" # <- invalid field will be automatically ignored
221
+ )
222
+ ```
223
+
224
+ <hr />
225
+
147
226
  ## Quick Start
148
227
 
149
228
  ### Package installation
@@ -169,11 +248,11 @@ You can specify a desired formation or allow the agents to determine it autonomo
169
248
  This will form a network of multiple agents based on the chosen `Formation` and return a `TaskOutput` object with output in JSON, plain text, or Pydantic model format, along with an evaluation.
170
249
 
171
250
 
172
- ### Customizing AI agents
251
+ ### Executing tasks
173
252
 
174
- You can simply build an agent using `Agent` model.
253
+ You can simply build an agent using the `Agent` model and execute tasks using the `Task` class.
175
254
 
176
- By default, the agent prioritize JSON serializable outputs over plane texts.
255
+ By default, agents prioritize JSON over plain text outputs.
177
256
 
178
257
 
179
258
  ```python
@@ -253,21 +332,29 @@ Tasks can be delegated to a team manager, peers in the team, or completely new a
253
332
 
254
333
  ## Technologies Used
255
334
 
335
+ **Graph Theory (Analysis and Visualization)**
336
+
337
+ * [NetworkX](https://networkx.org/documentation/stable/reference/introduction.html): A Python package to analyze, create, and manipulate complex graph networks.
338
+ * [Matplotlib](https://matplotlib.org/stable/index.html): Visualization library
339
+ * [Graphviz](https://graphviz.org/about/): Graph visualization software
340
+
341
+
256
342
  **Schema, Data Validation**
257
343
 
258
344
  * [Pydantic](https://docs.pydantic.dev/latest/): Data validation and serialization library for Python.
259
345
  * [Upstage](https://console.upstage.ai/docs/getting-started/overview): Document processer for ML tasks. (Use `Document Parser API` to extract data from documents)
260
346
  * [Docling](https://ds4sd.github.io/docling/): Document parsing
261
347
 
348
+
262
349
  **Storage**
263
350
 
264
351
  * [mem0ai](https://docs.mem0.ai/quickstart#install-package): Agents' memory storage and management.
265
352
  * [Chroma DB](https://docs.trychroma.com/): Vector database for storing and querying usage data.
266
353
  * [SQLite](https://www.sqlite.org/docs.html): C-language library that implements a small SQL database engine.
267
354
 
268
- **LLM-curation**
355
+ **LLM Integration**
269
356
 
270
- * [LiteLLM](https://docs.litellm.ai/docs/providers): Curation platform to access LLMs
357
+ * [LiteLLM](https://docs.litellm.ai/docs/providers): Integration with diverse LLM providers
271
358
 
272
359
  **Tools**
273
360
 
@@ -304,30 +391,30 @@ src/
304
391
  │ └── llm/
305
392
  │ └── ...
306
393
 
307
- └── uploads/ # Local directory that stores uloaded files
394
+ └── uploads/ [.gitignore] # Local directory to store uploaded files such as graphviz diagrams generated by the `Network` class
308
395
 
309
396
  ```
310
397
 
311
398
  <hr />
312
399
 
313
- ## Setting Up
400
+ ## Setting Up a Project
314
401
 
315
- 1. Install `uv` package manager:
402
+ ### 1. Installing package manager
316
403
 
317
- For MacOS:
404
+ For MacOS:
318
405
 
319
- ```
320
- brew install uv
321
- ```
406
+ ```
407
+ brew install uv
408
+ ```
322
409
 
323
- For Ubuntu/Debian:
410
+ For Ubuntu/Debian:
411
+ ```
412
+ sudo apt-get install uv
413
+ ```
324
414
 
325
- ```
326
- sudo apt-get install uv
327
- ```
328
415
 
416
+ ### 2. Installing dependencies
329
417
 
330
- 2. Install dependencies:
331
418
  ```
332
419
  uv venv
333
420
  source .venv/bin/activate
@@ -335,29 +422,51 @@ src/
335
422
  uv sync --all-extras
336
423
  ```
337
424
 
338
- * In case of AssertionError/module mismatch, run Python version control using `.pyenv`
339
- ```
340
- pyenv install 3.12.8
341
- pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
342
- uv python pin 3.12.8
343
- echo 3.12.8 >> .python-version
344
- ```
425
+ - AssertionError/module mismatch errors: Set up default Python version using `.pyenv`
426
+ ```
427
+ pyenv install 3.12.8
428
+ pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
429
+ uv python pin 3.12.8
430
+ echo 3.12.8 >> .python-version
431
+ ```
345
432
 
433
+ - `pygraphviz` related errors: Run the following commands:
434
+ ```
435
+ brew install graphviz
436
+ uv pip install --config-settings="--global-option=build_ext" \
437
+ --config-settings="--global-option=-I$(brew --prefix graphviz)/include/" \
438
+ --config-settings="--global-option=-L$(brew --prefix graphviz)/lib/" \
439
+ pygraphviz
440
+ ```
441
+
442
+ * If the error continues, skip pygraphviz installation by:
443
+ ```
444
+ uv sync --all-extras --no-extra pygraphviz
445
+ ```
446
+
447
+ - `torch`/`Docling` related errors: Set the default Python version to either `3.11.x` or `3.12.x` (same as for the AssertionError case)
448
+
449
+ ### 3. Adding env secrets to .env file
346
450
 
347
- 3. Add secrets to `.env` file in the project root:
451
+ Create a `.env` file in the project root and add the following:
348
452
 
349
453
  ```
350
- LITELLM_API_KEY=your-litellm-api-key
351
454
  OPENAI_API_KEY=your-openai-api-key
455
+ GEMINI_API_KEY=your-gemini-api-key
456
+ LITELLM_API_KEY=your-litellm-api-key
352
457
  COMPOSIO_API_KEY=your-composio-api-key
353
458
  COMPOSIO_CLI_KEY=your-composio-cli-key
354
- [LLM_INTERFACE_PROVIDER_OF_YOUR_CHOICE]_API_KEY=your-api-key
459
+ [OTHER_LLM_INTERFACE_PROVIDER_OF_YOUR_CHOICE]_API_KEY=your-api-key
355
460
  ```
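Note that `versionhq/__init__.py` calls `load_dotenv(override=True)` (see the first hunk of this diff), so these keys are loaded into the process environment as soon as the package is imported. A minimal sanity check, assuming the `.env` file above sits in the working directory:

```python
import os

import versionhq as vhq  # importing the package triggers load_dotenv(override=True)

print(bool(os.environ.get("OPENAI_API_KEY")))  # True once the key is set in .env
```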
356
461
 
357
462
  <hr />
358
463
 
359
464
  ## Contributing
360
465
 
466
+ `versionhq` is an open source project.
467
+
468
+ ### Steps
469
+
361
470
  1. Create your feature branch (`git checkout -b feature/your-amazing-feature`)
362
471
 
363
472
  2. Create amazing features
@@ -365,6 +474,7 @@ src/
365
474
  3. Add a test function to the `tests` directory and run **pytest**.
366
475
 
367
476
  - Add secret values defined in `.github/workflows/run_test.yml` to your Github `repository secrets` located at settings > secrets & variables > Actions.
477
+
368
478
  - Run a following command:
369
479
  ```
370
480
  uv run pytest tests -vv --cache-clear
@@ -376,6 +486,8 @@ src/
376
486
 
377
487
  * Test functions within the files must begin with `test_`.
378
488
 
489
+ * Pytest priorities are `1. playground demo > 2. docs use cases > 3. other features`
490
+
379
491
 
380
492
  4. Update `docs` accordingly.
381
493
 
@@ -418,7 +530,7 @@ src/
418
530
 
419
531
  Pre-commit hooks help maintain code quality by running checks for formatting, linting, and other issues before each commit.
420
532
 
421
- * To skip pre-commit hooks (NOT RECOMMENDED)
533
+ * To skip pre-commit hooks
422
534
  ```
423
535
  git commit --no-verify -m "your-commit-message"
424
536
  ```
@@ -427,7 +539,7 @@ Pre-commit hooks help maintain code quality by running checks for formatting, li
427
539
 
428
540
  * To edit the documentation, see `docs` repository and edit the respective component.
429
541
 
430
- * We use `mkdocs` to update the docs. You can run the doc locally at http://127.0.0.1:8000/:
542
+ * We use `mkdocs` to update the docs. You can run the docs locally at http://127.0.0.1:8000/.
431
543
 
432
544
  ```
433
545
  uv run python3 -m mkdocs serve --clean
@@ -1,4 +1,4 @@
1
- versionhq/__init__.py,sha256=lfts4IiFxMVN-gRb7eoit56jqfMgOSMG1C8803J_-UQ,2400
1
+ versionhq/__init__.py,sha256=JJMdTbmmTzzublAjg3tFmOm13CKLwVsNz3cdsDP0bJQ,2780
2
2
  versionhq/_utils/__init__.py,sha256=dzoZr4cBlh-2QZuPzTdehPUCe9lP1dmRtauD7qTjUaA,158
3
3
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
4
4
  versionhq/_utils/logger.py,sha256=j9SlQPIefdVUlwpGfJY83E2BUt1ejWgZ2M2I8aMyQ3c,1579
@@ -7,7 +7,7 @@ versionhq/_utils/usage_metrics.py,sha256=NXF18dn5NNvGK7EsQ4AAghpR8ppYOjMx6ABenLL
7
7
  versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
8
8
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
9
  versionhq/agent/inhouse_agents.py,sha256=vSobrH1gXDWlaNsiges3sqETeUrEssRzQvCZCY2hQZA,2374
10
- versionhq/agent/model.py,sha256=8aJ4rdgGEph10DuB8zhJkiRWzQTZ2LGKNq1MTeQ9hM8,23342
10
+ versionhq/agent/model.py,sha256=NLHQPYqYFg9GX3NoDP1Rs3vHFlYv6Fl6JZ4QlNOYMn0,25429
11
11
  versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
12
12
  versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
13
13
  versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -28,11 +28,13 @@ versionhq/knowledge/source.py,sha256=30VXsl3uHdM0wK0Dik3XfFxpNpEiy539PBNBvg0Y4-g
28
28
  versionhq/knowledge/source_docling.py,sha256=hhHn3rS4KVsFKEPWcfllM8VxSL86PckZdAHDZNQNOq8,5411
29
29
  versionhq/knowledge/storage.py,sha256=7oxCg3W9mFjYH1YmuH9kFtTbNxquzYFjuUjd_TlsB9E,8170
30
30
  versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
31
- versionhq/llm/llm_vars.py,sha256=3fax7EXNwCw1yapIoqRMmwgGmK3O37Wm1e8uvq8ObL4,7063
32
- versionhq/llm/model.py,sha256=QacjThF43Vfel6LIvSt5KkOZAbzo1jYjwFgFfhrv7ms,17174
31
+ versionhq/llm/llm_vars.py,sha256=wjQK20cKvph6Vq1v71o4d16zBGcHlwq0bzOT_zWno7w,7041
32
+ versionhq/llm/model.py,sha256=wlzDUMEyIOm808d1vzqu9gmbB4ch-s_EUvwFR60gR80,17177
33
33
  versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
34
  versionhq/memory/contextual_memory.py,sha256=tCsOOAUnfrOL7YiakqGoi3uShzzS870TmGnlGd3z_A4,3556
35
35
  versionhq/memory/model.py,sha256=4wow2O3UuMZ0AbC2NyxddGZac3-_GjNZbK9wsA015NA,8145
36
+ versionhq/network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
37
+ versionhq/network/model.py,sha256=npSV5RUYdmkM9cMGkNLiKD4vYb3yNVyh6qCdNKkVIN8,15223
36
38
  versionhq/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
37
39
  versionhq/storage/base.py,sha256=p-Jas0fXQan_qotnRD6seQxrT2lj-uw9-SmHQhdppcs,355
38
40
  versionhq/storage/ltm_sqlite_storage.py,sha256=wdUiuwHfJocdk0UGqyrdU4S5Nae1rgsoRNu3LWmGFcI,3951
@@ -58,8 +60,8 @@ versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtg
58
60
  versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
59
61
  versionhq/tool/model.py,sha256=PO4zNWBZcJhYVur381YL1dy6zqurio2jWjtbxOxZMGI,12194
60
62
  versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
61
- versionhq-1.1.13.0.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
62
- versionhq-1.1.13.0.dist-info/METADATA,sha256=zNnwZLvuWsZPjXoCrsa7PqD8iR1sJ2zUjjj8wV0QkfA,17365
63
- versionhq-1.1.13.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
64
- versionhq-1.1.13.0.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
65
- versionhq-1.1.13.0.dist-info/RECORD,,
63
+ versionhq-1.1.13.1.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
64
+ versionhq-1.1.13.1.dist-info/METADATA,sha256=QAjiAx25V2-0nc9PGPNII3-31IHw8hHAghUfdQxZogA,21448
65
+ versionhq-1.1.13.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
66
+ versionhq-1.1.13.1.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
67
+ versionhq-1.1.13.1.dist-info/RECORD,,