versionhq 1.1.12.5__py3-none-any.whl → 1.1.13.1__py3-none-any.whl

versionhq/__init__.py CHANGED
@@ -4,16 +4,22 @@ warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:"
 warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")
 warnings.filterwarnings(action="ignore", module="LiteLLM:utils")
 
+from dotenv import load_dotenv
+load_dotenv(override=True)
+
 from versionhq.agent.model import Agent
+from versionhq.llm.model import LLM
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, PARAMS, PROVIDERS, MODELS
 from versionhq.clients.customer.model import Customer
 from versionhq.clients.product.model import Product, ProductProvider
 from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent
 from versionhq.knowledge.model import Knowledge, KnowledgeStorage
 from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
 from versionhq.knowledge.source_docling import DoclingSource
+from versionhq.network.model import TaskStatus, TaskGraph, Node, Edge, DependencyType
 from versionhq.task.model import Task, TaskOutput, ConditionalTask, ResponseField
 from versionhq.task.evaluate import Evaluation, EvaluationItem
-from versionhq.team.model import Team, TeamOutput, Formation, TeamMember, TaskHandlingProcess
+from versionhq.team.model import Team, TeamOutput, Formation, Member, TaskHandlingProcess
 from versionhq.tool.model import Tool, ToolSet
 from versionhq.tool.cache_handler import CacheHandler
 from versionhq.tool.tool_handler import ToolHandler
@@ -24,10 +30,16 @@ from versionhq.memory.model import ShortTermMemory,LongTermMemory, UserMemory, M
 from versionhq.task.formation import form_agent_network
 
 
-__version__ = "1.1.12.5"
+__version__ = "1.1.13.1"
 __all__ = [
     "Agent",
 
+    "LLM",
+    "LLM_CONTEXT_WINDOW_SIZES",
+    "PARAMS",
+    "PROVIDERS",
+    "MODELS",
+
     "Customer",
     "Product",
     "ProductProvider",
@@ -44,6 +56,12 @@ __all__ = [
     "StringKnowledgeSource",
     "DoclingSource",
 
+    "TaskStatus",
+    "TaskGraph",
+    "Node",
+    "Edge",
+    "DependencyType",
+
     "Task",
     "TaskOutput",
     "ConditionalTask",
@@ -55,7 +73,7 @@ __all__ = [
     "Team",
     "TeamOutput",
     "Formation",
-    "TeamMember",
+    "Member",
     "TaskHandlingProcess",
 
     "Tool",
@@ -3,14 +3,14 @@ from pydantic import BaseModel, Field
 
 class UsageMetrics(BaseModel):
     """
-    Model to track usage metrics for the agent/team's execution.
+    Model to track usage
     """
 
-    total_tokens: int = Field(default=0, description="Total number of tokens used")
-    prompt_tokens: int = Field(default=0, description="Number of tokens used in prompts")
-    cached_prompt_tokens: int = Field(default=0, description="Number of cached prompt tokens used")
-    completion_tokens: int = Field(default=0, description="Number of tokens used in completions")
-    successful_requests: int = Field(default=0, description="Number of successful requests made")
+    total_tokens: int = Field(default=0, description="total number of tokens used")
+    prompt_tokens: int = Field(default=0, description="number of tokens used in prompts")
+    cached_prompt_tokens: int = Field(default=0, description="number of cached prompt tokens used")
+    completion_tokens: int = Field(default=0, description="number of tokens used in completions")
+    successful_requests: int = Field(default=0, description="number of successful requests made")
 
    def add_usage_metrics(self, usage_metrics: "UsageMetrics") -> None:
        """
@@ -29,7 +29,7 @@ vhq_formation_planner = Agent(
     role="vhq-Formation Planner",
     goal="Plan a formation of agents based on the given task descirption.",
     llm="gemini/gemini-2.0-flash-exp",
-    llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
+    llm_config=dict(top_p=0.8, topK=40, temperature=0.9),
     maxit=1,
     max_retry_limit=1,
     knowledge_sources=[
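
The functional change in this hunk is the sampling configuration: top_k=30 becomes topK=40, i.e. a higher cutoff spelled with the camelCase key that Gemini's native generationConfig uses. A hedged sketch of passing the same config to your own agent (constructor fields are taken from this diff; the role/goal strings are placeholders):

    import versionhq as vhq

    agent = vhq.Agent(
        role="Demo Planner",
        goal="Demo goal",
        llm="gemini/gemini-2.0-flash-exp",
        llm_config=dict(top_p=0.8, topK=40, temperature=0.9),  # camelCase topK, per this diff
    )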
versionhq/agent/model.py CHANGED
@@ -23,25 +23,6 @@ load_dotenv(override=True)
 T = TypeVar("T", bound="Agent")
 
 
-# def mock_agent_ops_provider():
-#     def track_agent(*args, **kwargs):
-#         def noop(f):
-#             return f
-#         return noop
-#     return track_agent
-
-# track_agent = mock_agent_ops_provider()
-
-# agentops = None
-# if os.environ.get("AGENTOPS_API_KEY"):
-#     try:
-#         from agentops import track_agent
-#     except ImportError:
-#         track_agent = mock_agent_ops_provider()
-# else:
-#     track_agent = mock_agent_ops_provider()
-
-
 class TokenProcess:
     total_tokens: int = 0
     prompt_tokens: int = 0
@@ -76,10 +57,9 @@ class TokenProcess:
 # @track_agent()
 class Agent(BaseModel):
     """
-    Agent class that run on LLM.
-    Agents execute tasks alone or in the team, using RAG tools and knowledge base if any.
-    Agents will prioritize team tasks when they belong to the team.
-    * (Temp) Comment out all the optional fields except for Team and LLM settings for convenience.
+    A class to store agent information.
+    Agents must have `role`, `goal`, and `llm` = DEFAULT_MODEL_NAME as default.
+    Then run validation on `backstory`, `llm`, `tools`, `rpm` (request per min), `knowledge`, and `memory`.
     """
 
     __hash__ = object.__hash__
@@ -132,8 +112,6 @@ class Agent(BaseModel):
 
     # cache, error, ops handling
     formatting_errors: int = Field(default=0, description="number of formatting errors.")
-    agent_ops_agent_name: str = None
-    agent_ops_agent_id: str = None
 
 
    @field_validator("id", mode="before")
@@ -161,9 +139,8 @@ class Agent(BaseModel):
     @model_validator(mode="after")
     def set_up_llm(self) -> Self:
         """
-        Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
+        Set up `llm` and `function_calling_llm` as valid LLM objects using the given kwargs.
         """
-        self.agent_ops_agent_name = self.role
         self.llm = self._convert_to_llm_object(llm=self.llm)
 
         function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
@@ -177,12 +154,15 @@ class Agent(BaseModel):
         return self
 
 
-    def _convert_to_llm_object(self, llm: Any | None) -> LLM:
+    def _convert_to_llm_object(self, llm: Any = None) -> LLM:
         """
         Convert the given value to LLM object.
         When `llm` is dict or self.llm_config is not None, add these values to the LLM object after validating them.
         """
-        llm = llm if llm is not None else DEFAULT_MODEL_NAME
+        llm = llm if llm else self.llm if self.llm else DEFAULT_MODEL_NAME
+
+        if not llm:
+            pass
 
         match llm:
             case LLM():
@@ -380,16 +360,16 @@ class Agent(BaseModel):
         Set up memories: stm, ltm, and um
         """
 
-        if self.use_memory == True:
-            self.long_term_memory = self.long_term_memory if self.long_term_memory else LongTermMemory()
-            self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config)
+        # if self.use_memory == True:
+        self.long_term_memory = self.long_term_memory if self.long_term_memory else LongTermMemory()
+        self.short_term_memory = self.short_term_memory if self.short_term_memory else ShortTermMemory(agent=self, embedder_config=self.embedder_config)
 
-            if hasattr(self, "memory_config") and self.memory_config is not None:
-                user_id = self.memory_config.get("user_id", None)
-                if user_id:
-                    self.user_memory = self.user_memory if self.user_memory else UserMemory(agent=self, user_id=user_id)
-            else:
-                self.user_memory = None
+        if hasattr(self, "memory_config") and self.memory_config is not None:
+            user_id = self.memory_config.get("user_id", None)
+            if user_id:
+                self.user_memory = self.user_memory if self.user_memory else UserMemory(agent=self, user_id=user_id)
+        else:
+            self.user_memory = None
 
         return self
 
@@ -402,6 +382,78 @@ class Agent(BaseModel):
         pass
 
 
+    def update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
+        """
+        Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
+        """
+
+        if not llm and not llm_config:
+            self._logger.log(level="error", message="Missing llm or llm_config values to update", color="red")
+            pass
+
+        self.llm = llm
+        if llm_config:
+            if self.llm_config:
+                self.llm_config.update(llm_config)
+            else:
+                self.llm_config = llm_config
+
+        return self.set_up_llm()
+
+
+    def update(self, **kwargs) -> Self:
+        """
+        Update the existing agent. Address variables that require runnning set_up_x methods first, then update remaining variables.
+        """
+
+        if not kwargs:
+            self._logger.log(level="error", message="Missing values to update", color="red")
+            return self
+
+        for k, v in kwargs.items():
+            match k:
+                case "tools":
+                    self.tools = kwargs.get(k, self.tools)
+                    self.set_up_tools()
+
+                case "role" | "goal":
+                    self.role = kwargs.get("role", self.role)
+                    self.goal = kwargs.get("goal", self.goal)
+                    if not self.backstory:
+                        self.set_up_backstory()
+
+                    if self.backstory:
+                        self.backstory += f"new role: {self.role}, new goal: {self.goal}"
+
+                case "max_rpm":
+                    self.max_rpm = kwargs.get(k, self.max_rpm)
+                    self.set_up_rpm()
+
+                case "knowledge_sources":
+                    self.knowledge_sources = kwargs.get("knowledge_sources", self.knowledge_sources)
+                    self.set_up_knowledge()
+
+                case "use_memory" | "memory_config":
+                    self.use_memory = kwargs.get("use_memory", self.use_memory)
+                    self.memory_config = kwargs.get("memory_config", self.memory_config)
+                    self.set_up_memory()
+
+                case "llm" | "llm_config":
+                    self.llm = kwargs.get("llm", self.llm)
+                    self.llm_config = kwargs.get("llm_config", self.llm_config)
+                    self.update_llm(llm=self.llm, llm_config=self.llm_config)
+
+                case _:
+                    try:
+                        setattr(self, k, v)
+                    except Exception as e:
+                        self._logger.log(level="error", message=f"Failed to update the key: {k} We'll skip. Error: {str(e)}", color="red")
+                        pass
+
+        return self
+
+
+
     def invoke(
         self,
         prompts: str,
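
The substantive addition to Agent is an in-place update API: update_llm() merges new llm_config values and re-runs set_up_llm(), while update(**kwargs) routes keys with validators (tools, role/goal, max_rpm, knowledge_sources, memory settings, llm) through their set_up_* methods before falling back to plain setattr. A short usage sketch — the method signatures come from the diff, the argument values are illustrative:

    import versionhq as vhq

    agent = vhq.Agent(role="Researcher", goal="Summarize findings")

    # Swap the model and merge a new llm_config, then revalidate via set_up_llm().
    agent.update_llm(llm="gemini/gemini-2.0-flash-exp", llm_config={"temperature": 0.3})

    # Keys with validators are re-set-up; unrecognized keys go through setattr.
    agent = agent.update(goal="Summarize findings as bullet points", max_rpm=10)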
versionhq/llm/llm_vars.py CHANGED
@@ -1,4 +1,3 @@
-from enum import Enum
 from typing import Type
 
 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
@@ -55,6 +54,11 @@ MODELS = {
     "openrouter": [
         "openrouter/deepseek/deepseek-r1",
         "openrouter/qwen/qwen-2.5-72b-instruct",
+        "openrouter/google/gemini-2.0-flash-thinking-exp:free",
+        "openrouter/google/gemini-2.0-flash-thinking-exp-1219:free",
+        "openrouter/google/gemini-2.0-flash-001",
+        "openrouter/meta-llama/llama-3.3-70b-instruct",
+        "openrouter/mistralai/mistral-large-2411",
     ],
     "huggingface": [
         "huggingface/qwen/qwen2.5-VL-72B-Instruct",
versionhq/llm/model.py CHANGED
@@ -111,7 +111,7 @@ class LLM(BaseModel):
 
 
     @model_validator(mode="after")
-    def validate_model_providers(self) -> Self:
+    def validate_model_and_provider(self) -> Self:
         """
         Validate the given model, provider, interface provider.
         """