versionhq 1.1.11.7__py3-none-any.whl → 1.1.12.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -1,11 +1,8 @@
+ # silence some warnings
  import warnings
-
- warnings.filterwarnings(
- "ignore",
- message="Pydantic serializer warnings:",
- category=UserWarning,
- module="pydantic.main",
- )
+ warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:", category=UserWarning, module="pydantic.main")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")
+ warnings.filterwarnings(action="ignore", module="LiteLLM:utils")

  from versionhq.agent.model import Agent
  from versionhq.clients.customer.model import Customer
@@ -17,7 +14,7 @@ from versionhq.tool.model import Tool
  from versionhq.tool.composio_tool import ComposioHandler


- __version__ = "1.1.11.7"
+ __version__ = "1.1.12.1"
  __all__ = [
  "Agent",
  "Customer",
versionhq/_utils/logger.py CHANGED
@@ -36,10 +36,15 @@ class Printer:


  class Logger(BaseModel):
+ """
+ Control CLI messages.
+ Color: red = error, yellow = warning, blue = info (from vhq), green = info (from third party)
+ """
+
  verbose: bool = Field(default=True)
  _printer: Printer = PrivateAttr(default_factory=Printer)

  def log(self, level, message, color="yellow"):
  if self.verbose:
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
- self._printer.print(f"\n{timestamp} - versionHQ - {level.upper()}: {message}", color=color)
+ self._printer.print(f"\n{timestamp} - versionHQ [{level.upper()}]: {message}", color=color)
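The log-line change above swaps the `- LEVEL:` separator for a bracketed tag, and the new docstring documents the color scheme. A minimal sketch of the resulting format, using only the stdlib (Printer's color handling omitted):

    from datetime import datetime

    def format_log(level: str, message: str) -> str:
        # mirrors the new f-string: "\n{timestamp} - versionHQ [{LEVEL}]: {message}"
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        return f"\n{timestamp} - versionHQ [{level.upper()}]: {message}"

    print(format_log("info", "Agent response: ..."))
    # -> "2025-01-30 12:00:00 - versionHQ [INFO]: Agent response: ..."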
versionhq/agent/inhouse_agents.py ADDED
@@ -0,0 +1,31 @@
+ from versionhq.agent.model import Agent
+ from versionhq.llm.model import DEFAULT_MODEL_NAME
+
+ """
+ In-house agents to be called across the project.
+ [Rules] agents' names and roles start with `vhq_`.
+ """
+
+ vhq_client_manager = Agent(
+ role="vhq-Client Manager",
+ goal="Efficiently communicate with the client on the task progress",
+ llm=DEFAULT_MODEL_NAME
+ )
+
+ vhq_task_evaluator = Agent(
+ role="vhq-Task Evaluator",
+ goal="score the output according to the given evaluation criteria.",
+ llm=DEFAULT_MODEL_NAME,
+ llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
+ maxit=1,
+ max_retry_limit=1
+ )
+
+ vhq_formation_planner = Agent(
+ role="vhq-Formation Planner",
+ goal="Plan a formation of agents based on the given task descirption.",
+ llm="gemini/gemini-2.0-flash-exp",
+ llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
+ maxit=1,
+ max_retry_limit=1
+ )
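These module-level agents are intended to be imported and reused rather than re-instantiated. A hypothetical usage sketch, with the import path taken from this diff:

    # hypothetical usage; vhq_task_evaluator is the module-level Agent defined above
    from versionhq.agent.inhouse_agents import vhq_task_evaluator

    # maxit and max_retry_limit are capped at 1 in the definition above,
    # so a failed evaluation run is not retried indefinitely
    print(vhq_task_evaluator.role)  # "vhq-Task Evaluator"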
versionhq/agent/model.py CHANGED
@@ -6,10 +6,10 @@ from typing_extensions import Self
  from dotenv import load_dotenv
  import litellm

- from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator, ConfigDict
+ from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
  from pydantic_core import PydanticCustomError

- from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+ from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME, PROVIDERS
  from versionhq.tool.model import Tool, ToolSet
  from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
  from versionhq.memory.contextual_memory import ContextualMemory
@@ -162,90 +162,47 @@ class Agent(BaseModel):
  @model_validator(mode="after")
  def set_up_llm(self) -> Self:
  """
- Set up the base model and function calling model (if any) using the LLM class.
- Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`,`respect_context_window` to the LLM class.
- The base model is selected on the client app, else use the default model.
+ Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
  """
-
  self.agent_ops_agent_name = self.role
+ self.llm = self._convert_to_llm_class(llm=self.llm)

- if isinstance(self.llm, LLM):
- llm = self._set_llm_params(self.llm)
- self.llm = llm
-
- elif isinstance(self.llm, str) or self.llm is None:
- model_name = self.llm if self.llm is not None else DEFAULT_MODEL_NAME
- llm = LLM(model=model_name)
- updated_llm = self._set_llm_params(llm)
- self.llm = updated_llm
-
- else:
- if isinstance(self.llm, dict):
- model_name = self.llm.pop("model_name", self.llm.pop("deployment_name", str(self.llm)))
- llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
- updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
- self.llm = updated_llm
-
- else:
- model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
- llm = LLM(model=model_name)
- llm_params = {
- "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
- "timeout": getattr(self.llm, "timeout", self.max_execution_time),
- "callbacks": getattr(self.llm, "callbacks", None),
- "temperature": getattr(self.llm, "temperature", None),
- "logprobs": getattr(self.llm, "logprobs", None),
- "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
- "base_url": getattr(self.llm, "base_url", None),
- }
- updated_llm = self._set_llm_params(llm, llm_params)
- self.llm = updated_llm
-
-
- """
- Set up funcion_calling LLM as well.
- Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
- """
- if self.function_calling_llm:
- if isinstance(self.function_calling_llm, LLM):
- if self.function_calling_llm._supports_function_calling() == False:
- self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
-
- updated_llm = self._set_llm_params(self.function_calling_llm)
- self.function_calling_llm = updated_llm
+ function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
+ function_calling_llm = self._convert_to_llm_class(llm=function_calling_llm)
+ if function_calling_llm._supports_function_calling():
+ self.function_calling_llm = function_calling_llm
+ return self

- elif isinstance(self.function_calling_llm, str):
- llm = LLM(model=self.function_calling_llm)

- if llm._supports_function_calling() == False:
- llm = LLM(model=DEFAULT_MODEL_NAME)
+ def _convert_to_llm_class(self, llm: Any | None) -> LLM:
+ llm = llm if llm is not None else DEFAULT_MODEL_NAME

- updated_llm = self._set_llm_params(llm)
- self.function_calling_llm = updated_llm
+ match llm:
+ case LLM():
+ return self._set_llm_params(llm=llm)

- else:
- if isinstance(self.function_calling_llm, dict):
- model_name = self.function_calling_llm.pop("model_name", self.function_calling_llm.pop("deployment_name", str(self.function_calling_llm)))
- llm = LLM(model=model_name)
- updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
- self.function_calling_llm = updated_llm
+ case str():
+ llm_obj = LLM(model=llm)
+ return self._set_llm_params(llm=llm_obj)

- else:
- model_name = (getattr(self.function_calling_llm, "model_name") or getattr(self.function_calling_llm, "deployment_name") or str(self.function_calling_llm))
- llm = LLM(model=model_name)
- llm_params = {
- "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
- "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
- "callbacks": getattr(self.function_calling_llm, "callbacks", None),
- "temperature": getattr(self.function_calling_llm, "temperature", None),
- "logprobs": getattr(self.function_calling_llm, "logprobs", None),
- "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
- "base_url": getattr(self.function_calling_llm, "base_url", None),
- }
- updated_llm = self._set_llm_params(llm, llm_params)
- self.function_calling_llm = updated_llm
+ case dict():
+ model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
+ llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+ return self._set_llm_params(llm_obj, { k: v for k, v in llm.items() if v is not None })

- return self
+ case _:
+ model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+ llm_obj = LLM(model=model_name)
+ llm_params = {
+ "max_tokens": (getattr(llm, "max_tokens") or self.max_tokens or 3000),
+ "timeout": getattr(llm, "timeout", self.max_execution_time),
+ "callbacks": getattr(llm, "callbacks", None),
+ "temperature": getattr(llm, "temperature", None),
+ "logprobs": getattr(llm, "logprobs", None),
+ "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+ "base_url": getattr(llm, "base_url", None),
+ }
+ return self._set_llm_params(llm=llm_obj, config=llm_params)


  def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
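The refactor above collapses the old isinstance branches into one `match` statement keyed on the value's type (Python 3.10+). A standalone sketch of the same dispatch pattern, with a stub standing in for versionhq's LLM class and an assumed default model name:

    # standalone sketch of the structural pattern matching used in _convert_to_llm_class
    class LLMStub:
        def __init__(self, model: str):
            self.model = model

    DEFAULT = "gpt-4o-mini"  # assumption: mirrors DEFAULT_MODEL_NAME

    def convert(llm) -> LLMStub:
        llm = llm if llm is not None else DEFAULT
        match llm:
            case LLMStub():   # already an LLM object
                return llm
            case str():       # bare model name
                return LLMStub(model=llm)
            case dict():      # config dict: pop the name, keep the rest
                return LLMStub(model=llm.pop("model_name", DEFAULT))
            case _:           # arbitrary object exposing model_name
                return LLMStub(model=getattr(llm, "model_name", DEFAULT))

    assert convert("gemini/gemini-2.0-flash-exp").model.startswith("gemini")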
@@ -257,6 +214,11 @@ class Agent(BaseModel):
  llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
  llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens

+ if llm.provider is None:
+ provider_name = llm.model.split("/")[0]
+ valid_provider = provider_name if provider_name in PROVIDERS else None
+ llm.provider = valid_provider
+
  if self.callbacks:
  llm.callbacks = self.callbacks
  llm._set_callbacks(llm.callbacks)
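The new provider fallback above relies on LiteLLM's `provider/model` naming convention. A sketch of the inference, with PROVIDERS abbreviated from versionhq/llm/llm_vars.py:

    PROVIDERS = ["openai", "gemini", "anthropic", "bedrock", "ollama"]  # abbreviated

    def infer_provider(model: str) -> str | None:
        # "gemini/gemini-2.0-flash-exp" -> "gemini"; a bare name such as
        # "gpt-4o" has no "/", so split()[0] is the model itself and fails the check
        prefix = model.split("/")[0]
        return prefix if prefix in PROVIDERS else None

    assert infer_provider("gemini/gemini-2.0-flash-exp") == "gemini"
    assert infer_provider("gpt-4o") is None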
@@ -454,7 +416,7 @@ class Agent(BaseModel):
  task.tokens = self.llm._tokens

  task_execution_counter += 1
- self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")
+ self._logger.log(level="info", message=f"Agent response: {raw_response}", color="green")
  return raw_response

  except Exception as e:
@@ -470,7 +432,7 @@ class Agent(BaseModel):
  iterations += 1

  task_execution_counter += 1
- self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")
+ self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
  return raw_response

  if not raw_response:
@@ -515,6 +477,7 @@ class Agent(BaseModel):
  task_prompt += memory.strip()


+ ## comment out for now
  # if self.team and self.team._train:
  # task_prompt = self._training_handler(task_prompt=task_prompt)
  # else:
versionhq/knowledge/source_docling.py CHANGED
@@ -12,17 +12,11 @@ try:
  except ImportError:
  import envoy
  envoy.run("uv add docling --optional docling")
-
- from docling.datamodel.base_models import InputFormat
- from docling.document_converter import DocumentConverter
- from docling.exceptions import ConversionError
- from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
- from docling_core.types.doc.document import DoclingDocument
  DOCLING_AVAILABLE = True
  except:
  DOCLING_AVAILABLE = False

- from pydantic import Field, InstanceOf
+ from pydantic import Field

  from versionhq.knowledge.source import BaseKnowledgeSource
  from versionhq.storage.utils import fetch_db_storage_path
@@ -52,11 +46,20 @@ class DoclingSource(BaseKnowledgeSource):
  ))

  def __init__(self, *args, **kwargs):
- if not DOCLING_AVAILABLE:
- raise ImportError("The docling package is required. Please install the package using: $ uv add docling.")
- else:
+ if DOCLING_AVAILABLE:
+ from docling.datamodel.base_models import InputFormat
+ from docling.document_converter import DocumentConverter
+ from docling.exceptions import ConversionError
+ from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
+ from docling_core.types.doc.document import DoclingDocument
+
  super().__init__(*args, **kwargs)

+ else:
+ raise ImportError("The docling package is required. Please install the package using: $ uv add docling.")
+ # else:
+ # super().__init__(*args, **kwargs)
+

  def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
  conv_results_iter = self.document_converter.convert_all(self.valid_file_paths)
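Moving the docling imports from module import time into `__init__` means `import versionhq` no longer hard-requires docling; the dependency is checked only when a DoclingSource is constructed. A generic sketch of this deferred optional-dependency pattern (DocumentConverter's import path is the one used in the diff itself):

    try:
        import docling  # noqa: F401  # probe only; real imports happen lazily
        DOCLING_AVAILABLE = True
    except ImportError:
        DOCLING_AVAILABLE = False

    class Source:
        def __init__(self):
            if DOCLING_AVAILABLE:
                # imported here so the module itself loads without docling
                from docling.document_converter import DocumentConverter
                self.document_converter = DocumentConverter()
            else:
                raise ImportError("The docling package is required: $ uv add docling")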
versionhq/llm/llm_vars.py CHANGED
@@ -6,17 +6,21 @@ JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_
  PROVIDERS = [
  "openai",
  "gemini",
- "sagemaker",
-
+ "openrouter",
+ "huggingface",
  "anthropic",
+ "sagemaker",
+ "bedrock",
  "ollama",
  "watson",
- "bedrock",
  "azure",
  "cerebras",
  "llama",
  ]

+ ENDPOINT_PROVIDERS = [
+ "huggingface",
+ ]

  """
  List of models available on the framework.
@@ -44,10 +48,17 @@ MODELS = {
  "anthropic": [
  "claude-3-5-sonnet-20241022",
  "claude-3-5-sonnet-20240620",
- "claude-3-sonnet-20240229",
+ "claude-3-haiku-2024030",
  "claude-3-opus-20240229",
  "claude-3-haiku-20240307",
  ],
+ "openrouter": [
+ "openrouter/deepseek/deepseek-r1:free",
+ "openrouter/qwen/qwen-2.5-72b-instruct",
+ ],
+ "huggingface": [
+ "huggingface/qwen/qwen2.5-VL-72B-Instruct",
+ ],
  # "sagemaker": [
  # "sagemaker/huggingface-text2text-flan-t5-base",
  # "sagemaker/huggingface-llm-gemma-7b",
@@ -61,13 +72,7 @@ MODELS = {
  "ollama/llama3.1",
  "ollama/mixtral",
  "ollama/mixtral-8x22B-Instruct-v0.1",
-
- ],
- "deepseek": [
- "deepseek/deepseek-reasoner",
-
  ],
-
  # "watson": [
  # "watsonx/meta-llama/llama-3-1-70b-instruct",
  # "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -83,7 +88,6 @@ MODELS = {
  "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
  "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
  "bedrock/anthropic.claude-3-opus-20240229-v1:0",
- # "bedrock/anthropic.claude-v2:1",
  "bedrock/anthropic.claude-v2",
  "bedrock/anthropic.claude-instant-v1",
  "bedrock/meta.llama3-1-405b-instruct-v1:0",
@@ -109,24 +113,17 @@ MODELS = {
  KEYS = {
  "openai": ["OPENAI_API_KEY"],
  "gemini": ["GEMINI_API_KEY"],
- "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
  "anthropic": ["ANTHROPIC_API_KEY"],
+ "huggingface": ["HUGGINGFACE_API_KEY", ],
+ "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
  }


- """
- Use base_url to specify
- """
- BASE_URLS = {
- "deepseek": "https://api.deepseek.com"
- }
-

  """
  Max input token size by the model.
  """
  LLM_CONTEXT_WINDOW_SIZES = {
- "gpt-3.5-turbo": 8192,
  "gpt-4": 8192,
  "gpt-4o": 128000,
  "gpt-4o-mini": 128000,
@@ -145,6 +142,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
  "claude-3-sonnet-20240229": 200000,
  "claude-3-opus-20240229": 200000,
  "claude-3-haiku-20240307": 200000,
+ "claude-3-5-sonnet-2024102": 200000,

  "deepseek-chat": 128000,
  "deepseek/deepseek-reasoner": 8192,
@@ -162,111 +160,18 @@ LLM_CONTEXT_WINDOW_SIZES = {
  "llama3-70b-8192": 8192,
  "llama3-8b-8192": 8192,
  "mixtral-8x7b-32768": 32768,
- "claude-3-5-sonnet-2024102": 200000,
- }
-
-
-
-
- LLM_BASE_URL_KEY_NAMES = {
- "openai": "OPENAI_API_BASE",
- "gemini": "GEMINI_API_BASE",
- "anthropic": "ANTHROPIC_API_BASE",
- }
-
- LLM_VARS = {
- "openai": [
- {
- "prompt": "Enter your OPENAI API key (press Enter to skip)",
- "key_name": "OPENAI_API_KEY",
- }
- ],
- "anthropic": [
- {
- "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
- "key_name": "ANTHROPIC_API_KEY",
- }
- ],
- "gemini": [
- {
- "prompt": "Enter your GEMINI API key (press Enter to skip)",
- "key_name": "GEMINI_API_KEY",
- }
- ],
- "watson": [
- {
- "prompt": "Enter your WATSONX URL (press Enter to skip)",
- "key_name": "WATSONX_URL",
- },
- {
- "prompt": "Enter your WATSONX API Key (press Enter to skip)",
- "key_name": "WATSONX_APIKEY",
- },
- {
- "prompt": "Enter your WATSONX Project Id (press Enter to skip)",
- "key_name": "WATSONX_PROJECT_ID",
- },
- ],
- "ollama": [
- {
- "default": True,
- "API_BASE": "http://localhost:11434",
- }
- ],
- "bedrock": [
- {
- "prompt": "Enter your AWS Access Key ID (press Enter to skip)",
- "key_name": "AWS_ACCESS_KEY_ID",
- },
- {
- "prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
- "key_name": "AWS_SECRET_ACCESS_KEY",
- },
- {
- "prompt": "Enter your AWS Region Name (press Enter to skip)",
- "key_name": "AWS_REGION_NAME",
- },
- ],
- "azure": [
- {
- "prompt": "Enter your Azure deployment name (must start with 'azure/')",
- "key_name": "model",
- },
- {
- "prompt": "Enter your AZURE API key (press Enter to skip)",
- "key_name": "AZURE_API_KEY",
- },
- {
- "prompt": "Enter your AZURE API base URL (press Enter to skip)",
- "key_name": "AZURE_API_BASE",
- },
- {
- "prompt": "Enter your AZURE API version (press Enter to skip)",
- "key_name": "AZURE_API_VERSION",
- },
- ],
- "cerebras": [
- {
- "prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
- "key_name": "model",
- },
- {
- "prompt": "Enter your Cerebras API version (press Enter to skip)",
- "key_name": "CEREBRAS_API_KEY",
- },
- ],
  }


  """
- Params for litellm.completion() func. Address common/unique params to each provider.
+ Params for litellm.completion().
  """

  PARAMS = {
  "litellm": [
  "api_base",
- "api_version,"
+ "api_version,",
  "num_retries",
  "context_window_fallback_dict",
  "fallbacks",
versionhq/llm/model.py CHANGED
@@ -1,4 +1,3 @@
- import logging
  import json
  import os
  import sys
@@ -6,14 +5,13 @@ import threading
  import warnings
  from dotenv import load_dotenv
  import litellm
- from litellm import get_supported_openai_params, JSONSchemaValidationError
+ from litellm import JSONSchemaValidationError
  from contextlib import contextmanager
  from typing import Any, Dict, List, Optional
  from typing_extensions import Self
- from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
- from pydantic_core import PydanticCustomError
+ from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict

- from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS
+ from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS
  from versionhq.tool.model import Tool, ToolSet
  from versionhq._utils.logger import Logger

@@ -22,11 +20,8 @@ load_dotenv(override=True)
  LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
  LITELLM_API_BASE = os.environ.get("LITELLM_API_BASE")
  DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
- DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")
-
- # proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
- # openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-
+ DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o-mini")
+ DEFAULT_MODEL_PROVIDER_NAME = os.environ.get("DEFAULT_MODEL_PROVIDER_NAME", "openai")

  class FilteredStream:
  def __init__(self, original_stream):
@@ -52,7 +47,8 @@ class FilteredStream:
  @contextmanager
  def suppress_warnings():
  with warnings.catch_warnings():
- warnings.filterwarnings("ignore")
+ litellm.set_verbose = False
+ warnings.filterwarnings(action="ignore")
  old_stdout = sys.stdout
  old_stderr = sys.stderr
  sys.stdout = FilteredStream(old_stdout)
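The updated context manager now also flips LiteLLM's verbose flag off before filtering warnings and swapping the streams. A reduced, stdlib-only sketch of the same shape (the real FilteredStream drops known-noisy lines rather than swallowing everything):

    import io
    import sys
    import warnings
    from contextlib import contextmanager

    @contextmanager
    def quiet():
        with warnings.catch_warnings():
            warnings.filterwarnings(action="ignore")
            old_out, old_err = sys.stdout, sys.stderr
            sys.stdout = sys.stderr = io.StringIO()  # swallow output for the block
            try:
                yield
            finally:
                # always restore the real streams, even on exception
                sys.stdout, sys.stderr = old_out, old_err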
@@ -67,10 +63,7 @@

  class LLM(BaseModel):
  """
- An LLM class to store params except for response formats which will be given in the task handling process.
- Use LiteLLM to connect with the model of choice.
- Some optional params are passed by the agent, else follow the default settings of the model provider.
- Ref. https://docs.litellm.ai/docs/completion/input
+ An LLM class to store params to send to the LLM. Use LiteLLM or custom providers for the endpoint.
  """

  _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
@@ -78,17 +71,18 @@ class LLM(BaseModel):
  _tokens: int = PrivateAttr(default=0) # accumulate total tokens used for the call
  model_config = ConfigDict(extra="allow")

- model: str = Field(default=DEFAULT_MODEL_NAME)
- provider: Optional[str] = Field(default=None, description="model provider or custom model provider")
- base_url: Optional[str] = Field(default=None, description="api base of the model provider")
- api_key: Optional[str] = Field(default=None, description="api key of the model provider")
+ model: str = Field(default=None)
+ provider: Optional[str] = Field(default=None, description="model provider")
+ endpoint_provider: Optional[str] = Field(default=None, description="custom endpoint provider for pass through llm call. must need base_url")
+ base_url: Optional[str] = Field(default=None, description="api base url for endpoint provider")
+ api_key: Optional[str] = Field(default=None, description="api key to access the model")

  # optional params
+ response_format: Optional[Any] = Field(default=None)
  timeout: Optional[float | int] = Field(default=None)
  max_tokens: Optional[int] = Field(default=None)
  max_completion_tokens: Optional[int] = Field(default=None)
  context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)
- callbacks: List[Any] = Field(default_factory=list)
  temperature: Optional[float] = Field(default=None)
  top_p: Optional[float] = Field(default=None)
  n: Optional[int] = Field(default=None)
@@ -99,8 +93,8 @@ class LLM(BaseModel):
  seed: Optional[int] = Field(default=None)
  logprobs: Optional[bool] = Field(default=None)
  top_logprobs: Optional[int] = Field(default=None)
- response_format: Optional[Any] = Field(default=None)
  tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
+ callbacks: List[Any] = Field(default_factory=list)

  # LiteLLM specific fields
  api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
@@ -114,54 +108,89 @@ class LLM(BaseModel):
  litellm.set_verbose = True
  os.environ['LITELLM_LOG'] = 'DEBUG'

+
  @model_validator(mode="after")
- def validate_base_params(self) -> Self:
+ def validate_model_providers(self) -> Self:
  """
- 1) Set up a valid model name with the provider name using the MODEL list.
- * Assign a default model and provider based on the given information when no model key is found in the MODEL list.
-
- 2) Set up other base parameters for the model and LiteLLM.
+ Validate the given model, provider, interface provider.
  """

- if self.model is None:
- self._logger.log(level="error", message="Model name is missing.", color="red")
- raise PydanticCustomError("model_missing", "The model name must be provided.", {})
+ self._init_model_name = self.model

+ if self.model is None and self.provider is None:
+ self.model = DEFAULT_MODEL_NAME
+ self.provider = DEFAULT_MODEL_PROVIDER_NAME

- self._init_model_name = self.model
- self.model = None
- self._tokens = 0
+ elif self.model is None and self.provider:
+ if self.provider not in PROVIDERS:
+ self._logger.log(level="warning", message=f"Invalid model provider is provided. We will assign a default model.", color="yellow")
+ self.model = DEFAULT_MODEL_NAME
+ self.provider = DEFAULT_MODEL_PROVIDER_NAME

- if self.provider and MODELS.get(self.provider):
- provider_model_list = MODELS.get(self.provider)
- for item in provider_model_list:
- if self.model is None:
- if item == self._init_model_name:
- self.model = item
- elif self._init_model_name in item and self.model is None:
- self.model = item
- else:
- temp_model = provider_model_list[0]
- self._logger.log(level="warning", message=f"The provided model: {self._init_model_name} is not in the list. We'll assign a model: {temp_model} from the selected model provider: {self.provider}.", color="yellow")
- self.model = temp_model
+ else:
+ provider_model_list = MODELS.get(self.provider)
+ if provider_model_list:
+ self.model = provider_model_list[0]
+ self.provider = self.provider
+ else:
+ self._logger.log(level="warning", message=f"This provider has not models to be called. We will assign a default model.", color="yellow")
+ self.model = DEFAULT_MODEL_NAME
+ self.provider = DEFAULT_MODEL_PROVIDER_NAME
+
+ elif self.model and self.provider is None:
+ model_match = [
+ item for item in [
+ [val for val in v if val == self.model][0] for k, v in MODELS.items() if [val for val in v if val == self.model]
+ ] if item
+ ]
+ model_partial_match = [
+ item for item in [
+ [val for val in v if val.find(self.model) != -1][0] for k, v in MODELS.items() if [val for val in v if val.find(self.model) != -1]
+ ] if item
+ ]
+ provider_match = [k for k, v in MODELS.items() if k == self.model]
+
+ if model_match:
+ self.model = model_match[0]
+ self.provider = [k for k, v in MODELS.items() if self.model in v][0]
+
+ elif model_partial_match:
+ self.model = model_partial_match[0]
+ self.provider = [k for k, v in MODELS.items() if [item for item in v if item.find(self.model) != -1]][0]
+
+ elif provider_match:
+ provider = provider_match[0]
+ if self.MODELS.get(provider):
+ self.provider = provider
+ self.model = self.MODELS.get(provider)[0]
+ else:
+ self.provider = DEFAULT_MODEL_PROVIDER_NAME
+ self.model = DEFAULT_MODEL_NAME
+
+ else:
+ self.model = DEFAULT_MODEL_NAME
+ self.provider = DEFAULT_MODEL_PROVIDER_NAME

  else:
- for k, v in MODELS.items():
- for item in v:
- if self.model is None:
- if self._init_model_name == item:
- self.model = item
- self.provider = k
-
- elif self.model is None and self._init_model_name in item:
- self.model = item
- self.provider = k
-
- if self.model is None:
- self._logger.log(level="warning", message=f"The provided model \'{self.model}\' is not in the list. We'll assign a default model.", color="yellow")
+ provider_model_list = MODELS.get(self.provider)
+ if self.model not in provider_model_list:
+ self._logger.log(level="warning", message=f"The provided model: {self._init_model_name} is not in the list. We will assign a default model.", color="yellow")
  self.model = DEFAULT_MODEL_NAME
- self.provider = "openai"
+ self.provider = DEFAULT_MODEL_PROVIDER_NAME

+ # trigger pass-through custom endpoint.
+ if self.provider in ENDPOINT_PROVIDERS:
+ self.endpoint_provider = self.provider
+
+ return self
+
+
+ @model_validator(mode="after")
+ def validate_model_params(self) -> Self:
+ """
+ Set up valid params to the model after setting up a valid model, provider, interface provider names.
+ """
+ self._tokens = 0

  if self.callbacks:
  self._set_callbacks(self.callbacks)
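With the validator split above, an LLM can now be constructed from a model name, a provider, both, or neither. A sketch of the expected resolutions under this diff's tables, assuming the env-var defaults fall back to gpt-4o-mini/openai:

    from versionhq.llm.model import LLM  # import path taken from this diff

    LLM()                                    # -> model "gpt-4o-mini", provider "openai"
    LLM(provider="huggingface")              # -> first entry of MODELS["huggingface"];
                                             #    endpoint_provider also set to "huggingface"
    LLM(model="claude-3-5-sonnet-20241022")  # -> provider resolved to "anthropic"
                                             #    by exact-match lookup in MODELS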
@@ -173,7 +202,8 @@
  self.api_key = os.environ.get(api_key_name, None)


- base_url_key_name = self.provider.upper() + "_API_BASE" if self.provider else None
+ base_url_key_name = self.endpoint_provider.upper() + "_API_BASE" if self.endpoint_provider else None
+
  if base_url_key_name:
  self.base_url = os.environ.get(base_url_key_name)
  self.api_base = self.base_url
@@ -181,22 +211,28 @@
  return self


- def _create_valid_params(self, config: Dict[str, Any], provider: str = None) -> Dict[str, Any]:
- params = dict()
- valid_keys = list()
+ def _create_valid_params(self, config: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Return valid params (model + litellm original params) from the given config dict.
+ """

- if not provider:
- valid_keys = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(self.provider) if self.provider else PARAMS.get("litellm") + PARAMS.get("common")
- else:
- valid_keys = PARAMS.get("common") + PARAMS.get(self.provider)
+ valid_params, valid_keys = dict(), list()
+
+ if self.model:
+ valid_keys = litellm.get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
+
+ if not valid_keys:
+ valid_keys = PARAMS.get("common")
+
+ valid_keys += PARAMS.get("litellm")

  for item in valid_keys:
  if hasattr(self, item) and getattr(self, item):
- params[item] = getattr(self, item)
- elif item in config:
- params[item] = config[item]
+ valid_params[item] = getattr(self, item)
+ elif item in config and config[item]:
+ valid_params[item] = config[item]

- return params
+ return valid_params


  def call(
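`_create_valid_params` now asks LiteLLM which OpenAI-style params the resolved model supports, instead of keying off hand-maintained per-provider lists. A standalone sketch of the filtering idea; `get_supported_openai_params` is a public LiteLLM helper:

    import litellm

    def valid_params(model: str, config: dict) -> dict:
        keys = litellm.get_supported_openai_params(model=model) or []
        # keep only configured values the model actually accepts
        return {k: v for k, v in config.items() if k in keys and v is not None}

    print(valid_params("gpt-4o-mini", {"temperature": 0.9, "top_k": 30}))
    # top_k is dropped for OpenAI models; temperature passes through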
@@ -218,21 +254,18 @@
  self._set_callbacks(self.callbacks) # passed by agent

  try:
- provider = self.provider if self.provider else "openai"
  self.response_format = { "type": "json_object" } if tool_res_as_final == True else response_format

  if not tools:
  params = self._create_valid_params(config=config)
- res = litellm.completion(messages=messages, stream=False, **params)
+ res = litellm.completion(model=self.model, messages=messages, stream=False, **params)
  self._tokens += int(res["usage"]["total_tokens"])
  return res["choices"][0]["message"]["content"]

  else:
  self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
-
- # if provider == "openai":
- params = self._create_valid_params(config=config, provider=provider)
- res = litellm.completion(messages=messages, model=self.model, tools=self.tools)
+ params = self._create_valid_params(config=config)
+ res = litellm.completion(model=self.model, messages=messages, **params)
  tool_calls = res.choices[0].message.tool_calls
  tool_res = ""

@@ -272,7 +305,7 @@ class LLM(BaseModel):
  if tool_res_as_final:
  return tool_res
  else:
- res = litellm.completione(messages=messages, model=self.model, tools=self.tools)
+ res = litellm.completion(model=self.model, messages=messages, **params)
  self._tokens += int(res["usage"]["total_tokens"])
  return res.choices[0].message.content

@@ -288,20 +321,17 @@

  def _supports_function_calling(self) -> bool:
  try:
- params = get_supported_openai_params(model=self.model)
- return "response_format" in params
+ if self.model:
+ params = litellm.get_supported_openai_params(model=self.model)
+ return "response_format" in params if params else False
  except Exception as e:
- self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
+ self._logger.log(level="warning", message=f"Failed to get supported params: {str(e)}", color="yellow")
  return False


  def _supports_stop_words(self) -> bool:
- try:
- params = get_supported_openai_params(model=self.model)
- return "stop" in params
- except Exception as e:
- self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
- return False
+ supported_params = litellm.get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider)
+ return "stop" in supported_params if supported_params else False


  def _get_context_window_size(self) -> int:
versionhq/task/evaluate.py CHANGED
@@ -1,15 +1,13 @@
  from typing import List, Optional, Dict, Any
  from typing_extensions import Self

- from pydantic import BaseModel, Field, InstanceOf, model_validator
+ from pydantic import BaseModel, Field, model_validator

  """
  Evaluate task output from accuracy, token consumption, latency perspectives, and mark the score from 0 to 1.
  """


-
-
  class ScoreFormat:
  def __init__(self, rate: float | int = 0, weight: int = 1):
  self.rate = rate
@@ -72,16 +70,16 @@ class EvaluationItem(BaseModel):


  class Evaluation(BaseModel):
- # expected_outcome: Optional[str] = Field(default=None, description="human input on expected outcome")
  items: List[EvaluationItem] = []
  latency: int = Field(default=None, description="seconds")
  tokens: int = Field(default=None, description="tokens consumed")
  responsible_agent: Any = Field(default=None, description="store agent instance that evaluates the outcome")

+
  @model_validator(mode="after")
  def set_up_responsible_agent(self) -> Self:
- from versionhq.agent.default_agents import task_evaluator
- self.responsible_agent = task_evaluator
+ from versionhq.agent.inhouse_agents import vhq_task_evaluator
+ self.responsible_agent = vhq_task_evaluator
  return self


versionhq/task/model.py CHANGED
@@ -8,7 +8,7 @@ from hashlib import md5
  from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type, TypeVar
  from typing_extensions import Annotated, Self

- from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, field_validator
+ from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
  from pydantic_core import PydanticCustomError

  from versionhq._utils.process_config import process_config
@@ -286,7 +286,7 @@ class Task(BaseModel):
  processed_by_agents: Set[str] = Field(default_factory=set, description="store responsible agents' roles")
  tools_errors: int = 0
  delegations: int = 0
- latency: int | float = 0 # execution latency in sec
+ latency: int | float = 0 # job latency in sec
  tokens: int = 0 # tokens consumed


@@ -412,37 +412,38 @@ Ref. Output image: {output_formats_to_follow}

  response_format: Dict[str, Any] = None

- # match model_provider:
- # case "openai":
- if self.response_fields:
- properties, required_fields = {}, []
- for i, item in enumerate(self.response_fields):
- if item:
- if item.data_type is dict:
- properties.update(item._format_props())
- else:
- properties.update(item._format_props())
-
- required_fields.append(item.title)
-
- response_schema = {
- "type": "object",
- "properties": properties,
- "required": required_fields,
- "additionalProperties": False,
- }
+ if model_provider == "openrouter":
+ return response_format

- response_format = {
- "type": "json_schema",
- "json_schema": { "name": "outcome", "schema": response_schema }
- }
+ else:
+ if self.response_fields:
+ properties, required_fields = {}, []
+ for i, item in enumerate(self.response_fields):
+ if item:
+ if item.data_type is dict:
+ properties.update(item._format_props())
+ else:
+ properties.update(item._format_props())
+
+ required_fields.append(item.title)
+
+ response_schema = {
+ "type": "object",
+ "properties": properties,
+ "required": required_fields,
+ "additionalProperties": False,
+ }
+
+ response_format = {
+ "type": "json_schema",
+ "json_schema": { "name": "outcome", "schema": response_schema }
+ }


- elif self.pydantic_output:
- response_format = StructuredOutput(response_format=self.pydantic_output)._format()
+ elif self.pydantic_output:
+ response_format = StructuredOutput(response_format=self.pydantic_output)._format()

- # case "gemini":
- return response_format
+ return response_format


  def _create_json_output(self, raw: str) -> Dict[str, Any]:
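For providers other than openrouter, the branch above assembles an OpenAI-style strict JSON-schema response format. A sketch of the dict it produces for a single string field (the properties entry normally comes from each response field's `_format_props()`):

    # sketch of the response_format produced above, assuming one string field "summary"
    response_schema = {
        "type": "object",
        "properties": {"summary": {"type": "string"}},
        "required": ["summary"],
        "additionalProperties": False,
    }
    response_format = {
        "type": "json_schema",
        "json_schema": {"name": "outcome", "schema": response_schema},
    }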
@@ -612,7 +613,7 @@ Ref. Output image: {output_formats_to_follow}
  task_output: InstanceOf[TaskOutput] = None
  tool_output: str | list = None
  task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
- started_at = datetime.datetime.now()
+ started_at, ended_at = datetime.datetime.now(), datetime.datetime.now()

  if self.tools:
  for item in self.tools:
@@ -638,11 +639,16 @@ Ref. Output image: {output_formats_to_follow}


  if self.tool_res_as_final == True:
+ started_at = datetime.datetime.now()
  tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
- task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=tool_output)
+ ended_at = datetime.datetime.now()
+ task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=str(tool_output) if tool_output else "")

  else:
+ started_at = datetime.datetime.now()
  raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
+ ended_at = datetime.datetime.now()
+
  json_dict_output = self._create_json_output(raw=raw_output)
  if "outcome" in json_dict_output:
  json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))
@@ -656,9 +662,8 @@ Ref. Output image: {output_formats_to_follow}
  json_dict=json_dict_output
  )

- ended_at = datetime.datetime.now()
- self.latency = (ended_at - started_at).total_seconds()

+ self.latency = (ended_at - started_at).total_seconds()
  self.output = task_output
  self.processed_by_agents.add(agent.role)

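The timing now brackets only the `agent.execute_task()` call, so tool preparation and JSON post-processing no longer inflate `latency`. A minimal sketch of the narrowed measurement:

    import datetime

    def timed(fn):
        started_at = datetime.datetime.now()
        result = fn()  # only the execution itself is timed
        ended_at = datetime.datetime.now()
        return result, (ended_at - started_at).total_seconds()

    output, latency = timed(lambda: "raw response")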
versionhq/team/model.py CHANGED
@@ -20,7 +20,6 @@ from versionhq._utils.usage_metrics import UsageMetrics

  initial_match_type = GenerateSchema.match_type

-
  def match_type(self, obj):
  if getattr(obj, "__name__", None) == "datetime":
  return core_schema.datetime_schema()
@@ -28,7 +27,6 @@ def match_type(self, obj):


  GenerateSchema.match_type = match_type
-
  warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
  load_dotenv(override=True)

@@ -108,9 +106,14 @@ class TeamOutput(BaseModel):


  class TeamMember(BaseModel):
- agent: Agent | None = Field(default=None, description="store the agent to be a member")
+ """
+ A class to store a team member
+ """
+ agent: Agent | None = Field(default=None)
  is_manager: bool = Field(default=False)
- task: Optional[Task] = Field(default=None)
+ can_share_knowledge: bool = Field(default=True, description="whether to share the agent's knowledge in the team")
+ can_share_memory: bool = Field(default=True, description="whether to share the agent's memory in the team")
+ task: Optional[Task] = Field(default=None, description="task assigned to the agent")

  @property
  def is_idling(self):
@@ -125,17 +128,18 @@ class Team(BaseModel):

  __hash__ = object.__hash__
  _execution_span: Any = PrivateAttr()
- _logger: Logger = PrivateAttr()
+ _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
  _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)

  id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
  name: Optional[str] = Field(default=None)
- members: List[TeamMember] = Field(default_factory=list, description="store agents' uuids and bool if it is manager")
+ members: List[TeamMember] = Field(default_factory=list)
+
+ # formation planning
+ planning_llm: Optional[Any] = Field(default=None, description="llm to generate formation")
+ team_tasks: Optional[List[Task]] = Field(default_factory=list, description="optional tasks for the team. can be assigned to team members later")

- # work as a team
- team_tasks: Optional[List[Task]] = Field(default_factory=list, description="optional tasks for the team")
- planning_llm: Optional[Any] = Field(default=None, description="llm to handle the planning of the team tasks (if any)")
- function_calling_llm: Optional[Any] = Field(default=None, description="llm to execute func after all agent execution (if any)")
+ # task execution rules
  prompt_file: str = Field(default="", description="path to the prompt json file to be used by the team.")
  process: TaskHandlingProcess = Field(default=TaskHandlingProcess.sequential)

@@ -150,7 +154,6 @@ class Team(BaseModel):
  )
  step_callback: Optional[Any] = Field(default=None, description="callback to be executed after each step for all agents execution")

- verbose: bool = Field(default=True)
  cache: bool = Field(default=True)
  memory: bool = Field(default=False, description="whether the team should use memory to store memories of its execution")
  execution_logs: List[Dict[str, Any]] = Field(default=[], description="list of execution logs for tasks")
@@ -236,7 +239,7 @@ class Team(BaseModel):
  return self


- def _get_responsible_agent(self, task: Task) -> Agent:
+ def _get_responsible_agent(self, task: Task) -> Agent | None:
  if task is None:
  return None
  else:
@@ -244,7 +247,7 @@ class Team(BaseModel):
  return None if len(res) == 0 else res[0]


- def _handle_team_planning(self) -> None:
+ def _handle_agent_formation(self) -> None:
  """
  Form a team considering agents and tasks given, and update `self.members` field:
  1. Idling managers to take the team tasks.
@@ -373,7 +376,7 @@ class Team(BaseModel):

  responsible_agent = self._get_responsible_agent(task)
  if responsible_agent is None:
- self._handle_team_planning()
+ self._handle_agent_formation()

  if isinstance(task, ConditionalTask):
  skipped_task_output = task._handle_conditional_task(task_outputs, futures, task_index, was_replayed)
@@ -415,7 +418,7 @@ class Team(BaseModel):
  metrics: List[UsageMetrics] = []

  if self.team_tasks or self.member_tasks_without_agent:
- self._handle_team_planning()
+ self._handle_agent_formation()

  if kwargs_before is not None:
  for before_callback in self.before_kickoff_callbacks:
@@ -432,9 +435,6 @@ class Team(BaseModel):
  agent = member.agent
  agent.team = self

- if not agent.function_calling_llm and self.function_calling_llm:
- agent.function_calling_llm = self.function_calling_llm
-
  if self.step_callback:
  agent.callbacks.append(self.step_callback)

versionhq-1.1.11.7.dist-info/LICENSE → versionhq-1.1.12.1.dist-info/LICENSE CHANGED
@@ -1,6 +1,6 @@
  MIT License

- Copyright (c) 2024 Version IO Sdn. Bhd.
+ Copyright (c) 2024-2025 Version IO Sdn. Bhd.

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
versionhq-1.1.11.7.dist-info/METADATA → versionhq-1.1.12.1.dist-info/METADATA CHANGED
@@ -1,11 +1,11 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.1.11.7
+ Version: 1.1.12.1
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License

- Copyright (c) 2024 Version IO Sdn. Bhd.
+ Copyright (c) 2024-2025 Version IO Sdn. Bhd.

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
@@ -78,12 +78,12 @@ Requires-Dist: numpy>=1.26.4; extra == "numpy"

  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
- ![PyPI](https://img.shields.io/badge/PyPI->=v1.1.11.4-blue)
- ![python ver](https://img.shields.io/badge/Python->=3.12-purple)
+ ![PyPI](https://img.shields.io/badge/PyPI-v1.1.12+-blue)
+ ![python ver](https://img.shields.io/badge/Python-3.11+-purple)
  ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)


- LLM orchestration frameworks to deploy multi-agent systems with task-based formation.
+ LLM orchestration frameworks to deploy multi-agent systems and automate complex tasks with network formations.

  **Visit:**

@@ -122,15 +122,16 @@ LLM orchestration frameworks to deploy multi-agent systems with task-based forma

  ## Key Features

- Generate mulit-agent systems depending on the complexity of the task, and execute the task with agents of choice.
+ Generate multi-agent systems based on the task complexity, execute tasks, and evaluate output based on the given criteria.

- Model-agnostic agents can handle RAG tools, tools, callbacks, and knowledge sharing among other agents.
+ Agents are model-agnostic, and can handle and share RAG tools, knowledge, memory, and callbacks among other agents. (self-learn)


  ### Agent formation
- Depending on the task complexity, agents can make a different formation.

- You can specify which formation you want them to generate, or let the agent decide if you don’t have a clear plan.
+ Agents adapt their formation based on task complexity.
+
+ You can specify a desired formation or allow the agents to determine it autonomously (default).


  | | **Solo Agent** | **Supervising** | **Network** | **Random** |
versionhq-1.1.11.7.dist-info/RECORD → versionhq-1.1.12.1.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
- versionhq/__init__.py,sha256=P2XOAcL21Bc7HXFQRtWWRJG5a1XB3GZSv86y7GHlZfc,863
+ versionhq/__init__.py,sha256=oJDsufVGH28Hszr45GDKyjfs_OKDZaNrAhMOS4f9RmY,1031
  versionhq/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
- versionhq/_utils/logger.py,sha256=U-MpeGueA6YS8Ptfy0VnU_ePsZP-8Pvkvi0tZ4s_UMg,1438
+ versionhq/_utils/logger.py,sha256=j9SlQPIefdVUlwpGfJY83E2BUt1ejWgZ2M2I8aMyQ3c,1579
  versionhq/_utils/process_config.py,sha256=jbPGXK2Kb4iyCugJ3FwRJuU0wL5Trq2x4xFQz2uOyFY,746
  versionhq/_utils/usage_metrics.py,sha256=hhq1OCW8Z4V93vwW2O2j528EyjOlF8wlTsX5IL-7asA,1106
  versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
  versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/agent/default_agents.py,sha256=Sea3xDswxxMccer1vVDhp1E5etXW3ddf2n20JTMHgqs,503
- versionhq/agent/model.py,sha256=AL0t_knk_PX-mLO8a9PKxt12MavSraBHNLynI4ezpq8,24779
+ versionhq/agent/inhouse_agents.py,sha256=DLwSREmFICF0Wv9IvEXp0gu1yOXJYOex0980Vi0NRWw,913
+ versionhq/agent/model.py,sha256=1Y5au6ue6B4UbtoBL00kVATbl_0_IzY_-SBkuzVvL94,22423
  versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
  versionhq/agent/rpm_controller.py,sha256=7AKIEPbWBq_ESOZCaiKVOGjfSPHd2qwg6-wbBlhqC0g,2367
  versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -25,11 +25,11 @@ versionhq/knowledge/_utils.py,sha256=YWRF8U533cfZes_gZqUvdj-K24MD2ri1R0gjc_aPYyc
  versionhq/knowledge/embedding.py,sha256=KfHc__1THxb5jrg1EMrF-v944RDuIr2hE0l-MtM3Bp0,6826
  versionhq/knowledge/model.py,sha256=n7kU4jQ24BUIxwosSVRK8tYhAFYhgc4yf7e4Q-bq4bk,1832
  versionhq/knowledge/source.py,sha256=WOARChmm_cNtBD-xGo4RoYmcuodzdalctXI-gDBCW6k,13610
- versionhq/knowledge/source_docling.py,sha256=uj7mX1VjUr3cucAjZCuRcrKNQdae38I8Y7KyXrcqaS8,5322
+ versionhq/knowledge/source_docling.py,sha256=hhHn3rS4KVsFKEPWcfllM8VxSL86PckZdAHDZNQNOq8,5411
  versionhq/knowledge/storage.py,sha256=7oxCg3W9mFjYH1YmuH9kFtTbNxquzYFjuUjd_TlsB9E,8170
  versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/llm/llm_vars.py,sha256=f8kPWKYLt5pPDqXQ4-McoUySM15J4N7a3tHMoMaxNzs,9264
- versionhq/llm/model.py,sha256=FACbUOPmhyatO34sXj7KI4HlHUmjujc-7KX8MfInpLc,14064
+ versionhq/llm/llm_vars.py,sha256=asJtkKCcD0WWIbyVn7CYOWg-WZ6MSKS9lIRaYIkdib4,6778
+ versionhq/llm/model.py,sha256=wSjRGyk9AZtvDqNfcyyeOMoV_hpTaUHCM849hBF0MhU,15145
  versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/memory/contextual_memory.py,sha256=tCsOOAUnfrOL7YiakqGoi3uShzzS870TmGnlGd3z_A4,3556
  versionhq/memory/model.py,sha256=6Sy-cnrhHNIx3ZN38uNO7d8YywIl_uo_OvDVzVM-w14,5755
@@ -41,14 +41,14 @@ versionhq/storage/rag_storage.py,sha256=fBk-RJuFRPOH4gI9E67tkhJnTBoChWocSP2PdWUY
  versionhq/storage/task_output_storage.py,sha256=gCsZywZ2SaNA1pYIsJk6BTrcpGp79TZTviZkWQF5USs,4579
  versionhq/storage/utils.py,sha256=ByYXPoEIGJYLUqz-DWjbCAnneNrH1otiYbp12SCILpM,747
  versionhq/task/__init__.py,sha256=l2r_g01i91JAGlOoHZP_Gh2WCk6mo9D19lcqt7sKMpQ,186
- versionhq/task/evaluate.py,sha256=RCaFa9N4IibAYLWKUlTn6lWiQoI7t4f_XZVUvecjTxs,3486
+ versionhq/task/evaluate.py,sha256=sG_PFotpuRbDLW0rGDULseBk1uP8I0vYYtkh1npO0KE,3374
  versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
  versionhq/task/log_handler.py,sha256=KJRrcNZgFSKhlNzvtYFnvtp6xukaF1s7ifX9u4zWrN8,1683
- versionhq/task/model.py,sha256=ACHtRG2xtcSKin70wSp4GHg_B3wnPguaoVekESbG8VU,30134
+ versionhq/task/model.py,sha256=kR4oqoT44xVbbTGK-gtVdfMz3m-riV62XumzRyqomiU,30418
  versionhq/task/structured_response.py,sha256=YxuWcDMHcZLzdxI1ihW99Y-i6nl8yXBQ5Q_dFQac8jw,4837
  versionhq/task/TEMPLATES/Description.py,sha256=bChflSWGGQo9JpnO6QX6Ng9pnONiTf-zwQ3ke4xQgSQ,357
  versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- versionhq/team/model.py,sha256=NzcRXWwP0adWL9vsnsmI-A5dOcE3199FGmGgemUB2VA,20043
+ versionhq/team/model.py,sha256=MgjqigmEDLnmDzYfVnbGnwp7MeXmZ-zsgyWR2Vb2FAU,20036
  versionhq/team/team_planner.py,sha256=UyIpw7GoRQXlgLNaojoi-G8F1sYaf5hTpLcTvWjRvlA,3596
  versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
@@ -57,8 +57,8 @@ versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtg
  versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
  versionhq/tool/model.py,sha256=7ccEnje_8LuxLVeog6pL38nToArXQXk4KY7A9hfprDo,12239
  versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
- versionhq-1.1.11.7.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
- versionhq-1.1.11.7.dist-info/METADATA,sha256=Rra00hIoAMSKGzNfYAW0MfaXZ1g2bffT37D0yCutvao,18672
- versionhq-1.1.11.7.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- versionhq-1.1.11.7.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
- versionhq-1.1.11.7.dist-info/RECORD,,
+ versionhq-1.1.12.1.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+ versionhq-1.1.12.1.dist-info/METADATA,sha256=rPZzAQlTUGo_Fh7FUyQFBELbuzxAOqnC9b83z-qvv20,18694
+ versionhq-1.1.12.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ versionhq-1.1.12.1.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+ versionhq-1.1.12.1.dist-info/RECORD,,
versionhq/agent/default_agents.py DELETED
@@ -1,15 +0,0 @@
- from versionhq.agent.model import Agent
- from versionhq.llm.model import DEFAULT_MODEL_NAME
-
- """
- List up agents to be called across the project.
- """
-
- client_manager = Agent(role="Client Manager", goal="communicate with clients on the task progress", llm=DEFAULT_MODEL_NAME)
-
- task_evaluator = Agent(
- role="Task Evaluator",
- goal="score the output according to the given evaluation criteria.",
- llm=DEFAULT_MODEL_NAME,
- llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9)
- )