versionhq 1.2.2.5__py3-none-any.whl → 1.2.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/agent/model.py +15 -41
- versionhq/llm/llm_vars.py +32 -73
- versionhq/llm/model.py +62 -45
- versionhq/task/model.py +3 -3
- {versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/METADATA +47 -49
- {versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/RECORD +10 -10
- {versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/WHEEL +1 -1
- {versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/LICENSE +0 -0
- {versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
versionhq/agent/model.py
CHANGED
@@ -68,10 +68,10 @@ class Agent(BaseModel):
     # llm settings cascaded to the LLM model
     llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
     func_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
-    respect_context_window: bool = Field(default=True,description="keep messages under the context window size")
+    respect_context_window: bool = Field(default=True, description="keep messages under the context window size")
     max_execution_time: Optional[int] = Field(default=None, description="max. task execution time in seconds")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute")
-    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM
+    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM class")

     # # cache, error, ops handling
     # formatting_errors: int = Field(default=0, description="number of formatting errors.")
@@ -276,8 +276,8 @@ class Agent(BaseModel):
                 return self._set_llm_params(llm=llm, config=self.llm_config)

             case str():
-
-                return self._set_llm_params(llm=
+                llm = LLM(model=llm)
+                return self._set_llm_params(llm=llm, config=self.llm_config)

             case dict():
                 model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
@@ -287,53 +287,21 @@ class Agent(BaseModel):

             case _:
                 model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-
+                llm = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
                 llm_params = {
-                    "max_tokens": (getattr(llm, "max_tokens") or 3000),
                     "timeout": getattr(llm, "timeout", self.max_execution_time),
                     "callbacks": getattr(llm, "callbacks", None),
-                    "
-                    "logprobs": getattr(llm, "logprobs", None),
-                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "llm_config": getattr(llm, "llm_config", None),
                     "base_url": getattr(llm, "base_url", None),
                 }
                 config = llm_params.update(self.llm_config) if self.llm_config else llm_params
-                return self._set_llm_params(llm=
+                return self._set_llm_params(llm=llm, config=config)


    def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
        """
        Add valid params to the LLM object.
        """
-
-        import litellm
-        from versionhq.llm.llm_vars import PARAMS
-
-        valid_config = {k: v for k, v in config.items() if v} if config else {}
-
-        if valid_config:
-            valid_keys = list()
-            try:
-                valid_keys = litellm.get_supported_openai_params(model=llm.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
-                if not valid_keys:
-                    valid_keys = PARAMS.get("common")
-            except:
-                valid_keys = PARAMS.get("common")
-
-            valid_keys += PARAMS.get("litellm")
-
-            for key in valid_keys:
-                if key in valid_config and valid_config[key]:
-                    val = valid_config[key]
-                    if [key == k for k, v in LLM.model_fields.items()]:
-                        setattr(llm, key, val)
-                    else:
-                        llm.other_valid_config.update({ key: val})
-
-
-        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
-        # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
-
        if llm.provider is None:
            provider_name = llm.model.split("/")[0]
            valid_provider = provider_name if provider_name in PROVIDERS else None
@@ -346,6 +314,12 @@ class Agent(BaseModel):
        if self.respect_context_window == False:
            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE

+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+
+        if config:
+            llm.llm_config = {k: v for k, v in config.items() if v or v == False}
+            llm.setup_config()
+
        return llm

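The config filter above keeps values that are truthy or explicitly `False` (`if v or v == False`), so flags such as `stream=False` survive while `None` and empty values are dropped. A minimal standalone sketch of that filter, with a hypothetical config dict:

```python
# Hypothetical config dict; only the filter expression is taken from the diff above.
config = {"temperature": 0.7, "stream": False, "top_p": None, "stop": []}

filtered = {k: v for k, v in config.items() if v or v == False}

assert filtered == {"temperature": 0.7, "stream": False}  # None and [] are dropped
```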
@@ -494,7 +468,7 @@ class Agent(BaseModel):
         Defines and executes a task when it is not given and returns TaskOutput object.
         """

-        if not self.role
+        if not self.role:
             return None

         from versionhq.task.model import Task
@@ -504,7 +478,7 @@ class Agent(BaseModel):
             steps: list[str]

         task = Task(
-            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
+            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
             pydantic_output=Output,
             tool_res_as_final=tool_res_as_final,
         )
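With the new fallback `{self.goal if self.goal else self.role}`, `start()` no longer requires a goal: an agent created with only a role still produces a usable default task description. A sketch mirroring the README's minimal launch flow (see the METADATA diff below):

```python
import versionhq as vhq

# No goal is set here, so the generated task description falls back to the role.
agent = vhq.Agent(role="Marketer")
res = agent.start()

assert isinstance(res, vhq.TaskOutput)
```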
versionhq/llm/llm_vars.py
CHANGED
@@ -6,30 +6,16 @@ PROVIDERS = [
     "openai",
     "gemini",
     "openrouter",
-    "huggingface",
     "anthropic",
-    "sagemaker",
     "bedrock",
-    "
-    "
-    "azure",
-    "cerebras",
-    "llama",
+    "bedrock/converse",
+    "huggingface",
 ]

 ENDPOINT_PROVIDERS = [
     "huggingface",
 ]

-"""
-List of models available on the framework.
-Model names align with the LiteLLM's key names defined in the JSON URL.
-Provider names align with the custom provider or model provider names.
--> model_key = custom_provider_name/model_name
-
-Option
-litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=1)
-"""

 MODELS = {
     "openai": [
@@ -45,11 +31,10 @@ MODELS = {
         "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [
-        "claude-3-
-        "claude-3-5-
-        "claude-3-
-        "claude-3-opus-
-        "claude-3-haiku-20240307",
+        "claude-3-7-sonnet-latest",
+        "claude-3-5-haiku-latest",
+        "claude-3-5-sonnet-latest",
+        "claude-3-opus-latest",
     ],
     "openrouter": [
         "openrouter/deepseek/deepseek-r1",
@@ -59,71 +44,42 @@ MODELS = {
         "openrouter/google/gemini-2.0-flash-001",
         "openrouter/meta-llama/llama-3.3-70b-instruct",
         "openrouter/mistralai/mistral-large-2411",
+        "openrouter/cohere/command-r-plus",
+        "openrouter/databricks/dbrx-instruct",
     ],
-    "huggingface": [
-        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
-    ],
-    # "sagemaker": [
-    #     "sagemaker/huggingface-text2text-flan-t5-base",
-    #     "sagemaker/huggingface-llm-gemma-7b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-8b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-70b",
-    #     "sagemaker/huggingface-llm-mistral-7b"
-    # ], #https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-latest.html
-    "ollama": [
-        "ollama/llama3.1",
-        "ollama/mixtral",
-        "ollama/mixtral-8x22B-Instruct-v0.1",
-    ],
-    # "watson": [
-    #     "watsonx/meta-llama/llama-3-1-70b-instruct",
-    #     "watsonx/meta-llama/llama-3-1-8b-instruct",
-    #     "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
-    #     "watsonx/meta-llama/llama-3-2-1b-instruct",
-    #     "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
-    #     "watsonx/meta-llama/llama-3-405b-instruct",
-    #     "watsonx/mistral/mistral-large",
-    #     "watsonx/ibm/granite-3-8b-instruct",
-    # ],
     "bedrock": [
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/meta.llama3-1-8b-instruct-v1:0",
-        "bedrock/meta.llama3-70b-instruct-v1:0",
-        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/converse/us.meta.llama3-3-70b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+        "bedrock/mistral.mistral-large-2407-v1:0",
         "bedrock/amazon.titan-text-lite-v1",
         "bedrock/amazon.titan-text-express-v1",
+        "bedrock/amazon.titan-text-premier-v1:0",
+        "bedrock/cohere.command-r-plus-v1:0",
+        "bedrock/cohere.command-r-v1:0",
         "bedrock/cohere.command-text-v14",
-        "bedrock/
-
-        "
-        "bedrock/meta.llama2-70b-chat-v1",
-        "bedrock/mistral.mistral-7b-instruct-v0:2",
-        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+        "bedrock/cohere.command-light-text-v14",
+    ],
+    "huggingface": [
+        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
     ],
 }


-
-KEYS = {
+ENV_VARS = {
     "openai": ["OPENAI_API_KEY"],
     "gemini": ["GEMINI_API_KEY"],
     "anthropic": ["ANTHROPIC_API_KEY"],
     "huggingface": ["HUGGINGFACE_API_KEY", ],
-    "
+    "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
 }


-
 """
 Max input token size by the model.
 """
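The renamed `ENV_VARS` table (formerly `KEYS`) now carries the AWS credential variable names for `bedrock` and `sagemaker`. A sketch of how such a table can be consumed, mirroring the `_set_env_vars` helper added in `versionhq/llm/model.py` below (the `collect_credentials` helper is hypothetical):

```python
import os

# Stand-in copy of two ENV_VARS entries from the diff above.
ENV_VARS = {
    "openai": ["OPENAI_API_KEY"],
    "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
}

def collect_credentials(provider: str) -> dict:
    """Return {lower-cased var name: value} for every env var that is set."""
    cred = {}
    for var in ENV_VARS.get(provider, []):
        val = os.environ.get(var)
        if val:
            cred[var.lower()] = val
    return cred

print(collect_credentials("bedrock"))  # e.g. {'aws_access_key_id': '...', 'aws_region_name': '...'}
```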
@@ -193,8 +149,8 @@ PARAMS = {
         "response_format",
         "n",
         "stop",
-        "base_url",
-        "api_key",
+        # "base_url",
+        # "api_key",
     ],
     "openai": [
         "timeout",
@@ -216,7 +172,10 @@ PARAMS = {
     ],
     "gemini": [
         "topK",
-    ]
+    ],
+    "bedrock": {
+        "top-k",
+    }
 }

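These `PARAMS` lists feed the key-union logic in `_create_valid_params` (see `versionhq/llm/model.py` below): provider-supported keys fall back to `PARAMS["common"]` when litellm returns none, then the litellm-specific keys are appended. A sketch with stand-in key lists:

```python
# Stand-in PARAMS table; the real one also carries openai/gemini/bedrock entries.
PARAMS = {
    "common": ["temperature", "response_format", "n", "stop"],
    "litellm": ["api_base", "api_version", "num_retries", "fallbacks"],
}

valid_keys = PARAMS.get("common") + PARAMS.get("litellm")

assert "num_retries" in valid_keys
assert "api_key" not in valid_keys  # now commented out of the common list
```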
versionhq/llm/model.py
CHANGED
@@ -4,14 +4,15 @@ import sys
 import threading
 import warnings
 from dotenv import load_dotenv
-import litellm
-from litellm import JSONSchemaValidationError
 from contextlib import contextmanager
 from typing import Any, Dict, List, Optional
 from typing_extensions import Self
+
+import litellm
+from litellm import JSONSchemaValidationError
 from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict

-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS, ENV_VARS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq._utils.logger import Logger

@@ -68,36 +69,23 @@ class LLM(BaseModel):

     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
     _init_model_name: str = PrivateAttr(default=None)
-
-
+    # _init_config: Optional[Dict[str, Any]] = PrivateAttr(default_factory=dict) # stores llm config passed by client or agent
+    _tokens: int = PrivateAttr(default=0) # aggregate number of tokens consumed

     model: str = Field(default=None)
     provider: Optional[str] = Field(default=None, description="model provider")
-    endpoint_provider: Optional[str] = Field(default=None, description="custom endpoint provider for pass through llm call.
+    endpoint_provider: Optional[str] = Field(default=None, description="custom endpoint provider for pass through llm call. require base_url")
     base_url: Optional[str] = Field(default=None, description="api base url for endpoint provider")
-    api_key: Optional[str] = Field(default=None, description="api key to access the model")

     # optional params
     response_format: Optional[Any] = Field(default=None)
+    llm_config: Optional[Dict[str, Any]] = Field(default_factory=dict, description="stores valid llm config params")
+    callbacks: Optional[List[Any]] = Field(default_factory=list)
+    tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="stores a list of tool properties")
     timeout: Optional[float | int] = Field(default=None)
-    max_tokens: Optional[int] = Field(default=None)
-    max_completion_tokens: Optional[int] = Field(default=None)
     context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)
-
-
-    n: Optional[int] = Field(default=None)
-    stop: Optional[str | List[str]] = Field(default=None)
-    presence_penalty: Optional[float] = Field(default=None)
-    frequency_penalty: Optional[float] = Field(default=None)
-    logit_bias: Optional[Dict[int, float]] = Field(default=None)
-    seed: Optional[int] = Field(default=None)
-    logprobs: Optional[bool] = Field(default=None)
-    top_logprobs: Optional[int] = Field(default=None)
-    tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
-    callbacks: List[Any] = Field(default_factory=list)
-    other_valid_config: Optional[Dict[str, Any]] = Field(default_factory=dict, description="store other valid values in dict to cascade to the model")
-
-    # LiteLLM specific fields
+
+    # LiteLLM specific config
     api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
     api_version: Optional[str] = Field(default=None)
     num_retries: Optional[int] = Field(default=1)
@@ -105,6 +93,8 @@ class LLM(BaseModel):
     fallbacks: Optional[List[Any]]= Field(default=None, description="A list of model names + params to be used, in case the initial call fails")
     metadata: Optional[Dict[str, Any]] = Field(default=None)

+    model_config = ConfigDict(extra="allow")
+
     litellm.drop_params = True
     litellm.set_verbose = True
     os.environ['LITELLM_LOG'] = 'DEBUG'
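`model_config = ConfigDict(extra="allow")` is what lets the long list of per-parameter fields (`n`, `stop`, `seed`, ...) be deleted: unknown keyword arguments are now kept on the instance instead of raising a validation error. A toy sketch of that pydantic behavior, not the real `LLM` class:

```python
from typing import Optional
from pydantic import BaseModel, ConfigDict

class ToyLLM(BaseModel):
    model_config = ConfigDict(extra="allow")
    model: Optional[str] = None

llm = ToyLLM(model="gpt-4o", seed=42)  # "seed" is no longer a declared field
assert llm.seed == 42                  # ...but it remains accessible as an extra attribute
```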
@@ -187,9 +177,9 @@ class LLM(BaseModel):


     @model_validator(mode="after")
-    def
+    def setup_config(self) -> Self:
         """
-        Set up valid params
+        Set up valid config params after setting up a valid model, provider, interface provider names.
         """
         self._tokens = 0

@@ -198,28 +188,28 @@ class LLM(BaseModel):

         self.context_window_size = self._get_context_window_size()

-        api_key_name = self.provider.upper() + "_API_KEY" if self.provider else None
-        if api_key_name:
-            self.api_key = os.environ.get(api_key_name, None)
-
         base_url_key_name = self.endpoint_provider.upper() + "_API_BASE" if self.endpoint_provider else None
-
         if base_url_key_name:
             self.base_url = os.environ.get(base_url_key_name)
             self.api_base = self.base_url

+        if self.llm_config:
+            self._create_valid_params(config=self.llm_config)
+
         return self


     def _create_valid_params(self, config: Dict[str, Any]) -> Dict[str, Any]:
         """
-
+        Returns valid params incl. model + litellm original params) from the given config dict.
         """

-
+        valid_config, valid_keys = dict(), list()

         if self.model:
-            valid_keys = litellm.get_supported_openai_params(
+            valid_keys = litellm.get_supported_openai_params(
+                model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion"
+            )

         if not valid_keys:
             valid_keys = PARAMS.get("common")
@@ -227,14 +217,38 @@ class LLM(BaseModel):
         valid_keys += PARAMS.get("litellm")

         for item in valid_keys:
-            if hasattr(self, item) and getattr(self, item):
-
-            elif item in self.other_valid_config and self.other_valid_config[item]:
-                valid_params[item] = self.other_valid_config[item]
-            elif item in config and config[item]:
-                valid_params[item] = config[item]
+            if hasattr(self, item) and (getattr(self, item) or getattr(self, item) == False):
+                valid_config[item] = getattr(self, item)

-
+            elif item in self.llm_config and (self.llm_config[item] or self.llm_config[item]==False):
+                valid_config[item] = self.llm_config[item]
+
+            elif item in config and (config[item] or config[item] == False):
+                valid_config[item] = config[item]
+
+            else:
+                pass
+
+        self.llm_config = valid_config
+        return valid_config
+
+
+    def _set_env_vars(self) -> Dict[str, Any]:
+        if self.provider == "openai":
+            return {}
+
+        cred = dict()
+        env_vars = ENV_VARS.get(self.provider, None) if self.provider else None
+
+        if not env_vars:
+            return {}
+
+        for item in env_vars:
+            val = os.environ.get(item, None)
+            if val:
+                cred[str(item).lower()] = val
+
+        return cred


     def _supports_function_calling(self) -> bool:
@@ -242,6 +256,8 @@ class LLM(BaseModel):
             if self.model:
                 params = litellm.get_supported_openai_params(model=self.model)
                 return "response_format" in params if params else False
+            else:
+                return False
         except Exception as e:
             self._logger.log(level="warning", message=f"Failed to get supported params: {str(e)}", color="yellow")
             return False
@@ -288,15 +304,16 @@ class LLM(BaseModel):

         with suppress_warnings():
             if len(self.callbacks) > 0:
-                self._set_callbacks(self.callbacks)
+                self._set_callbacks(self.callbacks)

             try:
                 res, tool_res = None, ""
+                cred = self._set_env_vars()

                 if not tools:
                     self.response_format = response_format
                     params = self._create_valid_params(config=config)
-                    res = litellm.completion(model=self.model, messages=messages, stream=False, **params)
+                    res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
                     self._tokens += int(res["usage"]["total_tokens"])
                     return res["choices"][0]["message"]["content"]

@@ -305,7 +322,7 @@ class LLM(BaseModel):
                 self.response_format = { "type": "json_object" } if tool_res_as_final and self.provider != "gemini" else response_format
                 self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
                 params = self._create_valid_params(config=config)
-                res = litellm.completion(model=self.model, messages=messages, **params)
+                res = litellm.completion(model=self.model, messages=messages, **params, **cred)
                 tool_calls = res.choices[0].message.tool_calls

                 if tool_calls:
@@ -367,7 +384,7 @@ class LLM(BaseModel):
                     if tool_res_as_final:
                         return tool_res
                     else:
-                        res = litellm.completion(model=self.model, messages=messages, **params)
+                        res = litellm.completion(model=self.model, messages=messages, **params, **cred)
                         self._tokens += int(res["usage"]["total_tokens"])
                         return res.choices[0].message.content

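The net effect of `_set_env_vars` plus `_create_valid_params`: for non-OpenAI providers, credentials named in `ENV_VARS` are read from the environment, lower-cased, and splatted into `litellm.completion` together with the validated params. A minimal end-to-end sketch under those assumptions (LiteLLM accepts `aws_access_key_id` / `aws_secret_access_key` / `aws_region_name` kwargs for Bedrock models; the params dict here is hypothetical):

```python
import os
import litellm

# Credentials collected the way _set_env_vars does: lower-cased env var names.
cred = {
    k.lower(): os.environ[k]
    for k in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME")
    if k in os.environ
}
params = {"temperature": 0.2, "num_retries": 1}  # hypothetical validated config

res = litellm.completion(
    model="bedrock/us.meta.llama3-2-3b-instruct-v1:0",
    messages=[{"role": "user", "content": "Say hi."}],
    stream=False,
    **params,
    **cred,
)
print(res["choices"][0]["message"]["content"])
```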
|
versionhq/task/model.py
CHANGED
@@ -709,11 +709,11 @@ Ref. Output image: {output_formats_to_follow}
         # )
         # self._save_file(content)

-
-        # successful output will be evaluated and stored in the logs
         if raw_output:
             if self.should_evaluate:
                 task_output.evaluate(task=self)
+                self.output = task_output
+
             self._create_short_and_long_term_memories(agent=agent, task_output=task_output)

             if self.callback and isinstance(self.callback, Callable):
@@ -723,7 +723,7 @@ Ref. Output image: {output_formats_to_follow}
                 valid_kwargs = { k: kwargs[k] if k in kwargs else None for k in valid_keys }
                 callback_res = self.callback(**valid_kwargs)
                 task_output.callback_output = callback_res
-
+                self.output = task_output
                 self._store_logs()

         return task_output
{versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.2.5
+Version: 1.2.2.7
 Summary: An agentic orchestration framework for building agent networks that handle task automation.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -66,6 +66,7 @@ Requires-Dist: envoy>=0.0.3
 Requires-Dist: composio-core==0.7.0
 Requires-Dist: networkx>=3.4.2
 Requires-Dist: matplotlib>=3.10.0
+Requires-Dist: boto3>=1.37.1
 Provides-Extra: docling
 Requires-Dist: docling>=2.17.0; extra == "docling"
 Provides-Extra: mem0ai
@@ -98,10 +99,10 @@ Agentic orchestration framework for multi-agent networks and task graphs for com

 **Visit:**

-- [Playground](https://versi0n.io/
+- [Playground](https://versi0n.io/)
 - [Docs](https://docs.versi0n.io)
-- [Github
-- [
+- [Github](https://github.com/versionHQ/)
+- [Python SDK](https://pypi.org/project/versionhq/)

 <hr />

@@ -116,9 +117,10 @@ Agentic orchestration framework for multi-agent networks and task graphs for com
 - [Optimization](#optimization)
 - [Quick Start](#quick-start)
 - [Package installation](#package-installation)
-- [
-- [
-- [
+- [Launching an agent](#launching-an-agent)
+- [Automating workflows](#automating-workflows)
+- [Executing a single task](#executing-a-single-task)
+- [Supervising agents](#supervising-agents)
 - [Technologies Used](#technologies-used)
 - [Project Structure](#project-structure)
 - [Setting Up Your Project](#setting-up-your-project)
@@ -141,7 +143,7 @@ Agentic orchestration framework for multi-agent networks and task graphs for com

 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.

-Agents are model-agnostic, and will improve task output, while
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.


 ### Agent Network
@@ -246,33 +248,45 @@ agent.update(

 ### Package installation

-
-
-
+```
+pip install versionhq
+```

 (Python 3.11 / 3.12)

-### Forming a agent network

-
-import versionhq as vhq
+### Launching an agent

-network = work(
-    task="YOUR AMAZING TASK OVERVIEW",
-    expected_outcome="YOUR OUTCOME EXPECTATION",
-)
-res, tg = network.launch()
-```

-
+```python
+import versionhq as vhq

+agent = vhq.Agent(role="Marketer")
+res = agent.start()
+
+assert isinstance(res, vhq.TaskOutput) # contains agent's response in text, JSON, Pydantic formats with usage recordes and eval scores.
+```

-### Executing tasks

-
+### Automating workflows

-
+```python
+import versionhq as vhq

+network = vhq.form_agent_network(
+    task="draft a promo plan",
+    expected_outcome="marketing plan, budget, KPI targets",
+)
+res, tg = network.launch()
+
+assert isinstance(res, vhq.TaskOutput) # the latest output from the workflow
+assert isinstance(tg, vhq.TaskGraph) # contains task nodes and edges that connect the nodes with dep-met conditions
+```
+
+
+### Executing a single task
+
+You can simply build and execute a task using `Task` class.

 ```python
 import versionhq as vhq
@@ -292,34 +306,20 @@ task = vhq.Task(
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )

-res = task.execute(context="
-
+res = task.execute(context="testing a task function")
 assert isinstance(res, vhq.TaskOutput)
 ```

-This will return a `TaskOutput` object that stores response in plane text, JSON, and Pydantic model: `CustomOutput` formats with a callback result, tool output (if given), and evaluation results (if given).

-
-res == vhq.TaskOutput(
-    task_id=UUID('<TASK UUID>'),
-    raw='{\"test1\":\"random str\", \"test2\":[\"str item 1\", \"str item 2\", \"str item 3\"]}',
-    json_dict={'test1': 'random str', 'test2': ['str item 1', 'str item 2', 'str item 3']},
-    pydantic=<class '__main__.CustomOutput'>,
-    tool_output=None,
-    callback_output='Hi! Here is the result: random str, str item 1, str item 2, str item 3', # returned a plain text summary
-    evaluation=None
-)
-```
-
-### Supervising
+### Supervising agents

 To create an agent network with one or more manager agents, designate members using the `is_manager` tag.

 ```python
 import versionhq as vhq

-agent_a = vhq.Agent(role="
-agent_b = vhq.Agent(role="
+agent_a = vhq.Agent(role="Member", llm="gpt-4o")
+agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")

 task_1 = vhq.Task(
     description="Analyze the client's business model.",
@@ -341,11 +341,9 @@ network =vhq.AgentNetwork(
 )
 res, tg = network.launch()

-assert isinstance(res, vhq.
-assert
-assert
-
-assert isinstance(tg, vhq.TaskGraph)
+assert isinstance(res, vhq.NetworkOutput)
+assert not [item for item in task_1.processed_agents if "vhq-Delegated-Agent" == item]
+assert [item for item in task_1.processed_agents if "agent b" == item]
 ```

 This will return a list with dictionaries with keys defined in the `ResponseField` of each task.
@@ -509,7 +507,7 @@ Create `.env` file in the project root and add secret vars following `.env.sample`

 * Test functions within the files must begin with `test_`.

-* Pytest priorities are `1. playground
+* Pytest priorities are `1. playground > 2. docs use cases > 3. other features`


 4. Update `docs` accordingly.
@@ -597,4 +595,4 @@ Common issues and solutions:
 ## Frequently Asked Questions (FAQ)
 **Q. Where can I see if the agent is working?**

-A. Visit [playground](https://versi0n.io
+A. Visit [playground](https://versi0n.io).
{versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256
+versionhq/__init__.py,sha256=-pPBsfa-w3JKQ9z2qbo57J_UO_x-c7j02SgPIHhCh-U,2980
 versionhq/_utils/__init__.py,sha256=d-vYVcORZKG-kkLe_fzE8VbViDpAk9DDOKe2fVK25ew,178
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/llm_as_a_judge.py,sha256=RM0oYfoeanuUyUL3Ewl6_8Xn1F5Axd285UMH46kxG1I,2378
@@ -8,7 +8,7 @@ versionhq/_utils/usage_metrics.py,sha256=xgYGRW3OTuK9EJyi3QYJeYcJl7dL27olcWaLo_7
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/inhouse_agents.py,sha256=BPkvEyMH8VnZWsMeCwsGplDT_kLwlIejeRcr-6ItGqQ,2637
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=JU5Yu2ODUAfODS5brm4yXsVWNGjjkJqfPcGJ1NZ8VnI,25392
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -32,8 +32,8 @@ versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08
 versionhq/knowledge/source_docling.py,sha256=dcu1ITqPXwWZ_lK-6tykEKhhC82eNRTMoWRpxK9Kzls,5441
 versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/llm/llm_vars.py,sha256=
-versionhq/llm/model.py,sha256=
+versionhq/llm/llm_vars.py,sha256=oAZr03IfeJzzFvIDptrXYK-UB86epsGGpR9-Vhzpg4Q,5445
+versionhq/llm/model.py,sha256=PZm4gvQNJTW_ZwJE4MEF9JC05zbxS8RfSFQ8pFcNW-A,17146
 versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/memory/contextual_memory.py,sha256=QEMVvHuEXxY7M6-12S8HhyFKf108KfX8Zzt7paPW048,3882
 versionhq/memory/model.py,sha256=VQR1229t7GQPMItlGAHLtJrb6LrZfSoRA1DRW4z0SOU,8234
@@ -47,7 +47,7 @@ versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,74
 versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task/evaluation.py,sha256=iRLzppqwKaiGpbsr9gMbf6T7NQe6rxTA6OBcWhmiCKs,4473
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=rJmyEUM1DnX1mkN_0etYXqBJP_YADXVXZhFR5R-h8ZA,28915
 versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
 versionhq/task/TEMPLATES/Description.py,sha256=EkwJHc65G32MjWyn3rcp0ATmMaVPHuYKaykyByU5r4g,751
 versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,8 +62,8 @@ versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1
 versionhq/tool/model.py,sha256=Nc2f9frTK5tH4kh6EeEAk1Fi1w19kEXLOcsBwHCS1a4,12189
 versionhq/tool/rag_tool.py,sha256=qm_nDWs-WyDvrxZeZAL2AkswfUWGPZS4zybz0o6wOFI,3653
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.2.2.5.dist-info/LICENSE,sha256=
-versionhq-1.2.2.5.dist-info/METADATA,sha256=
-versionhq-1.2.2.5.dist-info/WHEEL,sha256=
-versionhq-1.2.2.5.dist-info/top_level.txt,sha256=
-versionhq-1.2.2.5.dist-info/RECORD,,
+versionhq-1.2.2.7.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.2.7.dist-info/METADATA,sha256=Xvu5BpJFQxN5NAY0Afc7k9LeDAR-OZrMLzoGeD4mdhc,21647
+versionhq-1.2.2.7.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+versionhq-1.2.2.7.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.2.7.dist-info/RECORD,,
{versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/LICENSE
File without changes

{versionhq-1.2.2.5.dist-info → versionhq-1.2.2.7.dist-info}/top_level.txt
File without changes