versionhq 1.1.9.12__py3-none-any.whl → 1.1.9.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/agent/model.py +103 -112
- versionhq/llm/{llm_vars.py → llm_variables.py} +150 -62
- versionhq/llm/model.py +142 -107
- versionhq/task/__init__.py +1 -1
- versionhq/task/model.py +8 -8
- versionhq/team/team_planner.py +1 -1
- {versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/METADATA +2 -2
- {versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/RECORD +12 -12
- {versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/LICENSE +0 -0
- {versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/WHEEL +0 -0
- {versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
versionhq/agent/model.py
CHANGED
@@ -1,6 +1,6 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar
+from typing import Any, Dict, List, Optional, TypeVar, Callable
 from typing_extensions import Self
 from dotenv import load_dotenv
 from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
@@ -9,8 +9,8 @@ from pydantic_core import PydanticCustomError
 from versionhq._utils.logger import Logger
 from versionhq._utils.rpm_controller import RPMController
 from versionhq._utils.usage_metrics import UsageMetrics
-from versionhq.llm.
-from versionhq.llm.model import LLM,
+from versionhq.llm.llm_variables import LLM_VARS
+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
 from versionhq.task import TaskOutputFormat
 from versionhq.task.model import ResponseField
 from versionhq.tool.model import Tool, ToolSet
@@ -87,6 +87,7 @@ class Agent(BaseModel):
     _request_within_rpm_limit: Any = PrivateAttr(default=None)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _times_executed: int = PrivateAttr(default=0)
+    config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     role: str = Field(description="role of the agent - used in summary and logs")
@@ -102,11 +103,11 @@ class Agent(BaseModel):
     allow_code_execution: Optional[bool] = Field(default=False, description="Enable code execution for the agent.")
     max_retry_limit: int = Field(default=2,description="max. number of retries for the task execution when an error occurs. cascaed to the `invoke` function")
     max_iter: Optional[int] = Field(default=25,description="max. number of iterations for an agent to execute a task")
-    step_callback: Optional[Any] = Field(default=None,description="
+    step_callback: Optional[Callable | Any] = Field(default=None, description="callback to be executed after each step of the agent execution")

     # llm settings cascaded to the LLM model
-    llm: str | InstanceOf[LLM] | Any = Field(default=None)
-    function_calling_llm: str | InstanceOf[LLM] | Any = Field(default=None)
+    llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
+    function_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
     respect_context_window: bool = Field(default=True,description="Keep messages under the context window size by summarizing content")
     max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
     max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
@@ -119,16 +120,11 @@ class Agent(BaseModel):
     response_template: Optional[str] = Field(default=None, description="Response format for the agent.")

     # config, cache, error handling
-    config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="Configuration for the agent")
     formatting_errors: int = Field(default=0, description="Number of formatting errors.")
     agent_ops_agent_name: str = None
     agent_ops_agent_id: str = None


-    def __repr__(self):
-        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
-
-
     @field_validator("id", mode="before")
     @classmethod
     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
@@ -141,7 +137,7 @@ class Agent(BaseModel):
         required_fields = ["role", "goal"]
         for field in required_fields:
             if getattr(self, field) is None:
-                raise ValueError(
+                raise ValueError(f"{field} must be provided either directly or through config")
         return self


@@ -154,109 +150,84 @@ class Agent(BaseModel):
         """

         self.agent_ops_agent_name = self.role
-        unaccepted_attributes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"]
-        callbacks = ([self.step_callback,]if self.step_callback is not None else [])
+        # unaccepted_attributes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"]

         if isinstance(self.llm, LLM):
-
-            self.llm
-            self.llm.context_window_size = (self.llm.get_context_window_size() if self.respect_context_window == True else DEFAULT_CONTEXT_WINDOW)
-            self.llm.callbacks = callbacks
+            llm = self._set_llm_params(self.llm)
+            self.llm = llm

         elif isinstance(self.llm, str) or self.llm is None:
-            model_name =
-
-
-
-                "max_tokens": self.max_tokens,
-                "callbacks": callbacks,
-                "api_key": os.environ.get("LITELLM_API_KEY", None),
-                "base_url": os.environ.get("OPENAI_API_BASE", os.environ.get("OPENAI_BASE_URL", None))
-            }
-
-            set_provider = model_name.split("/")[0] if "/" in model_name else "openai" #! REFINEME
-            for provider, env_vars in LLM_VARS.items():
-                if provider == set_provider:
-                    for env_var in env_vars:
-                        key_name = env_var.get("key_name")
-
-                        if key_name and key_name not in unaccepted_attributes:
-                            env_value = os.environ.get(key_name)
-                            if env_value:
-                                key_name = ("api_key" if "API_KEY" in key_name else key_name)
-                                key_name = ("api_base" if "API_BASE" in key_name else key_name)
-                                key_name = ("api_version" if "API_VERSION" in key_name else key_name)
-                                llm_params[key_name] = env_value
-                        elif env_var.get("default", False):
-                            for key, value in env_var.items():
-                                if key not in ["prompt", "key_name", "default"]:
-                                    if key in os.environ:
-                                        llm_params[key] = value
-            self.llm = LLM(**llm_params)
-            context_window_size = (self.llm.get_context_window_size() if self.respect_context_window == True else DEFAULT_CONTEXT_WINDOW)
-            self.llm.context_window_size = context_window_size
+            model_name = self.llm if self.llm is not None else DEFAULT_MODEL_NAME
+            llm = LLM(model=model_name)
+            updated_llm = self._set_llm_params(llm)
+            self.llm = updated_llm

         else:
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if isinstance(self.llm, dict):
+                model_name = self.llm.pop("model_name", self.llm.pop("deployment_name", str(self.llm)))
+                llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
+                updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
+                self.llm = updated_llm
+
+            else:
+                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+                llm = LLM(model=model_name)
+                llm_params = {
+                    "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
+                    "timeout": getattr(self.llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(self.llm, "callbacks", None),
+                    "temperature": getattr(self.llm, "temperature", None),
+                    "logprobs": getattr(self.llm, "logprobs", None),
+                    "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(self.llm, "base_url", None),
+                }
+                updated_llm = self._set_llm_params(llm, llm_params)
+                self.llm = updated_llm
+

         """
-        Set up funcion_calling LLM as well.
+        Set up funcion_calling LLM as well.
+        Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
         """
         if self.function_calling_llm:
             if isinstance(self.function_calling_llm, LLM):
-                self.function_calling_llm.
-
-
-
-
-                if self.respect_context_window == True
-                else DEFAULT_CONTEXT_WINDOW
-                )
-                self.function_calling_llm.context_window_size = context_window_size
+                if self.function_calling_llm._supports_function_calling() == False:
+                    self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
+
+                updated_llm = self._set_llm_params(self.function_calling_llm)
+                self.function_calling_llm = updated_llm

             elif isinstance(self.function_calling_llm, str):
-
-
-
-
-
-                )
-
-                self.function_calling_llm.get_context_window_size()
-                if self.respect_context_window == True
-                else DEFAULT_CONTEXT_WINDOW
-                )
-                self.function_calling_llm.context_window_size = context_window_size
+                llm = LLM(model=self.function_calling_llm)
+
+                if llm._supports_function_calling() == False:
+                    llm = LLM(model=DEFAULT_MODEL_NAME)
+
+                updated_llm = self._set_llm_params(llm)
+                self.function_calling_llm = updated_llm

             else:
-
-                self.function_calling_llm,
-
-
-
-
-
-                ),
-
-
-
-
-
-
-
-
+                if isinstance(self.function_calling_llm, dict):
+                    model_name = self.function_calling_llm.pop("model_name", self.function_calling_llm.pop("deployment_name", str(self.function_calling_llm)))
+                    llm = LLM(model=model_name)
+                    updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
+                    self.function_calling_llm = updated_llm
+
+                else:
+                    model_name = (getattr(self.function_calling_llm, "model_name") or getattr(self.function_calling_llm, "deployment_name") or str(self.function_calling_llm))
+                    llm = LLM(model=model_name)
+                    llm_params = {
+                        "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
+                        "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
+                        "callbacks": getattr(self.function_calling_llm, "callbacks", None),
+                        "temperature": getattr(self.function_calling_llm, "temperature", None),
+                        "logprobs": getattr(self.function_calling_llm, "logprobs", None),
+                        "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                        "base_url": getattr(self.function_calling_llm, "base_url", None),
+                    }
+                    updated_llm = self._set_llm_params(llm, llm_params)
+                    self.function_calling_llm = updated_llm
+

         return self


@@ -315,7 +286,32 @@ class Agent(BaseModel):
         return self


-    def
+    def _set_llm_params(self, llm: LLM, kwargs: Dict[str, Any] = None) -> LLM:
+        """
+        After setting up an LLM instance, add params to the instance.
+        Prioritize the agent's settings over the model's base setups.
+        """
+
+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+        llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+
+        if self.step_callback is not None:
+            llm.callbacks = [self.step_callback, ]
+            llm._set_callbacks(llm.callbacks)
+
+        if self.respect_context_window == False:
+            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+
+        if kwargs:
+            for k, v in kwargs.items():
+                try:
+                    setattr(llm, k, v)
+                except:
+                    pass
+        return llm
+
+
+    def invoke(self, prompts: str, output_formats: List[str | TaskOutputFormat], response_fields: List[ResponseField]) -> Dict[str, Any]:
         """
         Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
         Then call the base model.
@@ -329,22 +325,13 @@ class Agent(BaseModel):
         messages.append({"role": "assistant", "content": self.backstory})
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

-
-
-        raw_response = self.llm.call(
-            messages=messages, output_formats=output_formats, field_list=response_fields, callbacks=callbacks
-        )
+        raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
         task_execution_counter += 1
         self._logger.log(level="info", message=f"Agent's first response in {type(raw_response).__name__}: {raw_response}", color="blue")

         if (raw_response is None or raw_response == "") and task_execution_counter < self.max_retry_limit:
             while task_execution_counter <= self.max_retry_limit:
-                raw_response = self.llm.call(
-                    messages=messages,
-                    output_formats=output_formats,
-                    field_list=response_fields,
-                    callbacks=callbacks,
-                )
+                raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
                 task_execution_counter += 1
                 self._logger.log(level="info", message=f"Agent's next response in {type(raw_response).__name__}: {raw_response}", color="blue")

@@ -411,3 +398,7 @@ class Agent(BaseModel):
             self._rpm_controller.stop_rpm_counter()

         return raw_response
+
+
+    def __repr__(self):
+        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
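The agent/model.py changes above route every accepted form of `llm` (an `LLM` instance, a model-name string, a plain dict, or `None`) through the new `_set_llm_params` helper, which layers the agent's own settings (`max_execution_time`, `max_tokens`, `step_callback`, `respect_context_window`) on top of whatever the LLM instance already carries. The sketch below illustrates that precedence with simplified stand-in classes; it is not the package's code and every name in it is hypothetical.

# Illustrative sketch only -- simplified stand-ins, not versionhq's classes.
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional

DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)

@dataclass
class SketchLLM:
    model: str
    timeout: Optional[int] = None
    max_tokens: Optional[int] = None
    context_window_size: int = DEFAULT_CONTEXT_WINDOW_SIZE
    callbacks: List[Callable] = field(default_factory=list)

def set_llm_params(llm: SketchLLM, *, max_execution_time: Optional[int] = None,
                   max_tokens: Optional[int] = None, step_callback: Optional[Callable] = None,
                   respect_context_window: bool = True,
                   extra: Optional[Dict[str, Any]] = None) -> SketchLLM:
    # Agent-level values fill gaps the LLM instance left open, mirroring the diff above.
    llm.timeout = max_execution_time if llm.timeout is None else llm.timeout
    llm.max_tokens = max_tokens if max_tokens else llm.max_tokens
    if step_callback is not None:
        llm.callbacks = [step_callback]
    if respect_context_window is False:
        llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
    for k, v in (extra or {}).items():
        setattr(llm, k, v)  # dict-style agent configs are copied attribute by attribute
    return llm

llm = set_llm_params(SketchLLM(model="gpt-4o"), max_execution_time=120, max_tokens=2048)
print(llm.timeout, llm.max_tokens)  # -> 120 2048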
versionhq/llm/{llm_vars.py → llm_variables.py}
RENAMED
@@ -1,3 +1,93 @@
+JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+
+
+"""
+List of models available on the framework.
+Model names align with the LiteLLM's key names defined in the JSON URL.
+Provider names align with the custom provider or model provider names.
+-> model_key = custom_provider_name/model_name
+"""
+
+MODELS = {
+    "openai": [
+        "gpt-3.5-turbo",
+        "gpt-4",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "o1-mini",
+        "o1-preview",
+    ],
+    "gemini": [
+        "gemini/gemini-1.5-flash",
+        "gemini/gemini-1.5-pro",
+        "gemini/gemini-2.0-flash-exp",
+        "gemini/gemini-gemma-2-9b-it",
+        "gemini/gemini-gemma-2-27b-it",
+    ],
+    "anthropic": [
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-sonnet-20240229",
+        "claude-3-opus-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    "ollama": [
+        "ollama/llama3.1",
+        "ollama/mixtral",
+    ],
+    # "watson": [
+    #     "watsonx/meta-llama/llama-3-1-70b-instruct",
+    #     "watsonx/meta-llama/llama-3-1-8b-instruct",
+    #     "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
+    #     "watsonx/meta-llama/llama-3-2-1b-instruct",
+    #     "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
+    #     "watsonx/meta-llama/llama-3-405b-instruct",
+    #     "watsonx/mistral/mistral-large",
+    #     "watsonx/ibm/granite-3-8b-instruct",
+    # ],
+    # "bedrock": [
+    #     "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+    #     "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+    #     "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+    #     "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+    #     "bedrock/anthropic.claude-v2:1",
+    #     "bedrock/anthropic.claude-v2",
+    #     "bedrock/anthropic.claude-instant-v1",
+    #     "bedrock/meta.llama3-1-405b-instruct-v1:0",
+    #     "bedrock/meta.llama3-1-70b-instruct-v1:0",
+    #     "bedrock/meta.llama3-1-8b-instruct-v1:0",
+    #     "bedrock/meta.llama3-70b-instruct-v1:0",
+    #     "bedrock/meta.llama3-8b-instruct-v1:0",
+    #     "bedrock/amazon.titan-text-lite-v1",
+    #     "bedrock/amazon.titan-text-express-v1",
+    #     "bedrock/cohere.command-text-v14",
+    #     "bedrock/ai21.j2-mid-v1",
+    #     "bedrock/ai21.j2-ultra-v1",
+    #     "bedrock/ai21.jamba-instruct-v1:0",
+    #     "bedrock/meta.llama2-13b-chat-v1",
+    #     "bedrock/meta.llama2-70b-chat-v1",
+    #     "bedrock/mistral.mistral-7b-instruct-v0:2",
+    #     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    # ],
+}
+
+
+PROVIDERS = [
+    "openai",
+    "anthropic",
+    "gemini",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+    "llama",
+]
+
+
+"""
+Max input token size by the model.
+"""
 LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-3.5-turbo": 8192,
     "gpt-4": 8192,
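The MODELS table added above is the lookup that the rewritten LLM validator in versionhq/llm/model.py (shown later in this diff) searches when normalizing a model name: exact key match first, then substring match, otherwise a provider default. A simplified, self-contained sketch of that resolution, with a trimmed table and a hypothetical helper name:

# Simplified sketch; the real table and fallback logic live in versionhq, not here.
MODELS = {
    "openai": ["gpt-4o", "gpt-4o-mini", "o1-mini"],
    "gemini": ["gemini/gemini-1.5-flash", "gemini/gemini-1.5-pro"],
}

def resolve_model(name, provider=None):
    candidates = MODELS.get(provider) if provider else [m for models in MODELS.values() for m in models]
    for item in candidates or []:
        if name == item or name in item:   # exact match, then substring match
            return item
    return (candidates or ["gpt-4o-mini"])[0]  # fall back to the first available entry

print(resolve_model("gemini-1.5-pro"))    # -> gemini/gemini-1.5-pro (substring match)
print(resolve_model("gpt-4o", "openai"))  # -> gpt-4o (exact match)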
@@ -6,6 +96,19 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-4-turbo": 128000,
     "o1-preview": 128000,
     "o1-mini": 128000,
+
+    "gemini/gemini-1.5-flash": 1048576,
+    "gemini/gemini-1.5-pro": 2097152,
+    "gemini/gemini-2.0-flash-exp": 1048576,
+    "gemini/gemini-gemma-2-9b-it": 8192,
+    "gemini/gemini-gemma-2-27b-it": 8192,
+
+    "claude-3-5-sonnet-20241022": 200000,
+    "claude-3-5-sonnet-20240620": 200000,
+    "claude-3-sonnet-20240229": 200000,
+    "claude-3-opus-20240229": 200000,
+    "claude-3-haiku-20240307": 200000,
+
     "deepseek-chat": 128000,
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
@@ -20,8 +123,21 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "llama3-70b-8192": 8192,
     "llama3-8b-8192": 8192,
     "mixtral-8x7b-32768": 32768,
+    "claude-3-5-sonnet-2024102": 200000,
+}
+
+
+LLM_API_KEY_NAMES = {
+    "openai": "OPENAI_API_KEY",
+    "anthropic": "ANTHROPIC_API_KEY",
+    "gemini": "GEMINI_API_KEY",
 }

+LLM_BASE_URL_KEY_NAMES = {
+    "openai": "OPENAI_API_BASE",
+    "anthropic": "ANTHROPIC_API_BASE",
+    "gemini": "GEMINI_API_BASE",
+}

 LLM_VARS = {
     "openai": [
@@ -107,67 +223,39 @@ LLM_VARS = {
 }


-PROVIDERS = [
-    "openai",
-    "anthropic",
-    "gemini",
-    "ollama",
-    "watson",
-    "bedrock",
-    "azure",
-    "cerebras",
-    "llama",
-]

-
-
-
-        "claude-3-5-sonnet-20240620",
-        "claude-3-sonnet-20240229",
-        "claude-3-opus-20240229",
-        "claude-3-haiku-20240307",
-    ],
-    "gemini": [
-        "gemini/gemini-1.5-flash",
-        "gemini/gemini-1.5-pro",
-        "gemini/gemini-gemma-2-9b-it",
-        "gemini/gemini-gemma-2-27b-it",
-    ],
-    "ollama": ["ollama/llama3.1", "ollama/mixtral"],
-    "watson": [
-        "watsonx/meta-llama/llama-3-1-70b-instruct",
-        "watsonx/meta-llama/llama-3-1-8b-instruct",
-        "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
-        "watsonx/meta-llama/llama-3-2-1b-instruct",
-        "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
-        "watsonx/meta-llama/llama-3-405b-instruct",
-        "watsonx/mistral/mistral-large",
-        "watsonx/ibm/granite-3-8b-instruct",
-    ],
-    "bedrock": [
-        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-        "bedrock/anthropic.claude-v2:1",
-        "bedrock/anthropic.claude-v2",
-        "bedrock/anthropic.claude-instant-v1",
-        "bedrock/meta.llama3-1-405b-instruct-v1:0",
-        "bedrock/meta.llama3-1-70b-instruct-v1:0",
-        "bedrock/meta.llama3-1-8b-instruct-v1:0",
-        "bedrock/meta.llama3-70b-instruct-v1:0",
-        "bedrock/meta.llama3-8b-instruct-v1:0",
-        "bedrock/amazon.titan-text-lite-v1",
-        "bedrock/amazon.titan-text-express-v1",
-        "bedrock/cohere.command-text-v14",
-        "bedrock/ai21.j2-mid-v1",
-        "bedrock/ai21.j2-ultra-v1",
-        "bedrock/ai21.jamba-instruct-v1:0",
-        "bedrock/meta.llama2-13b-chat-v1",
-        "bedrock/meta.llama2-70b-chat-v1",
-        "bedrock/mistral.mistral-7b-instruct-v0:2",
-        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
-    ],
-}
+"""
+Params for litellm.completion() func
+"""

-
+LITELLM_COMPLETION_KEYS = [
+    "model",
+    "messages",
+    "timeout",
+    "temperature", "top_p",
+    "n",
+    "stream",
+    "stream_options",
+    "stop",
+    "max_completion_tokens",
+    "max_tokens",
+    "modalities",
+    "prediction",
+    "audio",
+    "presence_penalty",
+    "frequency_penalty",
+    "logit_bias",
+    "user",
+    "response_format",
+    "seed",
+    "tools",
+    "tool_choice",
+    "logprobs",
+    "top_logprobs",
+    "parallel_tool_calls",
+    "extra_headers",
+    "base_url",
+    "api_version",
+    "api_key",
+    "model_list",
+]
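With the rename to llm_variables.py, model metadata is read from plain dicts rather than being hard-coded at call sites. A short usage sketch, assuming versionhq 1.1.9.14 is installed; the local variable names and example values below are illustrative:

# Usage sketch -- the import path is taken from the diff above, the rest is an example.
import os
from versionhq.llm.llm_variables import (
    LLM_CONTEXT_WINDOW_SIZES,
    LLM_API_KEY_NAMES,
    LLM_BASE_URL_KEY_NAMES,
)

model = "gemini/gemini-1.5-pro"
provider = "gemini"

# Context window falls back to a conservative default for unknown models.
window = LLM_CONTEXT_WINDOW_SIZES.get(model, 8192)

# Credentials come from provider-specific environment variables.
api_key = os.environ.get(LLM_API_KEY_NAMES.get(provider, "LITELLM_API_KEY"))
base_url = os.environ.get(LLM_BASE_URL_KEY_NAMES.get(provider, "OPENAI_API_BASE"))

print(window, api_key is not None, base_url)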
versionhq/llm/model.py
CHANGED
@@ -4,20 +4,26 @@ import sys
 import threading
 import warnings
 import litellm
+from abc import ABC
 from dotenv import load_dotenv
 from litellm import get_supported_openai_params
 from contextlib import contextmanager
 from typing import Any, Dict, List, Optional
+from typing_extensions import Self

-from
+from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
+from pydantic_core import PydanticCustomError
+
+from versionhq.llm.llm_variables import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, LITELLM_COMPLETION_KEYS
 from versionhq.task import TaskOutputFormat
 from versionhq.task.model import ResponseField
+from versionhq._utils.logger import Logger
+

 load_dotenv(override=True)
 API_KEY_LITELLM = os.environ.get("API_KEY_LITELLM")
-
-os.environ
-
+DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
+DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")

 class FilteredStream:
     def __init__(self, original_stream):
@@ -94,146 +100,175 @@ class LLMResponseSchema:
         return response_schema


-class LLM:
+class LLM(BaseModel):
     """
+    An LLM class to store params except for response formats which will be given in the task handling process.
     Use LiteLLM to connect with the model of choice.
-
+    Some optional params are passed by the agent, else follow the default settings of the model provider.
     """

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    self.
-
-
-
-
-
-    self.
+    _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
+    _init_model_name: str = PrivateAttr(default=None)
+    model_config = ConfigDict(extra="allow")
+
+    model: str = Field(default=DEFAULT_MODEL_NAME)
+    provider: Optional[str] = Field(default=None, description="model provider or custom model provider")
+    base_url: Optional[str] = Field(default=None, description="litellm's api base")
+    api_key: Optional[str] = Field(default=None)
+    api_version: Optional[str] = Field(default=None)
+
+    # optional params
+    timeout: Optional[float | int] = Field(default=None)
+    max_tokens: Optional[int] = Field(default=None)
+    max_completion_tokens: Optional[int] = Field(default=None)
+    context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)
+    callbacks: List[Any] = Field(default_factory=list)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    n: Optional[int] = Field(default=None)
+    stop: Optional[str | List[str]] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    logit_bias: Optional[Dict[int, float]] = Field(default=None)
+    seed: Optional[int] = Field(default=None)
+    logprobs: Optional[bool] = Field(default=None)
+    top_logprobs: Optional[int] = Field(default=None)
+
+    litellm.drop_params = True
+    litellm.set_verbose = True
+    os.environ['LITELLM_LOG'] = 'DEBUG'
+
+
+    @model_validator(mode="after")
+    def validate_base_params(self) -> Self:
+        """
+        1. Model name and provider
+        Check the provided model name in the list and update it with the valid model key name.
+        Then add the model provider if it is not provided.
+        Assign a default model and provider when we cannot find a model key.
+
+        2. Set up other base parameters for the model and LiteLLM as below:
+        1. LiteLLM - drop_params, set_verbose, callbacks
+        2. Model setup - context_window_size, api_key, base_url
+        """
+
+        if self.model is None:
+            self._logger.log(level="error", message="Model name is missing.", color="red")
+            raise PydanticCustomError("model_missing", "The model name must be provided.", {})
+
+
+        self._init_model_name = self.model
+        self.model = None
+
+        if self.provider and MODELS.get(self.provider) is not None:
+            provider_model_list = MODELS.get(self.provider)
+            for item in provider_model_list:
+                if self.model is None:
+                    if item == self._init_model_name:
+                        self.model = item
+                    elif self._init_model_name in item and self.model is None:
+                        self.model = item
+                    else:
+                        temp_model = provider_model_list[0]
+                        self._logger.log(level="info", message=f"The provided model: {self._init_model_name} is not in the list. We'll assign a model: {temp_model} from the selected model provider: {self.provider}.", color="yellow")
+                        self.model = temp_model
+                        # raise PydanticCustomError("invalid_model", "The provided model is not in the list.", {})
+
+        else:
+            for k, v in MODELS.items():
+                for item in v:
+                    if self.model is None:
+                        if self._init_model_name == item:
+                            self.model = item
+                            self.provider = k
+
+                        elif self.model is None and self._init_model_name in item:
+                            self.model = item
+                            self.provider = k
+
+            if self.model is None:
+                self._logger.log(level="info", message=f"The provided model \'{self.model}\' is not in the list. We'll assign a default model.", color="yellow")
+                self.model = DEFAULT_MODEL_NAME
+                self.provider = "openai"
+                # raise PydanticCustomError("invalid_model", "The provided model is not in the list.", {})
+
+        if self.callbacks is not None:
+            self._set_callbacks(self.callbacks)
+
+        self.context_window_size = self._get_context_window_size()
+
+        api_key_name = LLM_API_KEY_NAMES.get(self.provider, "LITELLM_API_KEY")
+        self.api_key = os.environ.get(api_key_name, None)
+
+        base_url_key_name = LLM_BASE_URL_KEY_NAMES.get(self.provider, "OPENAI_API_BASE")
+        self.base_url = os.environ.get(base_url_key_name, None)
+
+        return self
+

     def call(
         self,
-        output_formats: List[TaskOutputFormat],
+        output_formats: List[str | TaskOutputFormat],
         field_list: Optional[List[ResponseField]],
         messages: List[Dict[str, str]],
-
+        **kwargs,
+        # callbacks: List[Any] = [],
     ) -> str:
         """
-        Execute LLM based on
+        Execute LLM based on the agent's params and model params.
         """

         with suppress_warnings():
-            if
-            self.
+            if len(self.callbacks) > 0:
+                self._set_callbacks(self.callbacks)

             try:
-                response_format = None
-
-
-
-
-
-
-
-
-
-
-
-
-                "top_p": self.top_p,
-                "n": self.n,
-                "stop": self.stop,
-                "max_tokens": self.max_tokens or self.max_completion_tokens,
-                "presence_penalty": self.presence_penalty,
-                "frequency_penalty": self.frequency_penalty,
-                "logit_bias": self.logit_bias,
-                # "response_format": response_format,
-                "seed": self.seed,
-                "logprobs": self.logprobs,
-                "top_logprobs": self.top_logprobs,
-                "api_base": self.base_url,
-                "api_version": self.api_version,
-                "api_key": self.api_key,
-                "stream": False,
-                **self.kwargs,
-                }
-                params = {k: v for k, v in params.items() if v is not None}
-                res = litellm.completion(**params)
+                # response_format = None
+                # #! REFINEME
+                # if TaskOutputFormat.JSON in output_formats:
+                #     response_format = LLMResponseSchema(
+                #         response_type="json_object", field_list=field_list
+                #     )
+
+                params = {}
+                for item in LITELLM_COMPLETION_KEYS:
+                    if hasattr(self, item) and getattr(self, item) is not None:
+                        params[item] = getattr(self, item)
+
+                res = litellm.completion(messages=messages, stream=False, **params)
                 return res["choices"][0]["message"]["content"]

             except Exception as e:
-
+                self._logger.log(level="error", message=f"LiteLLM call failed: {str(e)}", color="red")
                 return None

-
+
+    def _supports_function_calling(self) -> bool:
         try:
             params = get_supported_openai_params(model=self.model)
             return "response_format" in params
         except Exception as e:
-
+            self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
             return False

-
+
+    def _supports_stop_words(self) -> bool:
         try:
             params = get_supported_openai_params(model=self.model)
             return "stop" in params
         except Exception as e:
-
+            self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
             return False

-
+
+    def _get_context_window_size(self) -> int:
         """
         Only use 75% of the context window size to avoid cutting the message in the middle.
         """
-        return (
-
-            if hasattr(LLM_CONTEXT_WINDOW_SIZES, self.model)
-            else DEFAULT_CONTEXT_WINDOW
-        )
+        return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model) * 0.75) if LLM_CONTEXT_WINDOW_SIZES.get(self.model) is not None else DEFAULT_CONTEXT_WINDOW_SIZE
+

-    def
+    def _set_callbacks(self, callbacks: List[Any]):
         callback_types = [type(callback) for callback in callbacks]
         for callback in litellm.success_callback[:]:
             if type(callback) in callback_types:
versionhq/task/__init__.py
CHANGED
versionhq/task/model.py
CHANGED
@@ -117,7 +117,7 @@ class Task(BaseModel):
     _original_description: str = PrivateAttr(default=None)
     _logger: Logger = PrivateAttr()
     _task_output_handler = TaskOutputStorageHandler()
-    config: Optional[Dict[str, Any]] = Field(default=None, description="
+    config: Optional[Dict[str, Any]] = Field(default=None, description="values to set on Task class")

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="unique identifier for the object, not set by user")
     name: Optional[str] = Field(default=None)
@@ -245,7 +245,7 @@ class Task(BaseModel):
         output_json_dict: Dict[str, Any] = dict()

         try:
-            raw_result = raw_result.replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]')
+            raw_result = raw_result.replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "').replace("'}", '"}').replace("' }", '"}').replace("', '", '", "').replace("['", '["').replace("[ '", '[ "').replace("']", '"]').replace("' ]", '" ]').replace("{\n'", '{"').replace("{\'", '{"')
             r = json.dumps(eval(str(raw_result)))
             output_json_dict = json.loads(r)

@@ -425,23 +425,23 @@ class Task(BaseModel):
             output_formats_to_follow[item.title] = f"<Return your answer in {item.type.__name__}>"

         output_prompt = f"""
-        Output only valid JSON conforming to the specified format. Use double quotes for keys and values
-        {output_formats_to_follow}
+        Output only valid JSON conforming to the specified format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or other non-standard JSON syntax.
+        Specified format: {output_formats_to_follow}
         """
         return output_prompt


     @property
-    def expected_output_formats(self) -> List[TaskOutputFormat]:
+    def expected_output_formats(self) -> List[str | TaskOutputFormat]:
         """
         Return output formats in list with the ENUM item.
         `TaskOutputFormat.RAW` is set as default.
         """
-        outputs = [TaskOutputFormat.RAW,]
+        outputs = [TaskOutputFormat.RAW.value,]
         if self.expected_output_json:
-            outputs.append(TaskOutputFormat.JSON)
+            outputs.append(TaskOutputFormat.JSON.value)
         if self.expected_output_pydantic:
-            outputs.append(TaskOutputFormat.PYDANTIC)
+            outputs.append(TaskOutputFormat.PYDANTIC.value)
         return outputs


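The task/model.py change extends the quote normalization applied to raw model output before parsing: the extra replacements cover payloads where a newline separates the opening brace from the first single-quoted key. A standalone approximation of that cleanup, using a hypothetical normalize() helper:

# Approximation of the normalization step; the real method chains more replacements.
import json

def normalize(raw: str) -> dict:
    raw = (raw.replace("{'", '{"').replace("{ '", '{"').replace("': '", '": "')
              .replace("'}", '"}').replace("' }", '"}').replace("', '", '", "')
              .replace("{\n'", '{"'))
    return json.loads(json.dumps(eval(raw)))  # eval mirrors the package's approach; only use on trusted output

print(normalize("{'name': 'demo', 'score': 1}"))  # -> {'name': 'demo', 'score': 1}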
versionhq/team/team_planner.py
CHANGED
@@ -17,7 +17,7 @@ class TeamPlanner:

     def __init__(self, tasks: List[Task], planner_llm: Optional[Any] = None):
         self.tasks = tasks
-        self.planner_llm = planner_llm if planner_llm else os.environ.get("
+        self.planner_llm = planner_llm if planner_llm else os.environ.get("DEFAULT_MODEL_NAME")


     def _handle_assign_agents(self, unassigned_tasks: List[Task]) -> List[Any]:
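team_planner.py now falls back to the DEFAULT_MODEL_NAME environment variable when no planner model is passed in. A tiny sketch of that precedence; the environment value shown is an example, not a package default:

# Fallback sketch with hypothetical values.
import os

os.environ.setdefault("DEFAULT_MODEL_NAME", "gpt-4o-mini")

def resolve_planner_llm(planner_llm=None):
    return planner_llm if planner_llm else os.environ.get("DEFAULT_MODEL_NAME")

print(resolve_planner_llm())          # -> gpt-4o-mini (from the environment)
print(resolve_planner_llm("gpt-4o"))  # -> gpt-4o (explicit argument wins)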
{versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.9.12
+Version: 1.1.9.14
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -60,7 +60,7 @@ Requires-Dist: composio-langchain>=0.6.12

 (badge image)
 [(badge image)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-(badge image)
+(badge image)
 (badge image)
 (badge image)

{versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=qgKbO4z_Ip_tf_CLeRokSCc3tyh4akIzfNyL5-yb7HM,951
 versionhq/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/_utils/cache_handler.py,sha256=3-lw_5ZMWC8hnPAkSQULJ2V1FvZZ-wg9mQaUJGSOjI8,403
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
@@ -7,7 +7,7 @@ versionhq/_utils/process_config.py,sha256=UqoWD5IR4VLxEDGxIyVUylw_ppXwk8Wx1ynVuD
 versionhq/_utils/rpm_controller.py,sha256=dUgFd6JtdjiLLTRmrjsBHdTaLn73XFuKpLbJh7thf2A,2289
 versionhq/_utils/usage_metrics.py,sha256=hhq1OCW8Z4V93vwW2O2j528EyjOlF8wlTsX5IL-7asA,1106
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=_7mq06b1I7kRe7XYyPKPjzduBuBBFDa_CvmEr3WkKpc,18138
 versionhq/agent/parser.py,sha256=Z_swUPO3piJQuYU8oVYwXWeR2zjmNb4PxbXZeR-GlIg,4694
 versionhq/agent/TEMPLATES/Backstory.py,sha256=cdngBx1GEv7nroR46FEhnysnBJ9mEVL763_9np6Skkc,395
 versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -20,24 +20,24 @@ versionhq/clients/product/model.py,sha256=c_watpIg-FzpJ2tMml6M1EbAckgGZOqTSc_GZ3
 versionhq/clients/workflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/workflow/model.py,sha256=Onu3O4y_wroOnEPf7QZkeZp_WPHfk2DVQGdtoXfZvbc,5984
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/llm/
-versionhq/llm/model.py,sha256=
+versionhq/llm/llm_variables.py,sha256=2cRRgUyJ9Em2k0NcEWivsDAoDhH04T3pN_hG6IzEBCQ,7177
+versionhq/llm/model.py,sha256=DNvg93t55VujCqFSvTbjEVgPdTZTwG8CIQWre9YHH_o,10825
 versionhq/storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/storage/task_output_storage.py,sha256=xoBJHeqUyQt6iJoR1WQTghP-fyxXL66qslpX1QC2-4o,4827
-versionhq/task/__init__.py,sha256=
+versionhq/task/__init__.py,sha256=l2r_g01i91JAGlOoHZP_Gh2WCk6mo9D19lcqt7sKMpQ,186
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
 versionhq/task/log_handler.py,sha256=KJRrcNZgFSKhlNzvtYFnvtp6xukaF1s7ifX9u4zWrN8,1683
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=xyDr9AS7Xp1jYlABMuqD7Cm1RfHnfNL5rEzyE47YUFI,19729
 versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/team/model.py,sha256=E52OUVzUtvR--51SFRJos3JdYKri1t2jbvvzoOvShQc,20181
-versionhq/team/team_planner.py,sha256=
+versionhq/team/team_planner.py,sha256=gUmrW_AZYQqAkOylS2XHuYIdTy92-E7CxQB5VmxqgrE,3675
 versionhq/tool/__init__.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtgpqOzKZQ,1843
 versionhq/tool/composio_tool.py,sha256=BJqaA1NhV0BT9AdY7OLCGpsAI3VEuCKnOS6D9vuU4zQ,8630
 versionhq/tool/decorator.py,sha256=W_WjzZy8y43AoiFjHLPUQfNipmpOPe-wQknCWloPwmY,1195
 versionhq/tool/model.py,sha256=yrvog9wh-cuIXRngwXOzPlHwBO3UhUFxCH3vQ5qRKBA,6823
 versionhq/tool/tool_handler.py,sha256=A3zUkZkx4JEpFHI2uBkHDpzWfADw-bCYUQhgm6rpITM,1569
-versionhq-1.1.9.
-versionhq-1.1.9.
-versionhq-1.1.9.
-versionhq-1.1.9.
-versionhq-1.1.9.
+versionhq-1.1.9.14.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
+versionhq-1.1.9.14.dist-info/METADATA,sha256=NnSKnOPGCFgFxyg1-tUjDbe08zfGM8VLfnWqrnOc7KA,16071
+versionhq-1.1.9.14.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+versionhq-1.1.9.14.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.1.9.14.dist-info/RECORD,,
{versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/LICENSE
File without changes
{versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/WHEEL
File without changes
{versionhq-1.1.9.12.dist-info → versionhq-1.1.9.14.dist-info}/top_level.txt
File without changes