versionhq 1.1.11.8__py3-none-any.whl → 1.1.12.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -1,35 +1,71 @@
+ # silence some warnings
  import warnings
-
- warnings.filterwarnings(
-     "ignore",
-     message="Pydantic serializer warnings:",
-     category=UserWarning,
-     module="pydantic.main",
- )
+ warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:", category=UserWarning, module="pydantic.main")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")
+ warnings.filterwarnings(action="ignore", module="LiteLLM:utils")

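The replacement collapses the old multi-line `filterwarnings` call into one-liners and adds two more filters. A minimal sketch of the mechanism (standard library only; the `module=` filter is omitted here so the effect is visible from a plain script):

import warnings

# Same action/message/category as the package's first filter.
warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:", category=UserWarning)

warnings.warn("Pydantic serializer warnings: value coerced", UserWarning)  # silenced: message prefix matches
warnings.warn("some other warning", UserWarning)                           # still emitted
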
  from versionhq.agent.model import Agent
  from versionhq.clients.customer.model import Customer
  from versionhq.clients.product.model import Product, ProductProvider
  from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent
- from versionhq.task.model import Task, TaskOutput
- from versionhq.team.model import Team, TeamOutput
- from versionhq.tool.model import Tool
+ from versionhq.knowledge.model import Knowledge, KnowledgeStorage
+ from versionhq.knowledge.source import PDFKnowledgeSource, CSVKnowledgeSource, JSONKnowledgeSource, TextFileKnowledgeSource, ExcelKnowledgeSource, StringKnowledgeSource
+ from versionhq.knowledge.source_docling import DoclingSource
+ from versionhq.task.model import Task, TaskOutput, ConditionalTask, ResponseField
+ from versionhq.task.evaluate import Evaluation, EvaluationItem
+ from versionhq.team.model import Team, TeamOutput, Formation, TeamMember, TaskHandlingProcess
+ from versionhq.tool.model import Tool, ToolSet
+ from versionhq.tool.cache_handler import CacheHandler
+ from versionhq.tool.tool_handler import ToolHandler
  from versionhq.tool.composio_tool import ComposioHandler
+ from versionhq.memory.contextual_memory import ContextualMemory
+ from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory, MemoryItem
+


- __version__ = "1.1.11.8"
+ __version__ = "1.1.12.2"
  __all__ = [
      "Agent",
+
      "Customer",
      "Product",
      "ProductProvider",
      "MessagingWorkflow",
      "MessagingComponent",
-     "LLM",
+
+     "Knowledge",
+     "KnowledgeStorage",
+     "PDFKnowledgeSource",
+     "CSVKnowledgeSource",
+     "JSONKnowledgeSource",
+     "TextFileKnowledgeSource",
+     "ExcelKnowledgeSource",
+     "StringKnowledgeSource",
+     "DoclingSource",
+
      "Task",
      "TaskOutput",
+     "ConditionalTask",
+     "ResponseField",
+
+     "Evaluation",
+     "EvaluationItem",
+
      "Team",
      "TeamOutput",
+     "Formation",
+     "TeamMember",
+     "TaskHandlingProcess",
+
      "Tool",
-     "ComposioHandler"
+     "ToolSet",
+     "CacheHandler",
+     "ToolHandler",
+     "ComposioHandler",
+
+     "ContextualMemory",
+     "ShortTermMemory",
+     "LongTermMemory",
+     "UserMemory",
+     "MemoryItem"
  ]
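With the expanded `__all__`, the knowledge, evaluation, formation, and memory classes become importable from the package root. A quick sanity check, assuming the 1.1.12.2 wheel is installed:

import versionhq

print(versionhq.__version__)  # -> 1.1.12.2

# A few of the names newly exported in this release:
from versionhq import (
    Knowledge,
    PDFKnowledgeSource,
    ConditionalTask,
    Evaluation,
    Formation,
    ToolSet,
    ShortTermMemory,
)
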
versionhq/_utils/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from versionhq._utils.logger import Logger
+ from versionhq._utils.process_config import process_config
+ from versionhq._utils.usage_metrics import UsageMetrics
versionhq/_utils/logger.py CHANGED
@@ -36,10 +36,15 @@ class Printer:


  class Logger(BaseModel):
+     """
+     Control CLI messages.
+     Color codes: red = error, yellow = warning, blue = info (from vhq), green = info (from third party)
+     """
+
      verbose: bool = Field(default=True)
      _printer: Printer = PrivateAttr(default_factory=Printer)

      def log(self, level, message, color="yellow"):
          if self.verbose:
              timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-             self._printer.print(f"\n{timestamp} - versionHQ - {level.upper()}: {message}", color=color)
+             self._printer.print(f"\n{timestamp} - versionHQ [{level.upper()}]: {message}", color=color)
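A short sketch of how the new log format renders, assuming `Logger` is imported via the new `versionhq._utils` re-export (the message text is illustrative):

from versionhq._utils import Logger

logger = Logger(verbose=True)
logger.log(level="info", message="Team assembled", color="blue")
# prints something like:
# 2025-02-01 10:15:42 - versionHQ [INFO]: Team assembled
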
@@ -0,0 +1,41 @@
+ from versionhq.agent.model import Agent
+ from versionhq.llm.model import DEFAULT_MODEL_NAME
+
+ """
+ In-house agents to be called across the project.
+ [Rules] In-house agents have names and roles that start with `vhq_`. No customization is allowed by the client.
+ """
+
+ vhq_client_manager = Agent(
+     role="vhq-Client Manager",
+     goal="Efficiently communicate with the client on the task progress",
+     llm=DEFAULT_MODEL_NAME,
+     use_memory=True,
+ )
+
+
+ vhq_task_evaluator = Agent(
+     role="vhq-Task Evaluator",
+     goal="score the output according to the given evaluation criteria.",
+     llm=DEFAULT_MODEL_NAME,
+     llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
+     maxit=1,
+     max_retry_limit=1,
+     use_memory=True  # refer to past eval records of similar tasks
+ )
+
+
+ vhq_formation_planner = Agent(
+     role="vhq-Formation Planner",
+     goal="Plan a formation of agents based on the given task description.",
+     llm="gemini/gemini-2.0-flash-exp",
+     llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
+     maxit=1,
+     max_retry_limit=1,
+     knowledge_sources=[
+         "Solo is a formation where a single agent with tools, knowledge, and memory handles tasks individually. When self-learning mode is on, it turns into the Random formation. A typical use case: an email agent drafts a promo message for the given audience using its own knowledge.",
+         "Supervising is a formation where the leader agent gives directions while sharing its knowledge and memory with subordinates. Subordinates can be solo agents or networks. A typical use case: the leader agent strategizes an outbound campaign plan and assigns components such as media mix or message creation to subordinate agents.",
+         "Network is a formation where multiple agents can share tasks, knowledge, and memory among network members without hierarchy. A typical use case: an email agent and a social media agent share product knowledge and deploy a multi-channel outbound campaign.",
+         "Random is a formation where a single agent handles tasks, asking for help from other agents without sharing its memory or knowledge. A typical use case: an email agent drafts a promo message for the given audience, asking other email agents that oversee other customer clusters for insights on tone, or an agent calls an external third-party agent to deploy the campaign.",
+     ]
+ )
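Client code would define its own agents with the same constructor, minus the reserved prefix. A hedged sketch (the role and goal values are hypothetical; only parameters shown in this file are used):

from versionhq.agent.model import Agent
from versionhq.llm.model import DEFAULT_MODEL_NAME

# Hypothetical client-side agent; note the role must NOT start with the `vhq` prefix.
campaign_analyst = Agent(
    role="Campaign Analyst",
    goal="Summarize outbound campaign metrics for the weekly report",
    llm=DEFAULT_MODEL_NAME,
    llm_config=dict(temperature=0.3),
    use_memory=True,
)
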
versionhq/agent/model.py CHANGED
@@ -1,6 +1,5 @@
  import os
  import uuid
- import datetime
  from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
  from typing_extensions import Self
  from dotenv import load_dotenv
@@ -165,13 +164,16 @@ class Agent(BaseModel):
          Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
          """
          self.agent_ops_agent_name = self.role
-         self.llm = self._set_llm(llm=self.llm)
+         self.llm = self._convert_to_llm_class(llm=self.llm)
+
          function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
-         self.function_calling_llm = self._set_llm(llm=function_calling_llm)
+         function_calling_llm = self._convert_to_llm_class(llm=function_calling_llm)
+         if function_calling_llm._supports_function_calling():
+             self.function_calling_llm = function_calling_llm
          return self


-     def _set_llm(self, llm: Any | None) -> LLM:
+     def _convert_to_llm_class(self, llm: Any | None) -> LLM:
          llm = llm if llm is not None else DEFAULT_MODEL_NAME

          match llm:
@@ -350,7 +352,7 @@ class Agent(BaseModel):
      @model_validator(mode="after")
      def set_up_memory(self) -> Self:
          """
-         Set up memories: stm, um
+         Set up memories: stm, ltm, and um
          """

          if self.use_memory == True:
@@ -413,7 +415,7 @@ class Agent(BaseModel):
              task.tokens = self.llm._tokens

              task_execution_counter += 1
-             self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")
+             self._logger.log(level="info", message=f"Agent response: {raw_response}", color="green")
              return raw_response

          except Exception as e:
@@ -429,7 +431,7 @@ class Agent(BaseModel):
              iterations += 1

              task_execution_counter += 1
-             self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")
+             self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
              return raw_response

          if not raw_response:
@@ -474,6 +476,7 @@ class Agent(BaseModel):
              task_prompt += memory.strip()


+         ## commented out for now
          # if self.team and self.team._train:
          #     task_prompt = self._training_handler(task_prompt=task_prompt)
          # else:
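The reworked validator only assigns `function_calling_llm` when the converted model actually supports it; per the `llm/model.py` hunks below, that check keys on litellm's reported `response_format` support. A standalone sketch of the same gating (the function name here is hypothetical):

import litellm

def pick_function_calling_model(model: str, fallback: str = "gpt-4o-mini") -> str:
    # Keep `model` for function calling only if litellm reports support.
    params = litellm.get_supported_openai_params(model=model) or []
    return model if "response_format" in params else fallback
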
@@ -5,7 +5,7 @@ from typing_extensions import Self

  from pydantic import BaseModel, Field, PrivateAttr, model_validator

- from versionhq._utils.logger import Logger
+ from versionhq._utils import Logger


  class RPMController(BaseModel):
versionhq/clients/product/model.py CHANGED
@@ -56,7 +56,6 @@ class ProductProvider(ABC, BaseModel):
          return self


-
  class Product(BaseModel):
      """
      A class to store product information used to create outbound
versionhq/knowledge/__init__.py ADDED
@@ -0,0 +1,22 @@
+ from versionhq.knowledge.model import Knowledge, KnowledgeStorage
+ from versionhq.knowledge.source import (
+     CSVKnowledgeSource,
+     ExcelKnowledgeSource,
+     PDFKnowledgeSource,
+     TextFileKnowledgeSource,
+     JSONKnowledgeSource,
+     StringKnowledgeSource
+ )
+ from versionhq.knowledge.source_docling import DoclingSource
+
+ __all__ = [
+     "Knowledge",
+     "KnowledgeStorage",
+     "DoclingSource",
+     "CSVKnowledgeSource",
+     "ExcelKnowledgeSource",
+     "PDFKnowledgeSource",
+     "TextFileKnowledgeSource",
+     "JSONKnowledgeSource",
+     "StringKnowledgeSource"
+ ]
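The new subpackage `__init__` lets call sites import every source class from `versionhq.knowledge` directly (constructor signatures are not shown in this diff, so only the imports are illustrated):

from versionhq.knowledge import (
    Knowledge,
    KnowledgeStorage,
    DoclingSource,
    CSVKnowledgeSource,
    PDFKnowledgeSource,
    StringKnowledgeSource,
)
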
@@ -1,5 +1,3 @@
- import os
- from abc import ABC, abstractmethod
  from typing import Any, Dict, List, Optional
  from pydantic import BaseModel, ConfigDict, Field
versionhq/knowledge/source.py CHANGED
@@ -280,7 +280,6 @@ class PDFKnowledgeSource(BaseFileKnowledgeSource):



-
  class CSVKnowledgeSource(BaseFileKnowledgeSource):
      """
      A knowledge source class that stores and queries CSV file content using embeddings.
versionhq/llm/llm_vars.py CHANGED
@@ -6,29 +6,20 @@ JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_
  PROVIDERS = [
      "openai",
      "gemini",
-     "sagemaker",
-     "huggingface",  # need api base
+     "openrouter",
+     "huggingface",
      "anthropic",
+     "sagemaker",
+     "bedrock",
      "ollama",
      "watson",
-     "bedrock",
      "azure",
      "cerebras",
      "llama",
  ]

  ENDPOINT_PROVIDERS = [
-     # "openai",
-     # "gemini",
-     # "sagemaker",
      "huggingface",
-     # "anthropic",
-     # "ollama",
-     # "watson",
-     # "bedrock",
-     # "azure",
-     # "cerebras",
-     # "llama",
  ]

  """
@@ -57,10 +48,14 @@ MODELS = {
      "anthropic": [
          "claude-3-5-sonnet-20241022",
          "claude-3-5-sonnet-20240620",
-         "claude-3-sonnet-20240229",
+         "claude-3-haiku-2024030",
          "claude-3-opus-20240229",
          "claude-3-haiku-20240307",
      ],
+     "openrouter": [
+         "openrouter/deepseek/deepseek-r1:free",
+         "openrouter/qwen/qwen-2.5-72b-instruct",
+     ],
      "huggingface": [
          "huggingface/qwen/qwen2.5-VL-72B-Instruct",
      ],
@@ -78,11 +73,6 @@ MODELS = {
          "ollama/mixtral",
          "ollama/mixtral-8x22B-Instruct-v0.1",
      ],
-     "deepseek": [
-         "deepseek/deepseek-reasoner",
-     ],
-
      # "watson": [
      #     "watsonx/meta-llama/llama-3-1-70b-instruct",
      #     "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -98,7 +88,6 @@ MODELS = {
          "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
          "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
          "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-         # "bedrock/anthropic.claude-v2:1",
          "bedrock/anthropic.claude-v2",
          "bedrock/anthropic.claude-instant-v1",
          "bedrock/meta.llama3-1-405b-instruct-v1:0",
@@ -124,24 +113,17 @@
  KEYS = {
      "openai": ["OPENAI_API_KEY"],
      "gemini": ["GEMINI_API_KEY"],
-     "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
      "anthropic": ["ANTHROPIC_API_KEY"],
+     "huggingface": ["HUGGINGFACE_API_KEY"],
+     "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
  }


- """
- Use base_url to specify
- """
- BASE_URLS = {
-     "deepseek": "https://api.deepseek.com"
- }
-

  """
  Max input token size by the model.
  """
  LLM_CONTEXT_WINDOW_SIZES = {
-     "gpt-3.5-turbo": 8192,
      "gpt-4": 8192,
      "gpt-4o": 128000,
      "gpt-4o-mini": 128000,
@@ -160,6 +142,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
      "claude-3-sonnet-20240229": 200000,
      "claude-3-opus-20240229": 200000,
      "claude-3-haiku-20240307": 200000,
+     "claude-3-5-sonnet-20241022": 200000,

      "deepseek-chat": 128000,
      "deepseek/deepseek-reasoner": 8192,
@@ -177,111 +160,18 @@ LLM_CONTEXT_WINDOW_SIZES = {
      "llama3-70b-8192": 8192,
      "llama3-8b-8192": 8192,
      "mixtral-8x7b-32768": 32768,
-     "claude-3-5-sonnet-2024102": 200000,
- }
-
-
-
-
- LLM_BASE_URL_KEY_NAMES = {
-     "openai": "OPENAI_API_BASE",
-     "gemini": "GEMINI_API_BASE",
-     "anthropic": "ANTHROPIC_API_BASE",
- }
-
- LLM_VARS = {
-     "openai": [
-         {
-             "prompt": "Enter your OPENAI API key (press Enter to skip)",
-             "key_name": "OPENAI_API_KEY",
-         }
-     ],
-     "anthropic": [
-         {
-             "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
-             "key_name": "ANTHROPIC_API_KEY",
-         }
-     ],
-     "gemini": [
-         {
-             "prompt": "Enter your GEMINI API key (press Enter to skip)",
-             "key_name": "GEMINI_API_KEY",
-         }
-     ],
-     "watson": [
-         {
-             "prompt": "Enter your WATSONX URL (press Enter to skip)",
-             "key_name": "WATSONX_URL",
-         },
-         {
-             "prompt": "Enter your WATSONX API Key (press Enter to skip)",
-             "key_name": "WATSONX_APIKEY",
-         },
-         {
-             "prompt": "Enter your WATSONX Project Id (press Enter to skip)",
-             "key_name": "WATSONX_PROJECT_ID",
-         },
-     ],
-     "ollama": [
-         {
-             "default": True,
-             "API_BASE": "http://localhost:11434",
-         }
-     ],
-     "bedrock": [
-         {
-             "prompt": "Enter your AWS Access Key ID (press Enter to skip)",
-             "key_name": "AWS_ACCESS_KEY_ID",
-         },
-         {
-             "prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
-             "key_name": "AWS_SECRET_ACCESS_KEY",
-         },
-         {
-             "prompt": "Enter your AWS Region Name (press Enter to skip)",
-             "key_name": "AWS_REGION_NAME",
-         },
-     ],
-     "azure": [
-         {
-             "prompt": "Enter your Azure deployment name (must start with 'azure/')",
-             "key_name": "model",
-         },
-         {
-             "prompt": "Enter your AZURE API key (press Enter to skip)",
-             "key_name": "AZURE_API_KEY",
-         },
-         {
-             "prompt": "Enter your AZURE API base URL (press Enter to skip)",
-             "key_name": "AZURE_API_BASE",
-         },
-         {
-             "prompt": "Enter your AZURE API version (press Enter to skip)",
-             "key_name": "AZURE_API_VERSION",
-         },
-     ],
-     "cerebras": [
-         {
-             "prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
-             "key_name": "model",
-         },
-         {
-             "prompt": "Enter your Cerebras API version (press Enter to skip)",
-             "key_name": "CEREBRAS_API_KEY",
-         },
-     ],
  }



  """
- Params for litellm.completion() func. Address common/unique params to each provider.
+ Params for litellm.completion().
  """

  PARAMS = {
      "litellm": [
          "api_base",
-         "api_version,"
+         "api_version",
          "num_retries",
          "context_window_fallback_dict",
          "fallbacks",
versionhq/llm/model.py CHANGED
@@ -1,4 +1,3 @@
- import logging
  import json
  import os
  import sys
@@ -6,12 +5,11 @@ import threading
  import warnings
  from dotenv import load_dotenv
  import litellm
- from litellm import get_supported_openai_params, JSONSchemaValidationError
+ from litellm import JSONSchemaValidationError
  from contextlib import contextmanager
  from typing import Any, Dict, List, Optional
  from typing_extensions import Self
- from pydantic import BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
- from pydantic_core import PydanticCustomError
+ from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict

  from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS
  from versionhq.tool.model import Tool, ToolSet
@@ -25,10 +23,6 @@ DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
  DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o-mini")
  DEFAULT_MODEL_PROVIDER_NAME = os.environ.get("DEFAULT_MODEL_PROVIDER_NAME", "openai")

- # proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
- # openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-
-
  class FilteredStream:
      def __init__(self, original_stream):
          self._original_stream = original_stream
@@ -53,7 +47,8 @@ class FilteredStream:
  @contextmanager
  def suppress_warnings():
      with warnings.catch_warnings():
-         warnings.filterwarnings("ignore")
+         litellm.set_verbose = False
+         warnings.filterwarnings(action="ignore")
          old_stdout = sys.stdout
          old_stderr = sys.stderr
          sys.stdout = FilteredStream(old_stdout)
@@ -83,11 +78,11 @@ class LLM(BaseModel):
      api_key: Optional[str] = Field(default=None, description="api key to access the model")

      # optional params
+     response_format: Optional[Any] = Field(default=None)
      timeout: Optional[float | int] = Field(default=None)
      max_tokens: Optional[int] = Field(default=None)
      max_completion_tokens: Optional[int] = Field(default=None)
      context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)
-     callbacks: List[Any] = Field(default_factory=list)
      temperature: Optional[float] = Field(default=None)
      top_p: Optional[float] = Field(default=None)
      n: Optional[int] = Field(default=None)
@@ -98,8 +93,8 @@ class LLM(BaseModel):
      seed: Optional[int] = Field(default=None)
      logprobs: Optional[bool] = Field(default=None)
      top_logprobs: Optional[int] = Field(default=None)
-     response_format: Optional[Any] = Field(default=None)
      tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
+     callbacks: List[Any] = Field(default_factory=list)

      # LiteLLM specific fields
      api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
@@ -193,7 +188,7 @@ class LLM(BaseModel):
      @model_validator(mode="after")
      def validate_model_params(self) -> Self:
          """
-         After setting up a valid model, provider, interface provider, add params to the model.
+         Set valid params on the model once the model, provider, and interface provider names are in place.
          """
          self._tokens = 0

@@ -216,19 +211,28 @@
          return self


-     def _create_valid_params(self, config: Dict[str, Any], provider: str = None) -> Dict[str, Any]:
-         params = dict()
-         valid_keys = list()
-         provider = provider if provider else self.provider if self.provider else None
-         valid_keys = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(provider) if provider and PARAMS.get(provider) else PARAMS.get("litellm") + PARAMS.get("common")
+     def _create_valid_params(self, config: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Return valid params (model + litellm original params) from the given config dict.
+         """
+
+         valid_params, valid_keys = dict(), list()
+
+         if self.model:
+             valid_keys = litellm.get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
+
+         if not valid_keys:
+             valid_keys = PARAMS.get("common")
+
+         valid_keys += PARAMS.get("litellm")

          for item in valid_keys:
              if hasattr(self, item) and getattr(self, item):
-                 params[item] = getattr(self, item)
-             elif item in config:
-                 params[item] = config[item]
+                 valid_params[item] = getattr(self, item)
+             elif item in config and config[item]:
+                 valid_params[item] = config[item]

-         return params
+         return valid_params


      def call(
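Instead of a hand-maintained per-provider table, the helper now asks litellm which OpenAI-style params the model supports and falls back to `PARAMS["common"]`. A minimal standalone sketch of the same filtering idea (the function name and fallback list here are hypothetical):

import litellm

def filter_params(model: str, config: dict) -> dict:
    # Mirrors the _create_valid_params() flow: supported keys first, config values second.
    valid_keys = litellm.get_supported_openai_params(model=model, request_type="chat_completion") or ["temperature", "max_tokens"]
    return {k: v for k, v in config.items() if k in valid_keys and v}

print(filter_params("gpt-4o-mini", {"temperature": 0.9, "not_a_param": 1}))  # -> {'temperature': 0.9}
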
@@ -250,21 +254,18 @@
          self._set_callbacks(self.callbacks)  # passed by agent

          try:
-             provider = self.provider if self.provider else DEFAULT_MODEL_PROVIDER_NAME
              self.response_format = { "type": "json_object" } if tool_res_as_final == True else response_format

              if not tools:
                  params = self._create_valid_params(config=config)
-                 res = litellm.completion(messages=messages, stream=False, **params)
+                 res = litellm.completion(model=self.model, messages=messages, stream=False, **params)
                  self._tokens += int(res["usage"]["total_tokens"])
                  return res["choices"][0]["message"]["content"]

              else:
                  self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
-
-                 # if provider == "openai":
-                 params = self._create_valid_params(config=config, provider=provider)
-                 res = litellm.completion(messages=messages, model=self.model, tools=self.tools)
+                 params = self._create_valid_params(config=config)
+                 res = litellm.completion(model=self.model, messages=messages, **params)
                  tool_calls = res.choices[0].message.tool_calls
                  tool_res = ""

@@ -304,7 +305,7 @@
                  if tool_res_as_final:
                      return tool_res
                  else:
-                     res = litellm.completione(messages=messages, model=self.model, tools=self.tools)
+                     res = litellm.completion(model=self.model, messages=messages, **params)
                      self._tokens += int(res["usage"]["total_tokens"])
                      return res.choices[0].message.content
@@ -320,20 +321,17 @@

      def _supports_function_calling(self) -> bool:
          try:
-             params = get_supported_openai_params(model=self.model)
-             return "response_format" in params
+             if self.model:
+                 params = litellm.get_supported_openai_params(model=self.model)
+                 return "response_format" in params if params else False
          except Exception as e:
-             self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
+             self._logger.log(level="warning", message=f"Failed to get supported params: {str(e)}", color="yellow")
              return False


      def _supports_stop_words(self) -> bool:
-         try:
-             params = get_supported_openai_params(model=self.model)
-             return "stop" in params
-         except Exception as e:
-             self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
-             return False
+         supported_params = litellm.get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider)
+         return "stop" in supported_params if supported_params else False


      def _get_context_window_size(self) -> int:
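The two capability checks can be reproduced outside the class with litellm alone; note the codebase treats `response_format` support as its proxy for function calling:

import litellm

params = litellm.get_supported_openai_params(model="gpt-4o-mini") or []
print("response_format" in params)  # the package's function-calling proxy
print("stop" in params)             # stop-word support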