versionhq 1.1.11.6__tar.gz → 1.1.11.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.github/workflows/run_tests.yml +0 -2
  2. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.gitignore +2 -0
  3. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/PKG-INFO +3 -1
  4. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/README.md +2 -0
  5. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/pyproject.toml +1 -1
  6. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/__init__.py +1 -1
  7. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/model.py +78 -88
  8. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/source_docling.py +14 -13
  9. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/llm/llm_vars.py +91 -58
  10. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/llm/model.py +130 -104
  11. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/model.py +4 -1
  12. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/structured_response.py +4 -3
  13. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq.egg-info/PKG-INFO +3 -1
  14. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/agent/agent_test.py +7 -3
  15. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/llm/llm_test.py +8 -19
  16. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/task/__init__.py +0 -8
  17. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/task/task_test.py +7 -7
  18. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/uv.lock +36 -24
  19. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.github/workflows/publish.yml +0 -0
  20. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.github/workflows/publish_testpypi.yml +0 -0
  21. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.github/workflows/security_check.yml +0 -0
  22. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.pre-commit-config.yaml +0 -0
  23. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/.python-version +0 -0
  24. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/LICENSE +0 -0
  25. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/SECURITY.md +0 -0
  26. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/db/preprocess.py +0 -0
  27. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/requirements-dev.txt +0 -0
  28. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/requirements.txt +0 -0
  29. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/runtime.txt +0 -0
  30. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/setup.cfg +0 -0
  31. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/_utils/__init__.py +0 -0
  32. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/_utils/i18n.py +0 -0
  33. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/_utils/logger.py +0 -0
  34. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/_utils/process_config.py +0 -0
  35. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/_utils/usage_metrics.py +0 -0
  36. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/_utils/vars.py +0 -0
  37. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  38. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  39. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/__init__.py +0 -0
  40. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/default_agents.py +0 -0
  41. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/parser.py +0 -0
  42. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/agent/rpm_controller.py +0 -0
  43. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/cli/__init__.py +0 -0
  44. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/__init__.py +0 -0
  45. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/customer/__init__.py +0 -0
  46. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/customer/model.py +0 -0
  47. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/product/__init__.py +0 -0
  48. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/product/model.py +0 -0
  49. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/workflow/__init__.py +0 -0
  50. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/clients/workflow/model.py +0 -0
  51. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/__init__.py +0 -0
  52. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/_utils.py +0 -0
  53. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/embedding.py +0 -0
  54. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/model.py +0 -0
  55. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/source.py +0 -0
  56. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/knowledge/storage.py +0 -0
  57. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/llm/__init__.py +0 -0
  58. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/memory/__init__.py +0 -0
  59. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/memory/contextual_memory.py +0 -0
  60. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/memory/model.py +0 -0
  61. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/__init__.py +0 -0
  62. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/base.py +0 -0
  63. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
  64. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/mem0_storage.py +0 -0
  65. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/rag_storage.py +0 -0
  66. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/task_output_storage.py +0 -0
  67. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/storage/utils.py +0 -0
  68. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/TEMPLATES/Description.py +0 -0
  69. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/__init__.py +0 -0
  70. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/evaluate.py +0 -0
  71. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/formatter.py +0 -0
  72. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/task/log_handler.py +0 -0
  73. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/team/__init__.py +0 -0
  74. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/team/model.py +0 -0
  75. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/team/team_planner.py +0 -0
  76. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/__init__.py +0 -0
  77. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/cache_handler.py +0 -0
  78. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/composio_tool.py +0 -0
  79. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/composio_tool_vars.py +0 -0
  80. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/decorator.py +0 -0
  81. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/model.py +0 -0
  82. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq/tool/tool_handler.py +0 -0
  83. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq.egg-info/SOURCES.txt +0 -0
  84. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq.egg-info/dependency_links.txt +0 -0
  85. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq.egg-info/requires.txt +0 -0
  86. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/src/versionhq.egg-info/top_level.txt +0 -0
  87. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/__init__.py +0 -0
  88. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/agent/__init__.py +0 -0
  89. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/cli/__init__.py +0 -0
  90. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/clients/customer_test.py +0 -0
  91. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/clients/product_test.py +0 -0
  92. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/clients/workflow_test.py +0 -0
  93. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/conftest.py +0 -0
  94. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/knowledge/__init__.py +0 -0
  95. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/knowledge/knowledge_test.py +0 -0
  96. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/knowledge/mock_report_compressed.pdf +0 -0
  97. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/llm/__init__.py +0 -0
  98. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/memory/__init__.py +0 -0
  99. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/memory/memory_test.py +0 -0
  100. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/team/Prompts/Demo_test.py +0 -0
  101. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/team/__init__.py +0 -0
  102. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/team/team_test.py +0 -0
  103. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/tool/__init__.py +0 -0
  104. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/tool/composio_test.py +0 -0
  105. {versionhq-1.1.11.6 → versionhq-1.1.11.8}/tests/tool/tool_test.py +0 -0
.github/workflows/run_tests.yml
@@ -8,13 +8,11 @@ permissions:
 env:
   LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-  OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
   DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
   DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
   DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
   GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
-  GEMINI_API_BASE: ${{ secrets.GEMINI_API_BASE }}
   ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
   ANTHROPIC_API_BASE: ${{ secrets.ANTHROPIC_API_BASE }}
   MEM0_API_KEY: ${{ secrets.MEM0_API_KEY }}
.gitignore
@@ -3,6 +3,8 @@ destinations.py
 
 entity_memory.py
 
+llm_connection_test.py
+
 train.py
 
 dist/
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.11.6
+Version: 1.1.11.8
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -319,8 +319,10 @@ src/
 pyenv install 3.12.8
 pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
 uv python pin 3.12.8
+echo 3.12.8 > .python-version
 ```
 
+
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
README.md
@@ -243,8 +243,10 @@ src/
 pyenv install 3.12.8
 pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
 uv python pin 3.12.8
+echo 3.12.8 > .python-version
 ```
 
+
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.1.11.6"
+version = "1.1.11.8"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
src/versionhq/__init__.py
@@ -17,7 +17,7 @@ from versionhq.tool.model import Tool
 from versionhq.tool.composio_tool import ComposioHandler
 
 
-__version__ = "1.1.11.6"
+__version__ = "1.1.11.8"
 __all__ = [
     "Agent",
     "Customer",
src/versionhq/agent/model.py
@@ -6,10 +6,10 @@ from typing_extensions import Self
 from dotenv import load_dotenv
 import litellm
 
-from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator, ConfigDict
+from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
 from pydantic_core import PydanticCustomError
 
-from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME, PROVIDERS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
 from versionhq.memory.contextual_memory import ContextualMemory
@@ -99,7 +99,7 @@ class Agent(BaseModel):
     tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool] | Any]] = Field(default_factory=list)
 
     # knowledge
-    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(default=None)
+    knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
     _knowledge: Optional[Knowledge] = PrivateAttr(default=None)
 
     # memory
@@ -162,90 +162,44 @@ class Agent(BaseModel):
     @model_validator(mode="after")
     def set_up_llm(self) -> Self:
         """
-        Set up the base model and function calling model (if any) using the LLM class.
-        Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`,`respect_context_window` to the LLM class.
-        The base model is selected on the client app, else use the default model.
+        Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
         """
-
         self.agent_ops_agent_name = self.role
+        self.llm = self._set_llm(llm=self.llm)
+        function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
+        self.function_calling_llm = self._set_llm(llm=function_calling_llm)
+        return self
 
-        if isinstance(self.llm, LLM):
-            llm = self._set_llm_params(self.llm)
-            self.llm = llm
 
-        elif isinstance(self.llm, str) or self.llm is None:
-            model_name = self.llm if self.llm is not None else DEFAULT_MODEL_NAME
-            llm = LLM(model=model_name)
-            updated_llm = self._set_llm_params(llm)
-            self.llm = updated_llm
+    def _set_llm(self, llm: Any | None) -> LLM:
+        llm = llm if llm is not None else DEFAULT_MODEL_NAME
 
-        else:
-            if isinstance(self.llm, dict):
-                model_name = self.llm.pop("model_name", self.llm.pop("deployment_name", str(self.llm)))
-                llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
-                updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
-                self.llm = updated_llm
+        match llm:
+            case LLM():
+                return self._set_llm_params(llm=llm)
 
-            else:
+            case str():
+                llm_obj = LLM(model=llm)
+                return self._set_llm_params(llm=llm_obj)
+
+            case dict():
+                model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
+                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+                return self._set_llm_params(llm_obj, { k: v for k, v in llm.items() if v is not None })
+
+            case _:
                 model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-                llm = LLM(model=model_name)
+                llm_obj = LLM(model=model_name)
                 llm_params = {
-                    "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
-                    "timeout": getattr(self.llm, "timeout", self.max_execution_time),
-                    "callbacks": getattr(self.llm, "callbacks", None),
-                    "temperature": getattr(self.llm, "temperature", None),
-                    "logprobs": getattr(self.llm, "logprobs", None),
-                    "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                    "base_url": getattr(self.llm, "base_url", None),
+                    "max_tokens": (getattr(llm, "max_tokens") or self.max_tokens or 3000),
+                    "timeout": getattr(llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(llm, "callbacks", None),
+                    "temperature": getattr(llm, "temperature", None),
+                    "logprobs": getattr(llm, "logprobs", None),
+                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(llm, "base_url", None),
                 }
-                updated_llm = self._set_llm_params(llm, llm_params)
-                self.llm = updated_llm
-
-
-        """
-        Set up funcion_calling LLM as well.
-        Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
-        """
-        if self.function_calling_llm:
-            if isinstance(self.function_calling_llm, LLM):
-                if self.function_calling_llm._supports_function_calling() == False:
-                    self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
-
-                updated_llm = self._set_llm_params(self.function_calling_llm)
-                self.function_calling_llm = updated_llm
-
-            elif isinstance(self.function_calling_llm, str):
-                llm = LLM(model=self.function_calling_llm)
-
-                if llm._supports_function_calling() == False:
-                    llm = LLM(model=DEFAULT_MODEL_NAME)
-
-                updated_llm = self._set_llm_params(llm)
-                self.function_calling_llm = updated_llm
-
-            else:
-                if isinstance(self.function_calling_llm, dict):
-                    model_name = self.function_calling_llm.pop("model_name", self.function_calling_llm.pop("deployment_name", str(self.function_calling_llm)))
-                    llm = LLM(model=model_name)
-                    updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
-                    self.function_calling_llm = updated_llm
-
-                else:
-                    model_name = (getattr(self.function_calling_llm, "model_name") or getattr(self.function_calling_llm, "deployment_name") or str(self.function_calling_llm))
-                    llm = LLM(model=model_name)
-                    llm_params = {
-                        "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
-                        "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
-                        "callbacks": getattr(self.function_calling_llm, "callbacks", None),
-                        "temperature": getattr(self.function_calling_llm, "temperature", None),
-                        "logprobs": getattr(self.function_calling_llm, "logprobs", None),
-                        "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                        "base_url": getattr(self.function_calling_llm, "base_url", None),
-                    }
-                    updated_llm = self._set_llm_params(llm, llm_params)
-                    self.function_calling_llm = updated_llm
-
-        return self
+                return self._set_llm_params(llm=llm_obj, config=llm_params)
 
 
     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
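The rewrite above folds the two near-duplicate setup blocks into a single `_set_llm` dispatcher, so `llm` and `function_calling_llm` each accept an `LLM` instance, a model-name string, a config dict, or `None` (which falls back to `DEFAULT_MODEL_NAME`), and `function_calling_llm` now defaults to `llm`. A minimal sketch of the accepted inputs; the `role` and `goal` values are illustrative placeholders, not taken from this diff:

```python
from versionhq.agent.model import Agent
from versionhq.llm.model import LLM

# str -> the case str() branch builds LLM(model="gpt-4o")
a = Agent(role="Researcher", goal="summarize briefs", llm="gpt-4o")

# dict -> "model_name" is popped; remaining non-None keys become LLM params
b = Agent(role="Analyst", goal="score leads",
          llm={"model_name": "gemini/gemini-1.5-pro", "temperature": 0.2})

# LLM instance -> passed straight through _set_llm_params
c = Agent(role="Writer", goal="draft emails", llm=LLM(model="claude-3-haiku-20240307"))

# omitted -> DEFAULT_MODEL_NAME; function_calling_llm then defaults to the same LLM
d = Agent(role="Planner", goal="plan workflows")
```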
@@ -257,6 +211,11 @@ class Agent(BaseModel):
         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
         llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
 
+        if llm.provider is None:
+            provider_name = llm.model.split("/")[0]
+            valid_provider = provider_name if provider_name in PROVIDERS else None
+            llm.provider = valid_provider
+
         if self.callbacks:
             llm.callbacks = self.callbacks
             llm._set_callbacks(llm.callbacks)
@@ -344,14 +303,46 @@ class Agent(BaseModel):
 
     @model_validator(mode="after")
     def set_up_knowledge(self) -> Self:
-        if self.knowledge_sources:
-            collection_name = f"{self.role.replace(' ', '_')}"
+        from versionhq.knowledge.source import BaseKnowledgeSource, StringKnowledgeSource, TextFileKnowledgeSource, CSVKnowledgeSource, ExcelKnowledgeSource, JSONKnowledgeSource
+        from versionhq.knowledge.source_docling import DoclingSource
 
-            self._knowledge = Knowledge(
-                sources=self.knowledge_sources,
-                embedder_config=self.embedder_config,
-                collection_name=collection_name,
-            )
+        if self.knowledge_sources:
+            try:
+                collection_name = f"{self.role.replace(' ', '_')}"
+                knowledge_sources = []
+                docling_fp, txt_fp, json_fp, excel_fp, csv_fp, pdf_fp = [], [], [], [], [], []
+                str_cont = ""
+
+                for item in self.knowledge_sources:
+                    if isinstance(item, BaseKnowledgeSource):
+                        knowledge_sources.append(item)
+
+                    elif isinstance(item, str) and "http" in item:
+                        docling_fp.append(item)
+
+                    elif isinstance(item, str):
+                        match os.path.splitext(item)[1]:
+                            case ".txt": txt_fp.append(item)
+                            case ".json": json_fp.append(item)
+                            case ".xls" | ".xlsx": excel_fp.append(item)
+                            case ".pdf": pdf_fp.append(item)
+                            case ".csv": csv_fp.append(item)
+                            case _: str_cont += str(item)
+
+                    else:
+                        str_cont += str(item)
+
+                if docling_fp: knowledge_sources.append(DoclingSource(file_paths=docling_fp))
+                if str_cont: knowledge_sources.append(StringKnowledgeSource(content=str_cont))
+                if txt_fp: knowledge_sources.append(TextFileKnowledgeSource(file_paths=txt_fp))
+                if csv_fp: knowledge_sources.append(CSVKnowledgeSource(file_path=csv_fp))
+                if excel_fp: knowledge_sources.append(ExcelKnowledgeSource(file_path=excel_fp))
+                if json_fp: knowledge_sources.append(JSONKnowledgeSource(file_paths=json_fp))
+
+                self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)
+
+            except:
+                self._logger.log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")
 
         return self
 
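With `knowledge_sources` widened to `List[BaseKnowledgeSource | Any]`, the validator above now routes plain strings by URL or file extension to a matching source class, falling back to `StringKnowledgeSource` for raw text. A hedged sketch of the accepted inputs (file names and field values are placeholders); note that in the hunk above, local `.pdf` paths are collected into `pdf_fp` but never appended to `knowledge_sources`:

```python
agent = Agent(
    role="Support Bot",
    goal="answer customer questions",
    knowledge_sources=[
        "https://example.com/whitepaper.pdf",            # contains "http" -> DoclingSource
        "pricing.csv",                                   # .csv  -> CSVKnowledgeSource
        "faq.json",                                      # .json -> JSONKnowledgeSource
        "notes.txt",                                     # .txt  -> TextFileKnowledgeSource
        "Refunds are processed within 5 business days",  # raw text -> StringKnowledgeSource
    ],
)
```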
@@ -414,7 +405,7 @@ class Agent(BaseModel):
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
 
         if tool_res_as_final:
-            func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
+            func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else self.llm if self.llm and self.llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
             raw_response = func_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
             task.tokens = func_llm._tokens
         else:
@@ -458,7 +449,7 @@ class Agent(BaseModel):
         from versionhq.knowledge._utils import extract_knowledge_context
 
         task: InstanceOf[Task] = task
-        tools: Optional[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
+        tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
 
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()
@@ -474,7 +465,6 @@ class Agent(BaseModel):
         if agent_knowledge_context:
             task_prompt += agent_knowledge_context
 
-
         if self.use_memory == True:
             contextual_memory = ContextualMemory(
                 memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
src/versionhq/knowledge/source_docling.py
@@ -3,7 +3,6 @@ from typing import Iterator, List, Optional
 from urllib.parse import urlparse
 
 try:
-    import docling
     from docling.datamodel.base_models import InputFormat
     from docling.document_converter import DocumentConverter
     from docling.exceptions import ConversionError
@@ -12,19 +11,12 @@ try:
     DOCLING_AVAILABLE = True
 except ImportError:
     import envoy
-    r = envoy.run("uv add docling --optional docling")
-
-    import docling
-    from docling.datamodel.base_models import InputFormat
-    from docling.document_converter import DocumentConverter
-    from docling.exceptions import ConversionError
-    from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
-    from docling_core.types.doc.document import DoclingDocument
+    envoy.run("uv add docling --optional docling")
    DOCLING_AVAILABLE = True
 except:
    DOCLING_AVAILABLE = False
 
-from pydantic import Field, InstanceOf
+from pydantic import Field
 
 from versionhq.knowledge.source import BaseKnowledgeSource
 from versionhq.storage.utils import fetch_db_storage_path
@@ -54,11 +46,20 @@ class DoclingSource(BaseKnowledgeSource):
     ))
 
     def __init__(self, *args, **kwargs):
-        if not DOCLING_AVAILABLE:
-            raise ImportError("The docling package is required. Please install the package using: $ uv add docling.")
-        else:
+        if DOCLING_AVAILABLE:
+            from docling.datamodel.base_models import InputFormat
+            from docling.document_converter import DocumentConverter
+            from docling.exceptions import ConversionError
+            from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
+            from docling_core.types.doc.document import DoclingDocument
+
             super().__init__(*args, **kwargs)
 
+        else:
+            raise ImportError("The docling package is required. Please install the package using: $ uv add docling.")
+        # else:
+        #     super().__init__(*args, **kwargs)
+
 
     def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
         conv_results_iter = self.document_converter.convert_all(self.valid_file_paths)
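Taken together, the three hunks above rework the optional-dependency flow: the module-level try/except attempts the docling imports, retries once after `envoy.run("uv add docling --optional docling")`, and otherwise leaves `DOCLING_AVAILABLE = False`, while the class now defers its docling imports to `__init__`. A minimal usage sketch (the URL is a placeholder):

```python
from versionhq.knowledge.source_docling import DoclingSource

# Succeeds only when DOCLING_AVAILABLE ended up True; otherwise __init__
# raises ImportError asking for `uv add docling`.
source = DoclingSource(file_paths=["https://example.com/annual_report.pdf"])
```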
src/versionhq/llm/llm_vars.py
@@ -3,6 +3,33 @@ from typing import Type
 
 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 
+PROVIDERS = [
+    "openai",
+    "gemini",
+    "sagemaker",
+    "huggingface", # need api base
+    "anthropic",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+    "llama",
+]
+
+ENDPOINT_PROVIDERS = [
+    # "openai",
+    # "gemini",
+    # "sagemaker",
+    "huggingface",
+    # "anthropic",
+    # "ollama",
+    # "watson",
+    # "bedrock",
+    # "azure",
+    # "cerebras",
+    # "llama",
+]
 
 """
 List of models available on the framework.
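The relocated `PROVIDERS` list backs the provider inference added to `Agent._set_llm_params` above: when `llm.provider` is unset, the prefix of the model string before `/` is accepted only if it appears here. `ENDPOINT_PROVIDERS` marks providers that need an API base (currently only `huggingface`). A small sketch of that inference rule; `infer_provider` is a hypothetical helper mirroring the inline check, not part of this diff:

```python
from versionhq.llm.llm_vars import PROVIDERS

def infer_provider(model: str) -> str | None:
    # Same rule as Agent._set_llm_params: the prefix before "/" must be a known provider.
    prefix = model.split("/")[0]
    return prefix if prefix in PROVIDERS else None

assert infer_provider("gemini/gemini-1.5-pro") == "gemini"
assert infer_provider("gpt-4o") is None                      # bare model names carry no provider prefix
assert infer_provider("deepseek/deepseek-reasoner") is None  # "deepseek" is not in PROVIDERS above
```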
@@ -16,7 +43,6 @@ litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=
 
 MODELS = {
     "openai": [
-        # "gpt-3.5-turbo",
         "gpt-4",
         "gpt-4o",
         "gpt-4o-mini",
@@ -27,11 +53,7 @@ MODELS = {
         "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-2.0-flash-exp",
-        # "gemini/gemini-gemma-2-9b-it",
-        # "gemini/gemini-gemma-2-27b-it",
     ],
-    # "vetrex_ai": [
-    # ],
     "anthropic": [
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
@@ -39,10 +61,28 @@ MODELS = {
         "claude-3-opus-20240229",
         "claude-3-haiku-20240307",
     ],
-    # "ollama": [
-    #     "ollama/llama3.1",
-    #     "ollama/mixtral",
-    # ],
+    "huggingface": [
+        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
+    ],
+    # "sagemaker": [
+    #     "sagemaker/huggingface-text2text-flan-t5-base",
+    #     "sagemaker/huggingface-llm-gemma-7b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-8b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-70b",
+    #     "sagemaker/huggingface-llm-mistral-7b"
+    # ], #https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-latest.html
+    "ollama": [
+        "ollama/llama3.1",
+        "ollama/mixtral",
+        "ollama/mixtral-8x22B-Instruct-v0.1",
+    ],
+    "deepseek": [
+        "deepseek/deepseek-reasoner",
+
+    ],
+
     # "watson": [
     #     "watsonx/meta-llama/llama-3-1-70b-instruct",
     #     "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -53,44 +93,48 @@ MODELS = {
     #     "watsonx/mistral/mistral-large",
     #     "watsonx/ibm/granite-3-8b-instruct",
     # ],
-    # "bedrock": [
-    #     "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-    #     "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-    #     "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-    #     "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-    #     "bedrock/anthropic.claude-v2:1",
-    #     "bedrock/anthropic.claude-v2",
-    #     "bedrock/anthropic.claude-instant-v1",
-    #     "bedrock/meta.llama3-1-405b-instruct-v1:0",
-    #     "bedrock/meta.llama3-1-70b-instruct-v1:0",
-    #     "bedrock/meta.llama3-1-8b-instruct-v1:0",
-    #     "bedrock/meta.llama3-70b-instruct-v1:0",
-    #     "bedrock/meta.llama3-8b-instruct-v1:0",
-    #     "bedrock/amazon.titan-text-lite-v1",
-    #     "bedrock/amazon.titan-text-express-v1",
-    #     "bedrock/cohere.command-text-v14",
-    #     "bedrock/ai21.j2-mid-v1",
-    #     "bedrock/ai21.j2-ultra-v1",
-    #     "bedrock/ai21.jamba-instruct-v1:0",
-    #     "bedrock/meta.llama2-13b-chat-v1",
-    #     "bedrock/meta.llama2-70b-chat-v1",
-    #     "bedrock/mistral.mistral-7b-instruct-v0:2",
-    #     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
-    # ],
+    "bedrock": [
+        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+        # "bedrock/anthropic.claude-v2:1",
+        "bedrock/anthropic.claude-v2",
+        "bedrock/anthropic.claude-instant-v1",
+        "bedrock/meta.llama3-1-405b-instruct-v1:0",
+        "bedrock/meta.llama3-1-70b-instruct-v1:0",
+        "bedrock/meta.llama3-1-8b-instruct-v1:0",
+        "bedrock/meta.llama3-70b-instruct-v1:0",
+        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/amazon.titan-text-lite-v1",
+        "bedrock/amazon.titan-text-express-v1",
+        "bedrock/cohere.command-text-v14",
+        "bedrock/ai21.j2-mid-v1",
+        "bedrock/ai21.j2-ultra-v1",
+        "bedrock/ai21.jamba-instruct-v1:0",
+        "bedrock/meta.llama2-13b-chat-v1",
+        "bedrock/meta.llama2-70b-chat-v1",
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    ],
 }
 
 
-PROVIDERS = [
-    "openai",
-    "anthropic",
-    "gemini",
-    "ollama",
-    "watson",
-    "bedrock",
-    "azure",
-    "cerebras",
-    "llama",
-]
+
+KEYS = {
+    "openai": ["OPENAI_API_KEY"],
+    "gemini": ["GEMINI_API_KEY"],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "anthropic": ["ANTHROPIC_API_KEY"],
+}
+
+
+"""
+Use base_url to specify
+"""
+BASE_URLS = {
+    "deepseek": "https://api.deepseek.com"
+}
 
 
 """
@@ -118,6 +162,8 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "claude-3-haiku-20240307": 200000,
 
     "deepseek-chat": 128000,
+    "deepseek/deepseek-reasoner": 8192,
+
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
     "llama3-groq-70b-8192-tool-use-preview": 8192,
@@ -135,11 +181,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
 }
 
 
-LLM_API_KEY_NAMES = {
-    "openai": "OPENAI_API_KEY",
-    "anthropic": "ANTHROPIC_API_KEY",
-    "gemini": "GEMINI_API_KEY",
-}
+
 
 LLM_BASE_URL_KEY_NAMES = {
     "openai": "OPENAI_API_BASE",
@@ -262,14 +304,8 @@ PARAMS = {
     ],
     "openai": [
         "timeout",
-        # "temperature",
-        # "top_p",
-        # "n",
-        # "stream",
         "stream_options",
-        # "stop",
         "max_compl,etion_tokens",
-        # "max_tokens",
         "modalities",
         "prediction",
         "audio",
@@ -277,10 +313,7 @@ PARAMS = {
         "frequency_penalty",
         "logit_bias",
         "user",
-        # "response_format",
         "seed",
-        # "tools",
-        # "tool_choice",
         "logprobs",
         "top_logprobs",
         "parallel_tool_calls",