versionhq 1.1.11.8__tar.gz → 1.1.12.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.github/workflows/run_tests.yml +6 -5
  2. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.gitignore +0 -2
  3. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/LICENSE +1 -1
  4. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/PKG-INFO +10 -9
  5. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/README.md +8 -7
  6. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/pyproject.toml +1 -1
  7. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/__init__.py +5 -8
  8. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/_utils/logger.py +6 -1
  9. versionhq-1.1.12.1/src/versionhq/agent/inhouse_agents.py +31 -0
  10. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/agent/model.py +9 -5
  11. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/llm/llm_vars.py +14 -124
  12. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/llm/model.py +35 -37
  13. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/evaluate.py +4 -6
  14. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/model.py +37 -32
  15. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/team/model.py +18 -18
  16. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq.egg-info/PKG-INFO +10 -9
  17. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq.egg-info/SOURCES.txt +2 -1
  18. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/task/__init__.py +19 -5
  19. versionhq-1.1.12.1/tests/task/llm_connection_test.py +69 -0
  20. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/task/task_test.py +3 -5
  21. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/uv.lock +13 -13
  22. versionhq-1.1.11.8/src/versionhq/agent/default_agents.py +0 -15
  23. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.github/workflows/publish.yml +0 -0
  24. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.github/workflows/publish_testpypi.yml +0 -0
  25. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.github/workflows/security_check.yml +0 -0
  26. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.pre-commit-config.yaml +0 -0
  27. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/.python-version +0 -0
  28. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/SECURITY.md +0 -0
  29. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/db/preprocess.py +0 -0
  30. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/requirements-dev.txt +0 -0
  31. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/requirements.txt +0 -0
  32. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/runtime.txt +0 -0
  33. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/setup.cfg +0 -0
  34. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/_utils/__init__.py +0 -0
  35. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/_utils/i18n.py +0 -0
  36. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/_utils/process_config.py +0 -0
  37. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/_utils/usage_metrics.py +0 -0
  38. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/_utils/vars.py +0 -0
  39. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  40. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  41. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/agent/__init__.py +0 -0
  42. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/agent/parser.py +0 -0
  43. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/agent/rpm_controller.py +0 -0
  44. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/cli/__init__.py +0 -0
  45. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/__init__.py +0 -0
  46. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/customer/__init__.py +0 -0
  47. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/customer/model.py +0 -0
  48. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/product/__init__.py +0 -0
  49. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/product/model.py +0 -0
  50. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/workflow/__init__.py +0 -0
  51. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/clients/workflow/model.py +0 -0
  52. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/__init__.py +0 -0
  53. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/_utils.py +0 -0
  54. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/embedding.py +0 -0
  55. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/model.py +0 -0
  56. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/source.py +0 -0
  57. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/source_docling.py +0 -0
  58. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/knowledge/storage.py +0 -0
  59. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/llm/__init__.py +0 -0
  60. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/memory/__init__.py +0 -0
  61. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/memory/contextual_memory.py +0 -0
  62. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/memory/model.py +0 -0
  63. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/__init__.py +0 -0
  64. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/base.py +0 -0
  65. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
  66. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/mem0_storage.py +0 -0
  67. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/rag_storage.py +0 -0
  68. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/task_output_storage.py +0 -0
  69. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/storage/utils.py +0 -0
  70. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/TEMPLATES/Description.py +0 -0
  71. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/__init__.py +0 -0
  72. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/formatter.py +0 -0
  73. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/log_handler.py +0 -0
  74. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/task/structured_response.py +0 -0
  75. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/team/__init__.py +0 -0
  76. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/team/team_planner.py +0 -0
  77. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/__init__.py +0 -0
  78. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/cache_handler.py +0 -0
  79. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/composio_tool.py +0 -0
  80. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/composio_tool_vars.py +0 -0
  81. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/decorator.py +0 -0
  82. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/model.py +0 -0
  83. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq/tool/tool_handler.py +0 -0
  84. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq.egg-info/dependency_links.txt +0 -0
  85. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq.egg-info/requires.txt +0 -0
  86. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/src/versionhq.egg-info/top_level.txt +0 -0
  87. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/__init__.py +0 -0
  88. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/agent/__init__.py +0 -0
  89. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/agent/agent_test.py +0 -0
  90. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/cli/__init__.py +0 -0
  91. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/clients/customer_test.py +0 -0
  92. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/clients/product_test.py +0 -0
  93. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/clients/workflow_test.py +0 -0
  94. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/conftest.py +0 -0
  95. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/knowledge/__init__.py +0 -0
  96. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/knowledge/knowledge_test.py +0 -0
  97. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/knowledge/mock_report_compressed.pdf +0 -0
  98. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/llm/__init__.py +0 -0
  99. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/llm/llm_test.py +0 -0
  100. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/memory/__init__.py +0 -0
  101. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/memory/memory_test.py +0 -0
  102. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/team/Prompts/Demo_test.py +0 -0
  103. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/team/__init__.py +0 -0
  104. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/team/team_test.py +0 -0
  105. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/tool/__init__.py +0 -0
  106. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/tool/composio_test.py +0 -0
  107. {versionhq-1.1.11.8 → versionhq-1.1.12.1}/tests/tool/tool_test.py +0 -0
.github/workflows/run_tests.yml
@@ -6,15 +6,16 @@ permissions:
   contents: write
 
 env:
+  DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
+  DEFAULT_MODEL_PROVIDER_NAME: ${{ secrets.DEFAULT_MODEL_PROVIDER_NAME }}
   LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-  DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
-  DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
-  COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
-  DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
   GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
   ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-  ANTHROPIC_API_BASE: ${{ secrets.ANTHROPIC_API_BASE }}
+  OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
+  COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
+  DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
+  DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   MEM0_API_KEY: ${{ secrets.MEM0_API_KEY }}
 
 jobs:
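The two new `DEFAULT_MODEL_*` secrets feed the defaults that `src/versionhq/llm/model.py` reads at import time (see that file's hunks below); when the secrets are unset, hard-coded fallbacks apply:

```python
import os

# Fallbacks used when the CI secrets are absent (mirrors src/versionhq/llm/model.py).
DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o-mini")
DEFAULT_MODEL_PROVIDER_NAME = os.environ.get("DEFAULT_MODEL_PROVIDER_NAME", "openai")
```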
.gitignore
@@ -3,8 +3,6 @@ destinations.py
 
 entity_memory.py
 
-llm_connection_test.py
-
 train.py
 
 dist/
LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2024 Version IO Sdn. Bhd.
+Copyright (c) 2024-2025 Version IO Sdn. Bhd.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
PKG-INFO
@@ -1,11 +1,11 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.11.8
+Version: 1.1.12.1
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
 
-Copyright (c) 2024 Version IO Sdn. Bhd.
+Copyright (c) 2024-2025 Version IO Sdn. Bhd.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -78,12 +78,12 @@ Requires-Dist: numpy>=1.26.4; extra == "numpy"
 
 ![MIT license](https://img.shields.io/badge/License-MIT-green)
 [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-![PyPI](https://img.shields.io/badge/PyPI->=v1.1.11.4-blue)
-![python ver](https://img.shields.io/badge/Python->=3.12-purple)
+![PyPI](https://img.shields.io/badge/PyPI-v1.1.12+-blue)
+![python ver](https://img.shields.io/badge/Python-3.11+-purple)
 ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)
 
 
-LLM orchestration frameworks to deploy multi-agent systems with task-based formation.
+LLM orchestration frameworks to deploy multi-agent systems and automate complex tasks with network formations.
 
 **Visit:**
 
@@ -122,15 +122,16 @@ LLM orchestration frameworks to deploy multi-agent systems with task-based forma
 
 ## Key Features
 
-Generate mulit-agent systems depending on the complexity of the task, and execute the task with agents of choice.
+Generate multi-agent systems based on the task complexity, execute tasks, and evaluate output based on the given criteria.
 
-Model-agnostic agents can handle RAG tools, tools, callbacks, and knowledge sharing among other agents.
+Agents are model-agnostic, and can handle and share RAG tools, knowledge, memory, and callbacks among other agents. (self-learn)
 
 
 ### Agent formation
-Depending on the task complexity, agents can make a different formation.
 
-You can specify which formation you want them to generate, or let the agent decide if you don’t have a clear plan.
+Agents adapt their formation based on task complexity.
+
+You can specify a desired formation or allow the agents to determine it autonomously (default).
 
 
 | | **Solo Agent** | **Supervising** | **Network** | **Random** |
README.md
@@ -2,12 +2,12 @@
 ![MIT license](https://img.shields.io/badge/License-MIT-green)
 [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-![PyPI](https://img.shields.io/badge/PyPI->=v1.1.11.4-blue)
-![python ver](https://img.shields.io/badge/Python->=3.12-purple)
+![PyPI](https://img.shields.io/badge/PyPI-v1.1.12+-blue)
+![python ver](https://img.shields.io/badge/Python-3.11+-purple)
 ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)
 
 
-LLM orchestration frameworks to deploy multi-agent systems with task-based formation.
+LLM orchestration frameworks to deploy multi-agent systems and automate complex tasks with network formations.
 
 **Visit:**
 
@@ -46,15 +46,16 @@ LLM orchestration frameworks to deploy multi-agent systems with task-based forma
 
 ## Key Features
 
-Generate mulit-agent systems depending on the complexity of the task, and execute the task with agents of choice.
+Generate multi-agent systems based on the task complexity, execute tasks, and evaluate output based on the given criteria.
 
-Model-agnostic agents can handle RAG tools, tools, callbacks, and knowledge sharing among other agents.
+Agents are model-agnostic, and can handle and share RAG tools, knowledge, memory, and callbacks among other agents. (self-learn)
 
 
 ### Agent formation
-Depending on the task complexity, agents can make a different formation.
 
-You can specify which formation you want them to generate, or let the agent decide if you don’t have a clear plan.
+Agents adapt their formation based on task complexity.
+
+You can specify a desired formation or allow the agents to determine it autonomously (default).
 
 
 | | **Solo Agent** | **Supervising** | **Network** | **Random** |
pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.1.11.8"
+version = "1.1.12.1"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
src/versionhq/__init__.py
@@ -1,11 +1,8 @@
+# silence some warnings
 import warnings
-
-warnings.filterwarnings(
-    "ignore",
-    message="Pydantic serializer warnings:",
-    category=UserWarning,
-    module="pydantic.main",
-)
+warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:", category=UserWarning, module="pydantic.main")
+warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")
+warnings.filterwarnings(action="ignore", module="LiteLLM:utils")
 
 from versionhq.agent.model import Agent
 from versionhq.clients.customer.model import Customer
@@ -17,7 +14,7 @@ from versionhq.tool.model import Tool
 from versionhq.tool.composio_tool import ComposioHandler
 
 
-__version__ = "1.1.11.8"
+__version__ = "1.1.12.1"
 __all__ = [
     "Agent",
     "Customer",
src/versionhq/_utils/logger.py
@@ -36,10 +36,15 @@ class Printer:
 
 
 class Logger(BaseModel):
+    """
+    Control CLI messages.
+    Color: red = error, yellow = warning, blue = info (from vhq), green = info (from third party)
+    """
+
     verbose: bool = Field(default=True)
     _printer: Printer = PrivateAttr(default_factory=Printer)
 
     def log(self, level, message, color="yellow"):
         if self.verbose:
             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-            self._printer.print(f"\n{timestamp} - versionHQ - {level.upper()}: {message}", color=color)
+            self._printer.print(f"\n{timestamp} - versionHQ [{level.upper()}]: {message}", color=color)
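A quick sketch of the documented color convention in use (hypothetical call; `verbose` defaults to `True`):

```python
from versionhq._utils.logger import Logger

logger = Logger()
# Prints "<timestamp> - versionHQ [INFO]: agent started" in blue (info from vhq).
logger.log(level="info", message="agent started", color="blue")
```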
src/versionhq/agent/inhouse_agents.py (new)
@@ -0,0 +1,31 @@
+from versionhq.agent.model import Agent
+from versionhq.llm.model import DEFAULT_MODEL_NAME
+
+"""
+In-house agents to be called across the project.
+[Rules] agents' names and roles start with `vhq_`.
+"""
+
+vhq_client_manager = Agent(
+    role="vhq-Client Manager",
+    goal="Efficiently communicate with the client on the task progress",
+    llm=DEFAULT_MODEL_NAME
+)
+
+vhq_task_evaluator = Agent(
+    role="vhq-Task Evaluator",
+    goal="score the output according to the given evaluation criteria.",
+    llm=DEFAULT_MODEL_NAME,
+    llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
+    maxit=1,
+    max_retry_limit=1
+)
+
+vhq_formation_planner = Agent(
+    role="vhq-Formation Planner",
+    goal="Plan a formation of agents based on the given task descirption.",
+    llm="gemini/gemini-2.0-flash-exp",
+    llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
+    maxit=1,
+    max_retry_limit=1
+)
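Because these are ordinary module-level `Agent` instances, other modules can import them directly; a minimal illustrative usage:

```python
# Hypothetical usage of the new in-house agents.
from versionhq.agent.inhouse_agents import vhq_task_evaluator

print(vhq_task_evaluator.role)  # "vhq-Task Evaluator"
print(vhq_task_evaluator.llm)   # LLM object resolved from DEFAULT_MODEL_NAME
```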
src/versionhq/agent/model.py
@@ -165,13 +165,16 @@ class Agent(BaseModel):
         Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
         """
         self.agent_ops_agent_name = self.role
-        self.llm = self._set_llm(llm=self.llm)
+        self.llm = self._convert_to_llm_class(llm=self.llm)
+
         function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
-        self.function_calling_llm = self._set_llm(llm=function_calling_llm)
+        function_calling_llm = self._convert_to_llm_class(llm=function_calling_llm)
+        if function_calling_llm._supports_function_calling():
+            self.function_calling_llm = function_calling_llm
         return self
 
 
-    def _set_llm(self, llm: Any | None) -> LLM:
+    def _convert_to_llm_class(self, llm: Any | None) -> LLM:
         llm = llm if llm is not None else DEFAULT_MODEL_NAME
 
         match llm:
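In short, `function_calling_llm` is now assigned only when the converted model reports function-calling support; a condensed sketch of the new flow, using the names from the hunk above (not a public API):

```python
# Sketch only: mirrors the validator logic in the hunk above.
llm = agent._convert_to_llm_class(llm=agent.llm)
candidate = agent.function_calling_llm if agent.function_calling_llm else llm
candidate = agent._convert_to_llm_class(llm=candidate)
if candidate._supports_function_calling():
    agent.function_calling_llm = candidate
```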
@@ -413,7 +416,7 @@ class Agent(BaseModel):
                 task.tokens = self.llm._tokens
 
                 task_execution_counter += 1
-                self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")
+                self._logger.log(level="info", message=f"Agent response: {raw_response}", color="green")
                 return raw_response
 
         except Exception as e:
@@ -429,7 +432,7 @@ class Agent(BaseModel):
             iterations += 1
 
         task_execution_counter += 1
-        self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")
+        self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
         return raw_response
 
         if not raw_response:
@@ -474,6 +477,7 @@ class Agent(BaseModel):
             task_prompt += memory.strip()
 
 
+        ## comment out for now
         # if self.team and self.team._train:
         #     task_prompt = self._training_handler(task_prompt=task_prompt)
         # else:
src/versionhq/llm/llm_vars.py
@@ -6,29 +6,20 @@ JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_
 PROVIDERS = [
     "openai",
     "gemini",
-    "sagemaker",
-    "huggingface", # need api base
+    "openrouter",
+    "huggingface",
     "anthropic",
+    "sagemaker",
+    "bedrock",
     "ollama",
     "watson",
-    "bedrock",
     "azure",
     "cerebras",
     "llama",
 ]
 
 ENDPOINT_PROVIDERS = [
-    # "openai",
-    # "gemini",
-    # "sagemaker",
     "huggingface",
-    # "anthropic",
-    # "ollama",
-    # "watson",
-    # "bedrock",
-    # "azure",
-    # "cerebras",
-    # "llama",
 ]
 
 """
@@ -57,10 +48,14 @@ MODELS = {
     "anthropic": [
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
-        "claude-3-sonnet-20240229",
+        "claude-3-haiku-2024030",
         "claude-3-opus-20240229",
         "claude-3-haiku-20240307",
     ],
+    "openrouter": [
+        "openrouter/deepseek/deepseek-r1:free",
+        "openrouter/qwen/qwen-2.5-72b-instruct",
+    ],
     "huggingface": [
         "huggingface/qwen/qwen2.5-VL-72B-Instruct",
     ],
@@ -78,11 +73,6 @@ MODELS = {
         "ollama/mixtral",
         "ollama/mixtral-8x22B-Instruct-v0.1",
     ],
-    "deepseek": [
-        "deepseek/deepseek-reasoner",
-
-    ],
-
     # "watson": [
     #     "watsonx/meta-llama/llama-3-1-70b-instruct",
     #     "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -98,7 +88,6 @@ MODELS = {
         "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
         "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
         "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-        # "bedrock/anthropic.claude-v2:1",
         "bedrock/anthropic.claude-v2",
         "bedrock/anthropic.claude-instant-v1",
         "bedrock/meta.llama3-1-405b-instruct-v1:0",
@@ -124,24 +113,17 @@ MODELS = {
 KEYS = {
     "openai": ["OPENAI_API_KEY"],
     "gemini": ["GEMINI_API_KEY"],
-    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
     "anthropic": ["ANTHROPIC_API_KEY"],
+    "huggingface": ["HUGGINGFACE_API_KEY", ],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
 }
 
 
-"""
-Use base_url to specify
-"""
-BASE_URLS = {
-    "deepseek": "https://api.deepseek.com"
-}
-
 
 """
 Max input token size by the model.
 """
 LLM_CONTEXT_WINDOW_SIZES = {
-    "gpt-3.5-turbo": 8192,
     "gpt-4": 8192,
     "gpt-4o": 128000,
     "gpt-4o-mini": 128000,
@@ -160,6 +142,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "claude-3-sonnet-20240229": 200000,
     "claude-3-opus-20240229": 200000,
     "claude-3-haiku-20240307": 200000,
+    "claude-3-5-sonnet-2024102": 200000,
 
     "deepseek-chat": 128000,
     "deepseek/deepseek-reasoner": 8192,
@@ -177,111 +160,18 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "llama3-70b-8192": 8192,
     "llama3-8b-8192": 8192,
     "mixtral-8x7b-32768": 32768,
-    "claude-3-5-sonnet-2024102": 200000,
-}
-
-
-
-
-LLM_BASE_URL_KEY_NAMES = {
-    "openai": "OPENAI_API_BASE",
-    "gemini": "GEMINI_API_BASE",
-    "anthropic": "ANTHROPIC_API_BASE",
-}
-
-LLM_VARS = {
-    "openai": [
-        {
-            "prompt": "Enter your OPENAI API key (press Enter to skip)",
-            "key_name": "OPENAI_API_KEY",
-        }
-    ],
-    "anthropic": [
-        {
-            "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
-            "key_name": "ANTHROPIC_API_KEY",
-        }
-    ],
-    "gemini": [
-        {
-            "prompt": "Enter your GEMINI API key (press Enter to skip)",
-            "key_name": "GEMINI_API_KEY",
-        }
-    ],
-    "watson": [
-        {
-            "prompt": "Enter your WATSONX URL (press Enter to skip)",
-            "key_name": "WATSONX_URL",
-        },
-        {
-            "prompt": "Enter your WATSONX API Key (press Enter to skip)",
-            "key_name": "WATSONX_APIKEY",
-        },
-        {
-            "prompt": "Enter your WATSONX Project Id (press Enter to skip)",
-            "key_name": "WATSONX_PROJECT_ID",
-        },
-    ],
-    "ollama": [
-        {
-            "default": True,
-            "API_BASE": "http://localhost:11434",
-        }
-    ],
-    "bedrock": [
-        {
-            "prompt": "Enter your AWS Access Key ID (press Enter to skip)",
-            "key_name": "AWS_ACCESS_KEY_ID",
-        },
-        {
-            "prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
-            "key_name": "AWS_SECRET_ACCESS_KEY",
-        },
-        {
-            "prompt": "Enter your AWS Region Name (press Enter to skip)",
-            "key_name": "AWS_REGION_NAME",
-        },
-    ],
-    "azure": [
-        {
-            "prompt": "Enter your Azure deployment name (must start with 'azure/')",
-            "key_name": "model",
-        },
-        {
-            "prompt": "Enter your AZURE API key (press Enter to skip)",
-            "key_name": "AZURE_API_KEY",
-        },
-        {
-            "prompt": "Enter your AZURE API base URL (press Enter to skip)",
-            "key_name": "AZURE_API_BASE",
-        },
-        {
-            "prompt": "Enter your AZURE API version (press Enter to skip)",
-            "key_name": "AZURE_API_VERSION",
-        },
-    ],
-    "cerebras": [
-        {
-            "prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
-            "key_name": "model",
-        },
-        {
-            "prompt": "Enter your Cerebras API version (press Enter to skip)",
-            "key_name": "CEREBRAS_API_KEY",
-        },
-    ],
 }
 
 
 """
-Params for litellm.completion() func. Address common/unique params to each provider.
+Params for litellm.completion().
 """
 
 PARAMS = {
     "litellm": [
         "api_base",
-        "api_version,"
+        "api_version,",
         "num_retries",
         "context_window_fallback_dict",
         "fallbacks",
src/versionhq/llm/model.py
@@ -1,4 +1,3 @@
-import logging
 import json
 import os
 import sys
@@ -6,12 +5,11 @@ import threading
 import warnings
 from dotenv import load_dotenv
 import litellm
-from litellm import get_supported_openai_params, JSONSchemaValidationError
+from litellm import JSONSchemaValidationError
 from contextlib import contextmanager
 from typing import Any, Dict, List, Optional
 from typing_extensions import Self
-from pydantic import BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
-from pydantic_core import PydanticCustomError
+from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict
 
 from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS
 from versionhq.tool.model import Tool, ToolSet
@@ -25,10 +23,6 @@ DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
 DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o-mini")
 DEFAULT_MODEL_PROVIDER_NAME = os.environ.get("DEFAULT_MODEL_PROVIDER_NAME", "openai")
 
-# proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
-# openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
-
-
 class FilteredStream:
     def __init__(self, original_stream):
         self._original_stream = original_stream
@@ -53,7 +47,8 @@ class FilteredStream:
 @contextmanager
 def suppress_warnings():
     with warnings.catch_warnings():
-        warnings.filterwarnings("ignore")
+        litellm.set_verbose = False
+        warnings.filterwarnings(action="ignore")
         old_stdout = sys.stdout
         old_stderr = sys.stderr
         sys.stdout = FilteredStream(old_stdout)
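Call sites are unchanged; the context manager still wraps noisy third-party output, now also muting LiteLLM's debug logging before installing the blanket filter. An illustrative (hypothetical) call site:

```python
# Hypothetical usage of the updated suppress_warnings() context manager.
with suppress_warnings():
    res = litellm.completion(model="gpt-4o-mini", messages=[{"role": "user", "content": "hi"}])
```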
@@ -83,11 +78,11 @@ class LLM(BaseModel):
     api_key: Optional[str] = Field(default=None, description="api key to access the model")
 
     # optional params
+    response_format: Optional[Any] = Field(default=None)
     timeout: Optional[float | int] = Field(default=None)
     max_tokens: Optional[int] = Field(default=None)
     max_completion_tokens: Optional[int] = Field(default=None)
     context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)
-    callbacks: List[Any] = Field(default_factory=list)
     temperature: Optional[float] = Field(default=None)
     top_p: Optional[float] = Field(default=None)
     n: Optional[int] = Field(default=None)
@@ -98,8 +93,8 @@ class LLM(BaseModel):
     seed: Optional[int] = Field(default=None)
     logprobs: Optional[bool] = Field(default=None)
     top_logprobs: Optional[int] = Field(default=None)
-    response_format: Optional[Any] = Field(default=None)
     tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
+    callbacks: List[Any] = Field(default_factory=list)
 
     # LiteLLM specific fields
     api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
@@ -193,7 +188,7 @@ class LLM(BaseModel):
     @model_validator(mode="after")
     def validate_model_params(self) -> Self:
         """
-        After setting up a valid model, provider, interface provider, add params to the model.
+        Set up valid params to the model after setting up a valid model, provider, interface provider names.
         """
         self._tokens = 0
@@ -216,19 +211,28 @@ class LLM(BaseModel):
         return self
 
 
-    def _create_valid_params(self, config: Dict[str, Any], provider: str = None) -> Dict[str, Any]:
-        params = dict()
-        valid_keys = list()
-        provider = provider if provider else self.provider if self.provider else None
-        valid_keys = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(provider) if provider and PARAMS.get(provider) else PARAMS.get("litellm") + PARAMS.get("common")
+    def _create_valid_params(self, config: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Return valid params (model + litellm original params) from the given config dict.
+        """
+
+        valid_params, valid_keys = dict(), list()
+
+        if self.model:
+            valid_keys = litellm.get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
+
+        if not valid_keys:
+            valid_keys = PARAMS.get("common")
+
+        valid_keys += PARAMS.get("litellm")
 
         for item in valid_keys:
             if hasattr(self, item) and getattr(self, item):
-                params[item] = getattr(self, item)
-            elif item in config:
-                params[item] = config[item]
+                valid_params[item] = getattr(self, item)
+            elif item in config and config[item]:
+                valid_params[item] = config[item]
 
-        return params
+        return valid_params
 
 
     def call(
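The rewrite replaces the hand-maintained per-provider key lists with LiteLLM's own capability lookup. A standalone sketch of that helper (the model name is only an example):

```python
import litellm

# Returns the OpenAI-style kwargs the model accepts, or None for unknown models,
# hence the fallback to PARAMS.get("common") above.
keys = litellm.get_supported_openai_params(model="gpt-4o-mini", request_type="chat_completion")
print("temperature" in keys)      # True for OpenAI chat models
print("response_format" in keys)  # same probe used by _supports_function_calling()
```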
@@ -250,21 +254,18 @@ class LLM(BaseModel):
             self._set_callbacks(self.callbacks) # passed by agent
 
         try:
-            provider = self.provider if self.provider else DEFAULT_MODEL_PROVIDER_NAME
             self.response_format = { "type": "json_object" } if tool_res_as_final == True else response_format
 
             if not tools:
                 params = self._create_valid_params(config=config)
-                res = litellm.completion(messages=messages, stream=False, **params)
+                res = litellm.completion(model=self.model, messages=messages, stream=False, **params)
                 self._tokens += int(res["usage"]["total_tokens"])
                 return res["choices"][0]["message"]["content"]
 
             else:
                 self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
-
-                # if provider == "openai":
-                params = self._create_valid_params(config=config, provider=provider)
-                res = litellm.completion(messages=messages, model=self.model, tools=self.tools)
+                params = self._create_valid_params(config=config)
+                res = litellm.completion(model=self.model, messages=messages, **params)
                 tool_calls = res.choices[0].message.tool_calls
                 tool_res = ""
@@ -304,7 +305,7 @@ class LLM(BaseModel):
             if tool_res_as_final:
                 return tool_res
             else:
-                res = litellm.completione(messages=messages, model=self.model, tools=self.tools)
+                res = litellm.completion(model=self.model, messages=messages, **params)
                 self._tokens += int(res["usage"]["total_tokens"])
                 return res.choices[0].message.content
@@ -320,20 +321,17 @@ class LLM(BaseModel):
 
     def _supports_function_calling(self) -> bool:
         try:
-            params = get_supported_openai_params(model=self.model)
-            return "response_format" in params
+            if self.model:
+                params = litellm.get_supported_openai_params(model=self.model)
+                return "response_format" in params if params else False
         except Exception as e:
-            self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
+            self._logger.log(level="warning", message=f"Failed to get supported params: {str(e)}", color="yellow")
             return False
 
 
     def _supports_stop_words(self) -> bool:
-        try:
-            params = get_supported_openai_params(model=self.model)
-            return "stop" in params
-        except Exception as e:
-            self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
-            return False
+        supported_params = litellm.get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider)
+        return "stop" in supported_params if supported_params else False
 
 
     def _get_context_window_size(self) -> int: