vectara-agentic 0.2.6__tar.gz → 0.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of vectara-agentic has been flagged as a potentially problematic release.
Files changed (40)
  1. {vectara_agentic-0.2.6/vectara_agentic.egg-info → vectara_agentic-0.2.8}/PKG-INFO +2 -2
  2. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/requirements.txt +1 -1
  3. vectara_agentic-0.2.8/tests/test_agent.py +125 -0
  4. vectara_agentic-0.2.8/tests/test_agent_planning.py +73 -0
  5. vectara_agentic-0.2.8/tests/test_agent_type.py +157 -0
  6. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/tests/test_fallback.py +2 -2
  7. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/tests/test_private_llm.py +1 -1
  8. vectara_agentic-0.2.8/tests/test_serialization.py +110 -0
  9. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/tests/test_tools.py +1 -1
  10. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/_version.py +1 -1
  11. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/agent.py +34 -12
  12. vectara_agentic-0.2.8/vectara_agentic/db_tools.py +262 -0
  13. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/sub_query_workflow.py +20 -7
  14. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/tools.py +139 -110
  15. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/types.py +1 -0
  16. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/utils.py +76 -10
  17. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8/vectara_agentic.egg-info}/PKG-INFO +2 -2
  18. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic.egg-info/SOURCES.txt +1 -0
  19. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic.egg-info/requires.txt +1 -1
  20. vectara_agentic-0.2.6/tests/test_agent.py +0 -158
  21. vectara_agentic-0.2.6/tests/test_agent_planning.py +0 -46
  22. vectara_agentic-0.2.6/tests/test_agent_type.py +0 -83
  23. vectara_agentic-0.2.6/vectara_agentic/db_tools.py +0 -96
  24. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/LICENSE +0 -0
  25. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/MANIFEST.in +0 -0
  26. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/README.md +0 -0
  27. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/setup.cfg +0 -0
  28. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/setup.py +0 -0
  29. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/tests/__init__.py +0 -0
  30. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/tests/endpoint.py +0 -0
  31. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/tests/test_workflow.py +0 -0
  32. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/__init__.py +0 -0
  33. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/_callback.py +0 -0
  34. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/_observability.py +0 -0
  35. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/_prompts.py +0 -0
  36. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/agent_config.py +0 -0
  37. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/agent_endpoint.py +0 -0
  38. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic/tools_catalog.py +0 -0
  39. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  40. {vectara_agentic-0.2.6 → vectara_agentic-0.2.8}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: vectara_agentic
- Version: 0.2.6
+ Version: 0.2.8
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
  Home-page: https://github.com/vectara/py-vectara-agentic
  Author: Ofer Mendelevitch
@@ -16,7 +16,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: llama-index==0.12.25
+ Requires-Dist: llama-index==0.12.26
  Requires-Dist: llama-index-indices-managed-vectara==0.4.2
  Requires-Dist: llama-index-agent-llm-compiler==0.3.0
  Requires-Dist: llama-index-agent-lats==0.3.0
@@ -1,4 +1,4 @@
- llama-index==0.12.25
+ llama-index==0.12.26
  llama-index-indices-managed-vectara==0.4.2
  llama-index-agent-llm-compiler==0.3.0
  llama-index-agent-lats==0.3.0
@@ -0,0 +1,125 @@
+ import unittest
+ import threading
+ from datetime import date
+
+ from vectara_agentic.agent import _get_prompt, Agent, AgentType
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.types import ModelProvider, ObserverType
+ from vectara_agentic.tools import ToolsFactory
+
+ def mult(x: float, y: float) -> float:
+     return x * y
+
+
+ ARIZE_LOCK = threading.Lock()
+
+ class TestAgentPackage(unittest.TestCase):
+     def test_get_prompt(self):
+         prompt_template = "{chat_topic} on {today} with {custom_instructions}"
+         topic = "Programming"
+         custom_instructions = "Always do as your mother tells you!"
+         expected_output = (
+             "Programming on "
+             + date.today().strftime("%A, %B %d, %Y")
+             + " with Always do as your mother tells you!"
+         )
+         self.assertEqual(
+             _get_prompt(prompt_template, topic, custom_instructions), expected_output
+         )
+
+     def test_agent_init(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI"
+         custom_instructions = "Always do as your mother tells you!"
+         agent = Agent(tools, topic, custom_instructions)
+         self.assertEqual(agent.agent_type, AgentType.OPENAI)
+         self.assertEqual(agent._topic, topic)
+         self.assertEqual(agent._custom_instructions, custom_instructions)
+
+         # To run this test, you must have OPENAI_API_KEY in your environment
+         self.assertEqual(
+             agent.chat(
+                 "What is 5 times 10. Only give the answer, nothing else"
+             ).response.replace("$", "\\$"),
+             "50",
+         )
+
+     def test_agent_config(self):
+         with ARIZE_LOCK:
+             tools = [ToolsFactory().create_tool(mult)]
+             topic = "AI topic"
+             instructions = "Always do as your father tells you, if your mother agrees!"
+             config = AgentConfig(
+                 agent_type=AgentType.REACT,
+                 main_llm_provider=ModelProvider.ANTHROPIC,
+                 main_llm_model_name="claude-3-5-sonnet-20241022",
+                 tool_llm_provider=ModelProvider.TOGETHER,
+                 tool_llm_model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",
+                 observer=ObserverType.ARIZE_PHOENIX
+             )
+
+             agent = Agent(
+                 tools=tools,
+                 topic=topic,
+                 custom_instructions=instructions,
+                 agent_config=config
+             )
+             self.assertEqual(agent._topic, topic)
+             self.assertEqual(agent._custom_instructions, instructions)
+             self.assertEqual(agent.agent_type, AgentType.REACT)
+             self.assertEqual(agent.agent_config.observer, ObserverType.ARIZE_PHOENIX)
+             self.assertEqual(agent.agent_config.main_llm_provider, ModelProvider.ANTHROPIC)
+             self.assertEqual(agent.agent_config.tool_llm_provider, ModelProvider.TOGETHER)
+
+             # To run this test, you must have ANTHROPIC_API_KEY and TOGETHER_API_KEY in your environment
+             self.assertEqual(
+                 agent.chat(
+                     "What is 5 times 10. Only give the answer, nothing else"
+                 ).response.replace("$", "\\$"),
+                 "50",
+             )
+
+     def test_multiturn(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+         self.assertEqual(res.response, "1050")
+
+     def test_from_corpus(self):
+         agent = Agent.from_corpus(
+             tool_name="RAG Tool",
+             vectara_corpus_key="corpus_key",
+             vectara_api_key="api_key",
+             data_description="information",
+             assistant_specialty="question answering",
+         )
+
+         self.assertIsInstance(agent, Agent)
+         self.assertEqual(agent._topic, "question answering")
+
+     def test_chat_history(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+             chat_history=[("What is 5 times 10", "50"), ("What is 3 times 7", "21")]
+         )
+
+         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+         self.assertEqual(res.response, "1050")
+
+
+ if __name__ == "__main__":
+     unittest.main()
@@ -0,0 +1,73 @@
+ import unittest
+
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.agent import Agent
+ from vectara_agentic.tools import VectaraToolFactory
+
+ from pydantic import Field, BaseModel
+
+
+ # SETUP special test account credentials for vectara
+ # It's okay to expose these credentials in the test code
+ vectara_corpus_key = "vectara-docs_1"
+ vectara_api_key = 'zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA'
+
+
+ class QueryArgs(BaseModel):
+     query: str = Field(..., description="The user query, always in the form of a question.")
+
+
+ vec_factory = VectaraToolFactory(vectara_api_key=vectara_api_key,
+                                  vectara_corpus_key=vectara_corpus_key)
+ summarizer = 'vectara-summary-table-md-query-ext-jan-2025-gpt-4o'
+ ask_vectara = vec_factory.create_rag_tool(
+     tool_name = "ask_vectara",
+     tool_description = "This tool can respond to questions about Vectara.",
+     tool_args_schema = QueryArgs,
+     reranker = "multilingual_reranker_v1", rerank_k = 100, rerank_cutoff = 0.1,
+     n_sentences_before = 2, n_sentences_after = 2, lambda_val = 0.005,
+     summary_num_results = 10,
+     vectara_summarizer = summarizer,
+     include_citations = True,
+     verbose=False,
+ )
+
+ class TestAgentPlanningPackage(unittest.TestCase):
+
+     def test_no_planning(self):
+         tools = [ask_vectara]
+         topic = "vectara"
+         instructions = "Answer user queries about Vectara."
+
+         query = "What is Vectara and what demos are available of the Vectara platform?"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+             agent_config=AgentConfig(),
+         )
+         res = agent.chat(query)
+         self.assertIn("demos", res.response)
+         self.assertIn("Vectara", res.response)
+
+     def test_structured_planning(self):
+         tools = [ask_vectara]
+         topic = "vectara"
+         instructions = "Answer user queries about Vectara."
+
+         query = "What is Vectara and what demos are available of the Vectara platform?"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+             agent_config=AgentConfig(),
+             use_structured_planning=True,
+         )
+
+         res = agent.chat(query)
+         self.assertIn("demos", res.response)
+         self.assertIn("Vectara", res.response)
+
+
+ if __name__ == "__main__":
+     unittest.main()
@@ -0,0 +1,157 @@
+ import unittest
+
+ from vectara_agentic.agent import Agent, AgentType
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.tools import ToolsFactory
+ from vectara_agentic.types import ModelProvider
+
+ import nest_asyncio
+ nest_asyncio.apply()
+
+ def mult(x: float, y: float) -> float:
+     return x * y
+
+
+ react_config_anthropic = AgentConfig(
+     agent_type=AgentType.REACT,
+     main_llm_provider=ModelProvider.ANTHROPIC,
+     tool_llm_provider=ModelProvider.ANTHROPIC,
+ )
+
+ react_config_gemini = AgentConfig(
+     agent_type=AgentType.REACT,
+     main_llm_provider=ModelProvider.GEMINI,
+     tool_llm_provider=ModelProvider.GEMINI,
+ )
+
+ react_config_together = AgentConfig(
+     agent_type=AgentType.REACT,
+     main_llm_provider=ModelProvider.TOGETHER,
+     tool_llm_provider=ModelProvider.TOGETHER,
+ )
+
+ fc_config_anthropic = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.ANTHROPIC,
+     tool_llm_provider=ModelProvider.ANTHROPIC,
+ )
+
+ fc_config_gemini = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.GEMINI,
+     tool_llm_provider=ModelProvider.GEMINI,
+ )
+
+ fc_config_together = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.TOGETHER,
+     tool_llm_provider=ModelProvider.TOGETHER,
+ )
+
+
+ openai_config = AgentConfig(
+     agent_type=AgentType.OPENAI,
+ )
+
+ class TestAgentType(unittest.TestCase):
+
+     def test_openai(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             agent_config=openai_config,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+     def test_gemini(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+
+         agent = Agent(
+             agent_config=react_config_gemini,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+         agent = Agent(
+             agent_config=fc_config_gemini,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+     def test_together(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+
+         agent = Agent(
+             agent_config=react_config_together,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+         agent = Agent(
+             agent_config=fc_config_together,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+     def test_anthropic(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+
+         agent = Agent(
+             agent_config=react_config_anthropic,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+         agent = Agent(
+             agent_config=fc_config_anthropic,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+
+ if __name__ == "__main__":
+     unittest.main()
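
These tests are the first coverage of the new FUNCTION_CALLING agent type added in this release, driven entirely through AgentConfig. For orientation, here is a minimal usage sketch distilled from the tests above; the Anthropic provider choice is one arbitrary option exercised by the tests and assumes ANTHROPIC_API_KEY is set in the environment:

# Minimal sketch of the FUNCTION_CALLING agent type, based on tests/test_agent_type.py above.
# Provider/model choice and the required API key are assumptions, not the only supported setup.
from vectara_agentic.agent import Agent, AgentType
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools import ToolsFactory
from vectara_agentic.types import ModelProvider

def mult(x: float, y: float) -> float:
    return x * y

config = AgentConfig(
    agent_type=AgentType.FUNCTION_CALLING,
    main_llm_provider=ModelProvider.ANTHROPIC,
    tool_llm_provider=ModelProvider.ANTHROPIC,
)
agent = Agent(
    tools=[ToolsFactory().create_tool(mult)],   # same simple multiplication tool as the tests
    topic="arithmetic",
    custom_instructions="Answer concisely.",
    agent_config=config,
)
print(agent.chat("What is 5 times 10. Only give the answer, nothing else").response)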
@@ -40,8 +40,8 @@ class TestFallback(unittest.TestCase):
          cls.flask_process.send_signal(signal.SIGINT)
          cls.flask_process.wait()

-     def test_fallback(self):
-         def mult(x, y):
+     def test_fallback_from_private(self):
+         def mult(x: float, y: float) -> float:
              return x * y

          tools = [ToolsFactory().create_tool(mult)]
@@ -41,7 +41,7 @@ class TestPrivateLLM(unittest.TestCase):
          cls.flask_process.wait()

      def test_endpoint(self):
-         def mult(x, y):
+         def mult(x: float, y: float) -> float:
              return x * y

          tools = [ToolsFactory().create_tool(mult)]
@@ -0,0 +1,110 @@
+ import unittest
+ import threading
+ import os
+
+ from vectara_agentic.agent import Agent, AgentType
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.types import ModelProvider, ObserverType
+ from vectara_agentic.tools import ToolsFactory
+
+ from llama_index.core.utilities.sql_wrapper import SQLDatabase
+ from sqlalchemy import create_engine
+
+ def mult(x: float, y: float) -> float:
+     return x * y
+
+
+ ARIZE_LOCK = threading.Lock()
+
+ class TestAgentSerialization(unittest.TestCase):
+
+     @classmethod
+     def tearDown(cls):
+         try:
+             os.remove('ev_database.db')
+         except FileNotFoundError:
+             pass
+
+     def test_serialization(self):
+         with ARIZE_LOCK:
+             config = AgentConfig(
+                 agent_type=AgentType.REACT,
+                 main_llm_provider=ModelProvider.ANTHROPIC,
+                 tool_llm_provider=ModelProvider.TOGETHER,
+                 observer=ObserverType.ARIZE_PHOENIX
+             )
+             db_tools = ToolsFactory().database_tools(
+                 tool_name_prefix = "ev",
+                 content_description = 'Electric Vehicles in the state of Washington and other population information',
+                 sql_database = SQLDatabase(create_engine('sqlite:///ev_database.db')),
+             )
+
+             tools = [ToolsFactory().create_tool(mult)] + ToolsFactory().standard_tools() + db_tools
+             topic = "AI topic"
+             instructions = "Always do as your father tells you, if your mother agrees!"
+             agent = Agent(
+                 tools=tools,
+                 topic=topic,
+                 custom_instructions=instructions,
+                 agent_config=config
+             )
+
+             agent_reloaded = agent.loads(agent.dumps())
+             agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
+
+             self.assertIsInstance(agent_reloaded, Agent)
+             self.assertEqual(agent, agent_reloaded)
+             self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
+
+             self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
+             self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
+             self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
+
+             self.assertIsInstance(agent_reloaded, Agent)
+             self.assertEqual(agent, agent_reloaded_again)
+             self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
+
+             self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
+             self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
+             self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
+
+     def test_serialization_from_corpus(self):
+         with ARIZE_LOCK:
+             config = AgentConfig(
+                 agent_type=AgentType.REACT,
+                 main_llm_provider=ModelProvider.ANTHROPIC,
+                 tool_llm_provider=ModelProvider.TOGETHER,
+                 observer=ObserverType.ARIZE_PHOENIX
+             )
+
+             agent = Agent.from_corpus(
+                 tool_name="RAG Tool",
+                 agent_config=config,
+                 vectara_corpus_key="corpus_key",
+                 vectara_api_key="api_key",
+                 data_description="information",
+                 assistant_specialty="question answering",
+             )
+
+             agent_reloaded = agent.loads(agent.dumps())
+             agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
+
+             self.assertIsInstance(agent_reloaded, Agent)
+             self.assertEqual(agent, agent_reloaded)
+             self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
+
+             self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
+             self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
+             self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
+
+             self.assertIsInstance(agent_reloaded, Agent)
+             self.assertEqual(agent, agent_reloaded_again)
+             self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
+
+             self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
+             self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
+             self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
+
+
+ if __name__ == "__main__":
+     unittest.main()
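
These tests exercise the Agent dumps()/loads() round trip together with the reworked database tools. A condensed sketch of that round trip, using the same placeholder corpus key and API key as the tests above (real credentials are needed for an actual run):

from vectara_agentic.agent import Agent

agent = Agent.from_corpus(
    tool_name="RAG Tool",
    vectara_corpus_key="corpus_key",   # placeholder, as in the test
    vectara_api_key="api_key",         # placeholder, as in the test
    data_description="information",
    assistant_specialty="question answering",
)
serialized = agent.dumps()            # serialize the agent state to a string
restored = agent.loads(serialized)    # rebuild an equivalent Agent, as the tests do
assert restored.agent_type == agent.agent_type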
@@ -47,7 +47,7 @@ class TestToolsPackage(unittest.TestCase):
          self.assertEqual(search_tool.metadata.tool_type, ToolType.QUERY)

      def test_tool_factory(self):
-         def mult(x, y):
+         def mult(x: float, y: float) -> float:
              return x * y

          tools_factory = ToolsFactory()
@@ -1,4 +1,4 @@
  """
  Define the version of the package.
  """
- __version__ = "0.2.6"
+ __version__ = "0.2.8"
@@ -21,7 +21,7 @@ from pydantic import Field, create_model, ValidationError
  from llama_index.core.memory import ChatMemoryBuffer
  from llama_index.core.llms import ChatMessage, MessageRole
  from llama_index.core.tools import FunctionTool
- from llama_index.core.agent import ReActAgent, StructuredPlannerAgent
+ from llama_index.core.agent import ReActAgent, StructuredPlannerAgent, FunctionCallingAgent
  from llama_index.core.agent.react.formatter import ReActChatFormatter
  from llama_index.agent.llm_compiler import LLMCompilerAgentWorker
  from llama_index.agent.lats import LATSAgentWorker
@@ -277,7 +277,19 @@ class Agent:
          llm = get_llm(LLMRole.MAIN, config=config)
          llm.callback_manager = llm_callback_manager

-         if agent_type == AgentType.REACT:
+         if agent_type == AgentType.FUNCTION_CALLING:
+             prompt = _get_prompt(GENERAL_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
+             agent = FunctionCallingAgent.from_tools(
+                 tools=self.tools,
+                 llm=llm,
+                 memory=self.memory,
+                 verbose=self.verbose,
+                 max_function_calls=config.max_reasoning_steps,
+                 callback_manager=llm_callback_manager,
+                 system_prompt=prompt,
+                 allow_parallel_tool_calls=True,
+             )
+         elif agent_type == AgentType.REACT:
              prompt = _get_prompt(REACT_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
              agent = ReActAgent.from_tools(
                  tools=self.tools,
@@ -295,7 +307,7 @@
                  llm=llm,
                  memory=self.memory,
                  verbose=self.verbose,
-                 callable_manager=llm_callback_manager,
+                 callback_manager=llm_callback_manager,
                  max_function_calls=config.max_reasoning_steps,
                  system_prompt=prompt,
              )
@@ -304,7 +316,7 @@
                  tools=self.tools,
                  llm=llm,
                  verbose=self.verbose,
-                 callable_manager=llm_callback_manager,
+                 callback_manager=llm_callback_manager,
              )
              agent_worker.system_prompt = _get_prompt(
                  _get_llm_compiler_prompt(agent_worker.system_prompt, self._topic, self._custom_instructions),
@@ -322,7 +334,7 @@
                  num_expansions=3,
                  max_rollouts=-1,
                  verbose=self.verbose,
-                 callable_manager=llm_callback_manager,
+                 callback_manager=llm_callback_manager,
              )
              prompt = _get_prompt(REACT_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
              agent_worker.chat_formatter = ReActChatFormatter(system_header=prompt)
@@ -707,6 +719,8 @@ class Agent:
          """
          max_attempts = 4 if self.fallback_agent_config else 2
          attempt = 0
+         orig_llm = self.llm.metadata.model_name
+         last_error = None
          while attempt < max_attempts:
              try:
                  current_agent = self._get_current_agent()
@@ -718,16 +732,20 @@
                      self.query_logging_callback(prompt, agent_response.response)
                  return agent_response

-             except Exception:
+             except Exception as e:
+                 last_error = e
                  if attempt >= 2:
                      if self.verbose:
-                         print(f"LLM call failed on attempt {attempt+1}. Switching agent configuration.")
+                         print(f"LLM call failed on attempt {attempt}. Switching agent configuration.")
                      self._switch_agent_config()
                  time.sleep(1)
                  attempt += 1

          return AgentResponse(
-             response=f"LLM failure can't be resolved after {max_attempts} attempts."
+             response=(
+                 f"For {orig_llm} LLM - failure can't be resolved after "
+                 f"{max_attempts} attempts ({last_error})."
+             )
          )

      def stream_chat(self, prompt: str) -> AgentStreamingResponse:    # type: ignore
@@ -750,6 +768,7 @@
          """
          max_attempts = 4 if self.fallback_agent_config else 2
          attempt = 0
+         orig_llm = self.llm.metadata.model_name
          while attempt < max_attempts:
              try:
                  current_agent = self._get_current_agent()
@@ -770,16 +789,20 @@
                  agent_response.async_response_gen = _stream_response_wrapper  # Override the generator
                  return agent_response

-             except Exception:
+             except Exception as e:
+                 last_error = e
                  if attempt >= 2:
                      if self.verbose:
-                         print("LLM call failed. Switching agent configuration.")
+                         print(f"LLM call failed on attempt {attempt}. Switching agent configuration.")
                      self._switch_agent_config()
                  time.sleep(1)
                  attempt += 1

          return AgentResponse(
-             response=f"LLM failure can't be resolved after {max_attempts} attempts."
+             response=(
+                 f"For {orig_llm} LLM - failure can't be resolved after "
+                 f"{max_attempts} attempts ({last_error})."
+             )
          )

      #
@@ -843,7 +866,6 @@
      def to_dict(self) -> Dict[str, Any]:
          """Serialize the Agent instance to a dictionary."""
          tool_info = []
-
          for tool in self.tools:
              if hasattr(tool.metadata, "fn_schema"):
                  fn_schema_cls = tool.metadata.fn_schema
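
The chat() and stream_chat() hunks above share one retry-with-fallback pattern: up to two attempts on the primary configuration, then a switch to the fallback configuration (when one is set) for two more attempts, with the original model name and the last exception reported if everything fails. A standalone sketch of that control flow, where run_once and switch_config are hypothetical stand-ins for the agent internals:

import time

def chat_with_fallback(prompt, run_once, switch_config, has_fallback, model_name):
    # Mirrors the loop in Agent.chat(): 2 attempts on the primary config,
    # plus 2 more after switching to the fallback config when one exists.
    max_attempts = 4 if has_fallback else 2
    attempt, last_error = 0, None
    while attempt < max_attempts:
        try:
            return run_once(prompt)
        except Exception as e:          # broad catch, matching the diff above
            last_error = e
            if attempt >= 2:
                switch_config()         # corresponds to Agent._switch_agent_config()
            time.sleep(1)
            attempt += 1
    return (
        f"For {model_name} LLM - failure can't be resolved after "
        f"{max_attempts} attempts ({last_error})."
    )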