vectara-agentic 0.2.6__tar.gz → 0.2.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vectara-agentic might be problematic.

Files changed (38)
  1. {vectara_agentic-0.2.6/vectara_agentic.egg-info → vectara_agentic-0.2.7}/PKG-INFO +1 -1
  2. vectara_agentic-0.2.7/tests/test_agent.py +162 -0
  3. vectara_agentic-0.2.7/tests/test_agent_planning.py +73 -0
  4. vectara_agentic-0.2.7/tests/test_agent_type.py +157 -0
  5. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/tests/test_fallback.py +2 -2
  6. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/tests/test_private_llm.py +1 -1
  7. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/tests/test_tools.py +1 -1
  8. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/_version.py +1 -1
  9. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/agent.py +26 -8
  10. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/sub_query_workflow.py +16 -4
  11. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/tools.py +95 -74
  12. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/types.py +1 -0
  13. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/utils.py +48 -9
  14. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7/vectara_agentic.egg-info}/PKG-INFO +1 -1
  15. vectara_agentic-0.2.6/tests/test_agent.py +0 -158
  16. vectara_agentic-0.2.6/tests/test_agent_planning.py +0 -46
  17. vectara_agentic-0.2.6/tests/test_agent_type.py +0 -83
  18. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/LICENSE +0 -0
  19. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/MANIFEST.in +0 -0
  20. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/README.md +0 -0
  21. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/requirements.txt +0 -0
  22. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/setup.cfg +0 -0
  23. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/setup.py +0 -0
  24. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/tests/__init__.py +0 -0
  25. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/tests/endpoint.py +0 -0
  26. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/tests/test_workflow.py +0 -0
  27. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/__init__.py +0 -0
  28. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/_callback.py +0 -0
  29. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/_observability.py +0 -0
  30. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/_prompts.py +0 -0
  31. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/agent_config.py +0 -0
  32. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/agent_endpoint.py +0 -0
  33. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/db_tools.py +0 -0
  34. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic/tools_catalog.py +0 -0
  35. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic.egg-info/SOURCES.txt +0 -0
  36. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  37. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic.egg-info/requires.txt +0 -0
  38. {vectara_agentic-0.2.6 → vectara_agentic-0.2.7}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: vectara_agentic
- Version: 0.2.6
+ Version: 0.2.7
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
  Home-page: https://github.com/vectara/py-vectara-agentic
  Author: Ofer Mendelevitch
@@ -0,0 +1,162 @@
+ import unittest
+ import threading
+ from datetime import date
+
+ from vectara_agentic.agent import _get_prompt, Agent, AgentType
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.types import ModelProvider, ObserverType
+ from vectara_agentic.tools import ToolsFactory
+
+ def mult(x: float, y: float) -> float:
+     return x * y
+
+
+ ARIZE_LOCK = threading.Lock()
+
+ class TestAgentPackage(unittest.TestCase):
+     def test_get_prompt(self):
+         prompt_template = "{chat_topic} on {today} with {custom_instructions}"
+         topic = "Programming"
+         custom_instructions = "Always do as your mother tells you!"
+         expected_output = (
+             "Programming on "
+             + date.today().strftime("%A, %B %d, %Y")
+             + " with Always do as your mother tells you!"
+         )
+         self.assertEqual(
+             _get_prompt(prompt_template, topic, custom_instructions), expected_output
+         )
+
+     def test_agent_init(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI"
+         custom_instructions = "Always do as your mother tells you!"
+         agent = Agent(tools, topic, custom_instructions)
+         self.assertEqual(agent.agent_type, AgentType.OPENAI)
+         self.assertEqual(agent._topic, topic)
+         self.assertEqual(agent._custom_instructions, custom_instructions)
+
+         # To run this test, you must have OPENAI_API_KEY in your environment
+         self.assertEqual(
+             agent.chat(
+                 "What is 5 times 10. Only give the answer, nothing else"
+             ).response.replace("$", "\\$"),
+             "50",
+         )
+
+     def test_agent_config(self):
+         with ARIZE_LOCK:
+             tools = [ToolsFactory().create_tool(mult)]
+             topic = "AI topic"
+             instructions = "Always do as your father tells you, if your mother agrees!"
+             config = AgentConfig(
+                 agent_type=AgentType.REACT,
+                 main_llm_provider=ModelProvider.ANTHROPIC,
+                 main_llm_model_name="claude-3-5-sonnet-20241022",
+                 tool_llm_provider=ModelProvider.TOGETHER,
+                 tool_llm_model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",
+                 observer=ObserverType.ARIZE_PHOENIX
+             )
+
+             agent = Agent(
+                 tools=tools,
+                 topic=topic,
+                 custom_instructions=instructions,
+                 agent_config=config
+             )
+             self.assertEqual(agent._topic, topic)
+             self.assertEqual(agent._custom_instructions, instructions)
+             self.assertEqual(agent.agent_type, AgentType.REACT)
+             self.assertEqual(agent.agent_config.observer, ObserverType.ARIZE_PHOENIX)
+             self.assertEqual(agent.agent_config.main_llm_provider, ModelProvider.ANTHROPIC)
+             self.assertEqual(agent.agent_config.tool_llm_provider, ModelProvider.TOGETHER)
+
+             # To run this test, you must have ANTHROPIC_API_KEY and TOGETHER_API_KEY in your environment
+             self.assertEqual(
+                 agent.chat(
+                     "What is 5 times 10. Only give the answer, nothing else"
+                 ).response.replace("$", "\\$"),
+                 "50",
+             )
+
+     def test_multiturn(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+         self.assertEqual(res.response, "1050")
+
+     def test_from_corpus(self):
+         agent = Agent.from_corpus(
+             tool_name="RAG Tool",
+             vectara_corpus_key="corpus_key",
+             vectara_api_key="api_key",
+             data_description="information",
+             assistant_specialty="question answering",
+         )
+
+         self.assertIsInstance(agent, Agent)
+         self.assertEqual(agent._topic, "question answering")
+
+     def test_serialization(self):
+         with ARIZE_LOCK:
+             config = AgentConfig(
+                 agent_type=AgentType.REACT,
+                 main_llm_provider=ModelProvider.ANTHROPIC,
+                 tool_llm_provider=ModelProvider.TOGETHER,
+                 observer=ObserverType.ARIZE_PHOENIX
+             )
+
+             agent = Agent.from_corpus(
+                 tool_name="RAG Tool",
+                 agent_config=config,
+                 vectara_corpus_key="corpus_key",
+                 vectara_api_key="api_key",
+                 data_description="information",
+                 assistant_specialty="question answering",
+             )
+
+             agent_reloaded = agent.loads(agent.dumps())
+             agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
+
+             self.assertIsInstance(agent_reloaded, Agent)
+             self.assertEqual(agent, agent_reloaded)
+             self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
+
+             self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
+             self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
+             self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
+
+             self.assertIsInstance(agent_reloaded, Agent)
+             self.assertEqual(agent, agent_reloaded_again)
+             self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
+
+             self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
+             self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
+             self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
+
+     def test_chat_history(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+             chat_history=[("What is 5 times 10", "50"), ("What is 3 times 7", "21")]
+         )
+
+         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
+         self.assertEqual(res.response, "1050")
+
+
+ if __name__ == "__main__":
+     unittest.main()
@@ -0,0 +1,73 @@
+ import unittest
+
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.agent import Agent
+ from vectara_agentic.tools import VectaraToolFactory
+
+ from pydantic import Field, BaseModel
+
+
+ # SETUP speical test account credentials for vectara
+ # It's okay to expose these credentials in the test code
+ vectara_corpus_key = "vectara-docs_1"
+ vectara_api_key = 'zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA'
+
+
+ class QueryArgs(BaseModel):
+     query: str = Field(..., description="The user query, always in the form of a question.")
+
+
+ vec_factory = VectaraToolFactory(vectara_api_key=vectara_api_key,
+                                  vectara_corpus_key=vectara_corpus_key)
+ summarizer = 'vectara-summary-table-md-query-ext-jan-2025-gpt-4o'
+ ask_vectara = vec_factory.create_rag_tool(
+     tool_name = "ask_vectara",
+     tool_description = "This tool can respond to questions about Vectara.",
+     tool_args_schema = QueryArgs,
+     reranker = "multilingual_reranker_v1", rerank_k = 100, rerank_cutoff = 0.1,
+     n_sentences_before = 2, n_sentences_after = 2, lambda_val = 0.005,
+     summary_num_results = 10,
+     vectara_summarizer = summarizer,
+     include_citations = True,
+     verbose=False,
+ )
+
+ class TestAgentPlanningPackage(unittest.TestCase):
+
+     def test_no_planning(self):
+         tools = [ask_vectara]
+         topic = "vectara"
+         instructions = "Answer user queries about Vectara."
+
+         query = "What is Vectara and what demos are available of the Vectara platform?"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+             agent_config=AgentConfig(),
+         )
+         res = agent.chat(query)
+         self.assertIn("demos", res.response)
+         self.assertIn("Vectara", res.response)
+
+     def test_structured_planning(self):
+         tools = [ask_vectara]
+         topic = "vectara"
+         instructions = "Answer user queries about Vectara."
+
+         query = "What is Vectara and what demos are available of the Vectara platform?"
+         agent = Agent(
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+             agent_config=AgentConfig(),
+             use_structured_planning=True,
+         )
+
+         res = agent.chat(query)
+         self.assertIn("demos", res.response)
+         self.assertIn("Vectara", res.response)
+
+
+ if __name__ == "__main__":
+     unittest.main()
@@ -0,0 +1,157 @@
+ import unittest
+
+ from vectara_agentic.agent import Agent, AgentType
+ from vectara_agentic.agent_config import AgentConfig
+ from vectara_agentic.tools import ToolsFactory
+ from vectara_agentic.types import ModelProvider
+
+ import nest_asyncio
+ nest_asyncio.apply()
+
+ def mult(x: float, y: float) -> float:
+     return x * y
+
+
+ react_config_anthropic = AgentConfig(
+     agent_type=AgentType.REACT,
+     main_llm_provider=ModelProvider.ANTHROPIC,
+     tool_llm_provider=ModelProvider.ANTHROPIC,
+ )
+
+ react_config_gemini = AgentConfig(
+     agent_type=AgentType.REACT,
+     main_llm_provider=ModelProvider.GEMINI,
+     tool_llm_provider=ModelProvider.GEMINI,
+ )
+
+ react_config_together = AgentConfig(
+     agent_type=AgentType.REACT,
+     main_llm_provider=ModelProvider.TOGETHER,
+     tool_llm_provider=ModelProvider.TOGETHER,
+ )
+
+ fc_config_anthropic = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.ANTHROPIC,
+     tool_llm_provider=ModelProvider.ANTHROPIC,
+ )
+
+ fc_config_gemini = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.GEMINI,
+     tool_llm_provider=ModelProvider.GEMINI,
+ )
+
+ fc_config_together = AgentConfig(
+     agent_type=AgentType.FUNCTION_CALLING,
+     main_llm_provider=ModelProvider.TOGETHER,
+     tool_llm_provider=ModelProvider.TOGETHER,
+ )
+
+
+ openai_config = AgentConfig(
+     agent_type=AgentType.OPENAI,
+ )
+
+ class TestAgentType(unittest.TestCase):
+
+     def test_openai(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+         agent = Agent(
+             agent_config=openai_config,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+     def test_gemini(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+
+         agent = Agent(
+             agent_config=react_config_gemini,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+         agent = Agent(
+             agent_config=fc_config_gemini,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+     def test_together(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+
+         agent = Agent(
+             agent_config=react_config_together,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+         agent = Agent(
+             agent_config=fc_config_together,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+     def test_anthropic(self):
+         tools = [ToolsFactory().create_tool(mult)]
+         topic = "AI topic"
+         instructions = "Always do as your father tells you, if your mother agrees!"
+
+         agent = Agent(
+             agent_config=react_config_anthropic,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+         agent = Agent(
+             agent_config=fc_config_anthropic,
+             tools=tools,
+             topic=topic,
+             custom_instructions=instructions,
+         )
+         agent.chat("What is 5 times 10. Only give the answer, nothing else")
+         agent.chat("what is 3 times 7. Only give the answer, nothing else")
+         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
+         self.assertIn("1050", res.response)
+
+
+ if __name__ == "__main__":
+     unittest.main()
@@ -40,8 +40,8 @@ class TestFallback(unittest.TestCase):
          cls.flask_process.send_signal(signal.SIGINT)
          cls.flask_process.wait()

-     def test_fallback(self):
-         def mult(x, y):
+     def test_fallback_from_private(self):
+         def mult(x: float, y: float) -> float:
              return x * y

          tools = [ToolsFactory().create_tool(mult)]
@@ -41,7 +41,7 @@ class TestPrivateLLM(unittest.TestCase):
          cls.flask_process.wait()

      def test_endpoint(self):
-         def mult(x, y):
+         def mult(x: float, y: float) -> float:
              return x * y

          tools = [ToolsFactory().create_tool(mult)]
@@ -47,7 +47,7 @@ class TestToolsPackage(unittest.TestCase):
          self.assertEqual(search_tool.metadata.tool_type, ToolType.QUERY)

      def test_tool_factory(self):
-         def mult(x, y):
+         def mult(x: float, y: float) -> float:
              return x * y

          tools_factory = ToolsFactory()
@@ -1,4 +1,4 @@
  """
  Define the version of the package.
  """
- __version__ = "0.2.6"
+ __version__ = "0.2.7"
@@ -21,7 +21,7 @@ from pydantic import Field, create_model, ValidationError
  from llama_index.core.memory import ChatMemoryBuffer
  from llama_index.core.llms import ChatMessage, MessageRole
  from llama_index.core.tools import FunctionTool
- from llama_index.core.agent import ReActAgent, StructuredPlannerAgent
+ from llama_index.core.agent import ReActAgent, StructuredPlannerAgent, FunctionCallingAgent
  from llama_index.core.agent.react.formatter import ReActChatFormatter
  from llama_index.agent.llm_compiler import LLMCompilerAgentWorker
  from llama_index.agent.lats import LATSAgentWorker
@@ -277,7 +277,19 @@ class Agent:
          llm = get_llm(LLMRole.MAIN, config=config)
          llm.callback_manager = llm_callback_manager

-         if agent_type == AgentType.REACT:
+         if agent_type == AgentType.FUNCTION_CALLING:
+             prompt = _get_prompt(GENERAL_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
+             agent = FunctionCallingAgent.from_tools(
+                 tools=self.tools,
+                 llm=llm,
+                 memory=self.memory,
+                 verbose=self.verbose,
+                 max_function_calls=config.max_reasoning_steps,
+                 callback_manager=llm_callback_manager,
+                 system_prompt = prompt,
+                 allow_parallel_tool_calls=True,
+             )
+         elif agent_type == AgentType.REACT:
              prompt = _get_prompt(REACT_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
              agent = ReActAgent.from_tools(
                  tools=self.tools,
@@ -295,7 +307,7 @@ class Agent:
                  llm=llm,
                  memory=self.memory,
                  verbose=self.verbose,
-                 callable_manager=llm_callback_manager,
+                 callback_manager=llm_callback_manager,
                  max_function_calls=config.max_reasoning_steps,
                  system_prompt=prompt,
              )
@@ -304,7 +316,7 @@ class Agent:
                  tools=self.tools,
                  llm=llm,
                  verbose=self.verbose,
-                 callable_manager=llm_callback_manager,
+                 callback_manager=llm_callback_manager,
              )
              agent_worker.system_prompt = _get_prompt(
                  _get_llm_compiler_prompt(agent_worker.system_prompt, self._topic, self._custom_instructions),
@@ -322,7 +334,7 @@ class Agent:
                  num_expansions=3,
                  max_rollouts=-1,
                  verbose=self.verbose,
-                 callable_manager=llm_callback_manager,
+                 callback_manager=llm_callback_manager,
              )
              prompt = _get_prompt(REACT_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
              agent_worker.chat_formatter = ReActChatFormatter(system_header=prompt)
@@ -707,6 +719,8 @@ class Agent:
          """
          max_attempts = 4 if self.fallback_agent_config else 2
          attempt = 0
+         orig_llm = self.llm.metadata.model_name
+         last_error = None
          while attempt < max_attempts:
              try:
                  current_agent = self._get_current_agent()
@@ -718,16 +732,20 @@ class Agent:
                      self.query_logging_callback(prompt, agent_response.response)
                  return agent_response

-             except Exception:
+             except Exception as e:
+                 last_error = e
                  if attempt >= 2:
                      if self.verbose:
-                         print(f"LLM call failed on attempt {attempt+1}. Switching agent configuration.")
+                         print(f"LLM call failed on attempt {attempt}. Switching agent configuration.")
                      self._switch_agent_config()
                  time.sleep(1)
                  attempt += 1

          return AgentResponse(
-             response=f"LLM failure can't be resolved after {max_attempts} attempts."
+             response=(
+                 f"For {orig_llm} LLM - failure can't be resolved after "
+                 f"{max_attempts} attempts ({last_error}."
+             )
          )

      def stream_chat(self, prompt: str) -> AgentStreamingResponse:    # type: ignore
@@ -252,8 +252,10 @@ class SequentialSubQuestionsWorkflow(Workflow):
          As an example, for the question
          "what is the name of the mayor of the largest city within 50 miles of San Francisco?",
          the sub-questions could be:
-         - What is the largest city within 50 miles of San Francisco? (answer is San Jose)
-         - What is the name of the mayor of San Jose?
+         - What is the largest city within 50 miles of San Francisco?
+         - Who is the mayor of this city?
+         The answer to the first question is San Jose, which is given as context to the second question.
+         The answer to the second question is Matt Mahan.
          Here is the user question: {await ctx.get('original_query')}.
          Here are previous chat messages: {chat_history}.
          And here is the list of tools: {await ctx.get('tools')}
@@ -277,7 +279,16 @@ class SequentialSubQuestionsWorkflow(Workflow):
          if await ctx.get("verbose"):
              print(f"Sub-question is {ev.question}")
          agent = await ctx.get("agent")
-         response = await agent.achat(ev.question)
+         sub_questions = await ctx.get("sub_questions")
+         if ev.prev_answer:
+             prev_question = sub_questions[ev.num - 1]
+             prompt = f"""
+             The answer to the question '{prev_question}' is: '{ev.prev_answer}'
+             Now answer the following question: '{ev.question}'
+             """
+             response = await agent.achat(prompt)
+         else:
+             response = await agent.achat(ev.question)
          if await ctx.get("verbose"):
              print(f"Answer is {response}")

@@ -286,7 +297,8 @@ class SequentialSubQuestionsWorkflow(Workflow):
              return self.QueryEvent(
                  question=sub_questions[ev.num + 1],
                  prev_answer = response.response,
-                 num=ev.num + 1)
+                 num=ev.num + 1
+             )

          output = self.OutputsModel(response=response.response)
          return StopEvent(result=output)
@@ -8,7 +8,7 @@ import importlib
  import os

  from typing import Callable, List, Dict, Any, Optional, Union, Type
- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, create_model
  from pydantic_core import PydanticUndefined

  from llama_index.core.tools import FunctionTool
@@ -21,7 +21,7 @@ from llama_index.core.workflow.context import Context
  from .types import ToolType
  from .tools_catalog import ToolsCatalog, get_bad_topics
  from .db_tools import DBLoadSampleData, DBLoadUniqueValues, DBLoadData
- from .utils import is_float
+ from .utils import is_float, summarize_vectara_document
  from .agent_config import AgentConfig

  LI_packages = {
@@ -74,6 +74,7 @@ class VectaraToolMetadata(ToolMetadata):
          base_repr = super().__repr__()
          return f"{base_repr}, tool_type={self.tool_type}"

+
  class VectaraTool(FunctionTool):
      """
      A subclass of FunctionTool adding the tool_type attribute.
@@ -161,7 +162,7 @@ class VectaraTool(FunctionTool):
          self, *args: Any, ctx: Optional[Context] = None, **kwargs: Any
      ) -> ToolOutput:
          try:
-             return super().call(*args, ctx=ctx, **kwargs)
+             return await super().acall(*args, ctx=ctx, **kwargs)
          except Exception as e:
              err_output = ToolOutput(
                  tool_name=self.metadata.name,
@@ -171,6 +172,65 @@ class VectaraTool(FunctionTool):
              )
              return err_output

+ def _create_tool_from_dynamic_function(
+     function: Callable[..., ToolOutput],
+     tool_name: str,
+     tool_description: str,
+     base_params: list[inspect.Parameter],
+     tool_args_schema: type[BaseModel],
+ ) -> VectaraTool:
+     """
+     Create a VectaraTool from a dynamic function, including
+     setting the function signature and creating the tool schema.
+     """
+     fields = {}
+     for param in base_params:
+         default_value = param.default if param.default != inspect.Parameter.empty else ...
+         fields[param.name] = (param.annotation, default_value)
+     for field_name, field_info in tool_args_schema.model_fields.items():
+         if field_name not in fields:
+             default_value = field_info.default if field_info.default is not PydanticUndefined else ...
+             fields[field_name] = (field_info.annotation, default_value)
+     fn_schema = create_model(f"{tool_name}", **fields)
+
+     schema_params = [
+         inspect.Parameter(
+             name=field_name,
+             kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
+             default=field_info.default if field_info.default is not PydanticUndefined else inspect.Parameter.empty,
+             annotation=field_info.annotation if hasattr(field_info, 'annotation') else field_info,
+         )
+         for field_name, field_info in tool_args_schema.model_fields.items()
+         if field_name not in [p.name for p in base_params]
+     ]
+     all_params = base_params + schema_params
+
+     required_params = [p for p in all_params if p.default is inspect.Parameter.empty]
+     optional_params = [p for p in all_params if p.default is not inspect.Parameter.empty]
+     sig = inspect.Signature(required_params + optional_params)
+     function.__signature__ = sig
+     function.__annotations__["return"] = dict[str, Any]
+     function.__name__ = "_" + re.sub(r"[^A-Za-z0-9_]", "_", tool_name)
+
+     # Create the tool function signature string
+     param_strs = []
+     for param in all_params:
+         annotation = param.annotation
+         type_name = annotation.__name__ if hasattr(annotation, '__name__') else str(annotation)
+         param_strs.append(f"{param.name}: {type_name}")
+     args_str = ", ".join(param_strs)
+     function_str = f"{tool_name}({args_str}) -> str"
+
+     # Create the tool
+     tool = VectaraTool.from_defaults(
+         fn=function,
+         name=tool_name,
+         description=function_str + "\n" + tool_description,
+         fn_schema=fn_schema,
+         tool_type=ToolType.QUERY,
+     )
+     return tool
+
  def _build_filter_string(kwargs: Dict[str, Any], tool_args_type: Dict[str, dict], fixed_filter: str) -> str:
      """
      Build filter string for Vectara from kwargs
@@ -196,7 +256,7 @@ def _build_filter_string(kwargs: Dict[str, Any], tool_args_type: Dict[str, dict]

          if value is PydanticUndefined:
              raise ValueError(
-                 f"Value of argument {key} is undefined, and this is invalid. "
+                 f"Value of argument {key} is undefined, and this is invalid."
                  "Please form proper arguments and try again."
              )

@@ -394,7 +454,7 @@ class VectaraToolFactory:
          )

          # Dynamically generate the search function
-         def search_function(*args, **kwargs) -> ToolOutput:
+         def search_function(*args: Any, **kwargs: Any) -> ToolOutput:
              """
              Dynamically generated function for semantic search Vectara.
              """
@@ -406,6 +466,7 @@ class VectaraToolFactory:

              query = kwargs.pop("query")
              top_k = kwargs.pop("top_k", 10)
+             summarize = kwargs.pop("summarize", True)
              try:
                  filter_string = _build_filter_string(kwargs, tool_args_type, fixed_filter)
              except ValueError as e:
@@ -453,7 +514,11 @@ class VectaraToolFactory:
                  if doc.id_ in unique_ids:
                      continue
                  unique_ids.add(doc.id_)
-                 tool_output += f"document_id: '{doc.id_}'\nmetadata: '{doc.metadata}'\n"
+                 if summarize:
+                     summary = summarize_vectara_document(self.vectara_corpus_key, self.vectara_api_key, doc.id_)
+                     tool_output += f"document_id: '{doc.id_}'\nmetadata: '{doc.metadata}'\nsummary: '{summary}'\n\n"
+                 else:
+                     tool_output += f"document_id: '{doc.id_}'\nmetadata: '{doc.metadata}'\n\n"
              out = ToolOutput(
                  tool_name=search_function.__name__,
                  content=tool_output,
@@ -462,42 +527,22 @@ class VectaraToolFactory:
              )
              return out

-         fields = tool_args_schema.model_fields
-         params = [
-             inspect.Parameter(
-                 name=field_name,
-                 kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
-                 default=field_info.default,
-                 annotation=field_info,
-             )
-             for field_name, field_info in fields.items()
+         base_params = [
+             inspect.Parameter("query", inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
+             inspect.Parameter("top_k", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=10, annotation=int),
+             inspect.Parameter("summarize", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=True, annotation=bool),
          ]
-
-         # Create a new signature using the extracted parameters
-         sig = inspect.Signature(params)
-         search_function.__signature__ = sig
-         search_function.__annotations__["return"] = dict[str, Any]
-         search_function.__name__ = "_" + re.sub(r"[^A-Za-z0-9_]", "_", tool_name)
-
-         # Create the tool function signature string
-         fields = []
-         for name, field in tool_args_schema.model_fields.items():
-             annotation = field.annotation
-             type_name = annotation.__name__ if hasattr(annotation, '__name__') else str(annotation)
-             fields.append(f"{name}: {type_name}")
-         args_str = ", ".join(fields)
-         function_str = f"{tool_name}({args_str}) -> str"
-
-         # Create the tool
-         search_tool_extra_desc = """
-         The response includes metadata about each relevant document, but NOT the text itself.
+         search_tool_extra_desc = tool_description + "\n" + """
+         The response includes metadata about each relevant document.
+         If summarize=True, it also includes a summary of each document.
          """
-         tool = VectaraTool.from_defaults(
-             fn=search_function,
-             name=tool_name,
-             description=function_str + "\n" + tool_description + '\n' + search_tool_extra_desc,
-             fn_schema=tool_args_schema,
-             tool_type=ToolType.QUERY,
+
+         tool = _create_tool_from_dynamic_function(
+             search_function,
+             tool_name,
+             search_tool_extra_desc,
+             base_params,
+             tool_args_schema,
          )
          return tool

@@ -597,11 +642,11 @@ class VectaraToolFactory:
              vectara_corpus_key=self.vectara_corpus_key,
              x_source_str="vectara-agentic",
              vectara_base_url=vectara_base_url,
-             vetara_verify_ssl=vectara_verify_ssl,
+             vectara_verify_ssl=vectara_verify_ssl,
          )

          # Dynamically generate the RAG function
-         def rag_function(*args, **kwargs) -> ToolOutput:
+         def rag_function(*args: Any, **kwargs: Any) -> ToolOutput:
              """
              Dynamically generated function for RAG query with Vectara.
              """
@@ -721,39 +766,15 @@ class VectaraToolFactory:
              )
              return out

-         fields = tool_args_schema.model_fields
-         params = [
-             inspect.Parameter(
-                 name=field_name,
-                 kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
-                 default=field_info.default,
-                 annotation=field_info,
-             )
-             for field_name, field_info in fields.items()
+         base_params = [
+             inspect.Parameter("query", inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
          ]
-
-         # Create a new signature using the extracted parameters
-         sig = inspect.Signature(params)
-         rag_function.__signature__ = sig
-         rag_function.__annotations__["return"] = dict[str, Any]
-         rag_function.__name__ = "_" + re.sub(r"[^A-Za-z0-9_]", "_", tool_name)
-
-         # Create the tool function signature string
-         fields = []
-         for name, field in tool_args_schema.model_fields.items():
-             annotation = field.annotation
-             type_name = annotation.__name__ if hasattr(annotation, '__name__') else str(annotation)
-             fields.append(f"{name}: {type_name}")
-         args_str = ", ".join(fields)
-         function_str = f"{tool_name}({args_str}) -> str"
-
-         # Create the tool
-         tool = VectaraTool.from_defaults(
-             fn=rag_function,
-             name=tool_name,
-             description=function_str + ". " + tool_description,
-             fn_schema=tool_args_schema,
-             tool_type=ToolType.QUERY,
+         tool = _create_tool_from_dynamic_function(
+             rag_function,
+             tool_name,
+             tool_description,
+             base_params,
+             tool_args_schema,
          )
          return tool

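
Note on the search-tool change in this file: the generated search function now accepts an explicit summarize argument (default True) and, when it is set, appends a per-document summary produced by summarize_vectara_document. A minimal usage sketch follows; it assumes the factory method is named create_search_tool and uses placeholder credentials, so treat names and values here as illustrative rather than taken from this diff.

from pydantic import BaseModel, Field
from vectara_agentic.tools import VectaraToolFactory

class SearchArgs(BaseModel):
    query: str = Field(..., description="The user query, always in the form of a question.")

# Placeholder corpus key and API key - not real credentials.
vec_factory = VectaraToolFactory(vectara_corpus_key="my_corpus", vectara_api_key="my_api_key")
search_docs = vec_factory.create_search_tool(
    tool_name="search_docs",
    tool_description="Search for documents matching a user query.",
    tool_args_schema=SearchArgs,
)
# summarize=False skips the extra per-document summarization round trip.
print(search_docs(query="What demos does Vectara offer?", top_k=5, summarize=False))
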
@@ -12,6 +12,7 @@ class AgentType(Enum):

      REACT = "REACT"
      OPENAI = "OPENAI"
+     FUNCTION_CALLING = "FUNCTION_CALLING"
      LLMCOMPILER = "LLMCOMPILER"
      LATS = "LATS"

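
The new FUNCTION_CALLING value above pairs with the FunctionCallingAgent branch added to agent.py earlier in this diff. A minimal sketch of opting into it through AgentConfig, modeled on the fc_config_* fixtures in the new tests/test_agent_type.py (the default providers are assumed, so an OPENAI_API_KEY would be needed to actually run it):

from vectara_agentic.agent import Agent, AgentType
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools import ToolsFactory

def mult(x: float, y: float) -> float:
    return x * y

# Only the agent type is changed; LLM providers fall back to their defaults.
fc_config = AgentConfig(agent_type=AgentType.FUNCTION_CALLING)
agent = Agent(
    agent_config=fc_config,
    tools=[ToolsFactory().create_tool(mult)],
    topic="arithmetic",
    custom_instructions="Use the tools to answer arithmetic questions.",
)
print(agent.chat("What is 5 times 10. Only give the answer, nothing else").response)
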
@@ -5,6 +5,8 @@ Utilities for the Vectara agentic.
  from typing import Tuple, Callable, Optional
  from functools import lru_cache
  from inspect import signature
+ import json
+ import requests

  import tiktoken

@@ -90,35 +92,50 @@ def get_llm(
      Get the LLM for the specified role, using the provided config
      or a default if none is provided.
      """
+     max_tokens = 8192
      model_provider, model_name = _get_llm_params_for_role(role, config)
      if model_provider == ModelProvider.OPENAI:
          llm = OpenAI(model=model_name, temperature=0,
                       is_function_calling_model=True,
-                      strict=True)
+                      strict=True,
+                      max_tokens=max_tokens
+                      )
      elif model_provider == ModelProvider.ANTHROPIC:
-         llm = Anthropic(model=model_name, temperature=0)
+         llm = Anthropic(model=model_name, temperature=0, max_tokens=max_tokens)
      elif model_provider == ModelProvider.GEMINI:
          from llama_index.llms.gemini import Gemini
-         llm = Gemini(model=model_name, temperature=0, is_function_calling_model=True)
+         llm = Gemini(
+             model=model_name, temperature=0,
+             is_function_calling_model=True,
+             max_tokens=max_tokens
+         )
      elif model_provider == ModelProvider.TOGETHER:
          from llama_index.llms.together import TogetherLLM
-         llm = TogetherLLM(model=model_name, temperature=0, is_function_calling_model=True)
+         llm = TogetherLLM(
+             model=model_name, temperature=0,
+             is_function_calling_model=True,
+             max_tokens=max_tokens
+         )
      elif model_provider == ModelProvider.GROQ:
          from llama_index.llms.groq import Groq
-         llm = Groq(model=model_name, temperature=0, is_function_calling_model=True)
+         llm = Groq(
+             model=model_name, temperature=0,
+             is_function_calling_model=True, max_tokens=max_tokens
+         )
      elif model_provider == ModelProvider.FIREWORKS:
          from llama_index.llms.fireworks import Fireworks
-         llm = Fireworks(model=model_name, temperature=0)
+         llm = Fireworks(model=model_name, temperature=0, max_tokens=max_tokens)
      elif model_provider == ModelProvider.BEDROCK:
          from llama_index.llms.bedrock import Bedrock
-         llm = Bedrock(model=model_name, temperature=0)
+         llm = Bedrock(model=model_name, temperature=0, max_tokens=max_tokens)
      elif model_provider == ModelProvider.COHERE:
          from llama_index.llms.cohere import Cohere
-         llm = Cohere(model=model_name, temperature=0)
+         llm = Cohere(model=model_name, temperature=0, max_tokens=max_tokens)
      elif model_provider == ModelProvider.PRIVATE:
          from llama_index.llms.openai_like import OpenAILike
          llm = OpenAILike(model=model_name, temperature=0, is_function_calling_model=True,is_chat_model=True,
-                          api_base=config.private_llm_api_base, api_key=config.private_llm_api_key)
+                          api_base=config.private_llm_api_base, api_key=config.private_llm_api_key,
+                          max_tokens=max_tokens)
      else:
          raise ValueError(f"Unknown LLM provider: {model_provider}")
      return llm
@@ -141,3 +158,25 @@ def remove_self_from_signature(func):
      new_sig = sig.replace(parameters=params)
      func.__signature__ = new_sig
      return func
+
+ def summarize_vectara_document(corpus_key: str, vectara_api_key, doc_id: str) -> str:
+     """
+     Summarize a document in a Vectara corpus using the Vectara API.
+     """
+     url = f"https://api.vectara.io/v2/corpora/{corpus_key}/documents/{doc_id}/summarize"
+
+     payload = json.dumps({
+         "llm_name": "gpt-4o",
+         "model_parameters": {},
+         "stream_response": False
+     })
+     headers = {
+         'Content-Type': 'application/json',
+         'Accept': 'application/json',
+         'x-api-key': vectara_api_key
+     }
+
+     response = requests.request("POST", url, headers=headers, data=payload, timeout=60)
+     if response.status_code != 200:
+         return f"Vectara Summarization failed with error code {response.status_code}, error={response.text}"
+     return json.loads(response.text)["summary"]
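
For reference, the helper added above is called with the corpus key, the API key, and a document id, in that order (matching the call made from tools.py), and returns either the summary text or an error string. A minimal sketch with placeholder values:

from vectara_agentic.utils import summarize_vectara_document

# Placeholder corpus key, API key, and document id.
summary = summarize_vectara_document("my_corpus", "my_api_key", "my_doc_id")
print(summary)
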
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: vectara_agentic
- Version: 0.2.6
+ Version: 0.2.7
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
  Home-page: https://github.com/vectara/py-vectara-agentic
  Author: Ofer Mendelevitch
@@ -1,158 +0,0 @@
- import unittest
- from datetime import date
-
- from vectara_agentic.agent import _get_prompt, Agent, AgentType
- from vectara_agentic.agent_config import AgentConfig
- from vectara_agentic.types import ModelProvider, ObserverType
- from vectara_agentic.tools import ToolsFactory
-
- def mult(x, y):
-     return x * y
-
- class TestAgentPackage(unittest.TestCase):
-     def test_get_prompt(self):
-         prompt_template = "{chat_topic} on {today} with {custom_instructions}"
-         topic = "Programming"
-         custom_instructions = "Always do as your mother tells you!"
-         expected_output = (
-             "Programming on "
-             + date.today().strftime("%A, %B %d, %Y")
-             + " with Always do as your mother tells you!"
-         )
-         self.assertEqual(
-             _get_prompt(prompt_template, topic, custom_instructions), expected_output
-         )
-
-     def test_agent_init(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI"
-         custom_instructions = "Always do as your mother tells you!"
-         agent = Agent(tools, topic, custom_instructions)
-         self.assertEqual(agent.agent_type, AgentType.OPENAI)
-         self.assertEqual(agent._topic, topic)
-         self.assertEqual(agent._custom_instructions, custom_instructions)
-
-         # To run this test, you must have OPENAI_API_KEY in your environment
-         self.assertEqual(
-             agent.chat(
-                 "What is 5 times 10. Only give the answer, nothing else"
-             ).response.replace("$", "\\$"),
-             "50",
-         )
-
-     def test_agent_config(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         config = AgentConfig(
-             agent_type=AgentType.REACT,
-             main_llm_provider=ModelProvider.ANTHROPIC,
-             main_llm_model_name="claude-3-5-sonnet-20241022",
-             tool_llm_provider=ModelProvider.TOGETHER,
-             tool_llm_model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",
-             observer=ObserverType.ARIZE_PHOENIX
-         )
-
-         agent = Agent(
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-             agent_config=config
-         )
-         self.assertEqual(agent._topic, topic)
-         self.assertEqual(agent._custom_instructions, instructions)
-         self.assertEqual(agent.agent_type, AgentType.REACT)
-         self.assertEqual(agent.agent_config.observer, ObserverType.ARIZE_PHOENIX)
-         self.assertEqual(agent.agent_config.main_llm_provider, ModelProvider.ANTHROPIC)
-         self.assertEqual(agent.agent_config.tool_llm_provider, ModelProvider.TOGETHER)
-
-         # To run this test, you must have ANTHROPIC_API_KEY and TOGETHER_API_KEY in your environment
-         self.assertEqual(
-             agent.chat(
-                 "What is 5 times 10. Only give the answer, nothing else"
-             ).response.replace("$", "\\$"),
-             "50",
-         )
-
-     def test_multiturn(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-         )
-
-         agent.chat("What is 5 times 10. Only give the answer, nothing else")
-         agent.chat("what is 3 times 7. Only give the answer, nothing else")
-         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
-         self.assertEqual(res.response, "1050")
-
-     def test_from_corpus(self):
-         agent = Agent.from_corpus(
-             tool_name="RAG Tool",
-             vectara_corpus_key="corpus_key",
-             vectara_api_key="api_key",
-             data_description="information",
-             assistant_specialty="question answering",
-         )
-
-         self.assertIsInstance(agent, Agent)
-         self.assertEqual(agent._topic, "question answering")
-
-     def test_serialization(self):
-         config = AgentConfig(
-             agent_type=AgentType.REACT,
-             main_llm_provider=ModelProvider.ANTHROPIC,
-             main_llm_model_name="claude-3-5-sonnet-20241022",
-             tool_llm_provider=ModelProvider.TOGETHER,
-             tool_llm_model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",
-             observer=ObserverType.ARIZE_PHOENIX
-         )
-
-         agent = Agent.from_corpus(
-             tool_name="RAG Tool",
-             agent_config=config,
-             vectara_corpus_key="corpus_key",
-             vectara_api_key="api_key",
-             data_description="information",
-             assistant_specialty="question answering",
-         )
-
-         agent_reloaded = agent.loads(agent.dumps())
-         agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
-
-         self.assertIsInstance(agent_reloaded, Agent)
-         self.assertEqual(agent, agent_reloaded)
-         self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
-
-         self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
-         self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
-         self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
-
-         self.assertIsInstance(agent_reloaded, Agent)
-         self.assertEqual(agent, agent_reloaded_again)
-         self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
-
-         self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
-         self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
-         self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
-
-     def test_chat_history(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-             chat_history=[("What is 5 times 10", "50"), ("What is 3 times 7", "21")]
-         )
-
-         res = agent.chat("multiply the results of the last two questions. Output only the answer.")
-         self.assertEqual(res.response, "1050")
-
-
- if __name__ == "__main__":
-     unittest.main()
@@ -1,46 +0,0 @@
- import unittest
-
- from vectara_agentic.agent import Agent
- from vectara_agentic.agent_config import AgentConfig
- from vectara_agentic.tools import ToolsFactory
-
- def mult(x, y):
-     return x * y
-
- def addition(x, y):
-     return x + y
-
- class TestAgentPlanningPackage(unittest.TestCase):
-
-     def test_no_planning(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-             agent_config = AgentConfig()
-         )
-
-         res = agent.chat("If you multiply 5 times 7, then 3 times 2, and add the results - what do you get?")
-         self.assertIn("41", res.response)
-
-     def test_structured_planning(self):
-         tools = [ToolsFactory().create_tool(mult), ToolsFactory().create_tool(addition)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-             agent_config = AgentConfig(),
-             use_structured_planning = True,
-         )
-
-         res = agent.chat("If you multiply 5 times 7, then 3 times 2, and add the results - what do you get?")
-         self.assertIn("41", res.response)
-
-
- if __name__ == "__main__":
-     unittest.main()
@@ -1,83 +0,0 @@
- import unittest
-
- from vectara_agentic.agent import Agent, AgentType
- from vectara_agentic.agent_config import AgentConfig
- from vectara_agentic.tools import ToolsFactory
- from vectara_agentic.types import ModelProvider
-
- import nest_asyncio
- nest_asyncio.apply()
- def mult(x, y):
-     return x * y
-
-
- react_config_1 = AgentConfig(
-     agent_type=AgentType.REACT,
-     main_llm_provider=ModelProvider.ANTHROPIC,
-     main_llm_model_name="claude-3-7-sonnet-20250219",
-     tool_llm_provider=ModelProvider.TOGETHER,
-     tool_llm_model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",
- )
-
- react_config_2 = AgentConfig(
-     agent_type=AgentType.REACT,
-     main_llm_provider=ModelProvider.GEMINI,
-     tool_llm_provider=ModelProvider.GEMINI,
- )
-
- openai_config = AgentConfig(
-     agent_type=AgentType.OPENAI,
- )
-
- class TestAgentType(unittest.TestCase):
-
-     def test_openai(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             agent_config=openai_config,
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-         )
-
-         agent.chat("What is 5 times 10. Only give the answer, nothing else")
-         agent.chat("what is 3 times 7. Only give the answer, nothing else")
-         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
-         self.assertIn("1050", res.response)
-
-     def test_react_anthropic(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             agent_config=react_config_1,
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-         )
-
-         agent.chat("What is 5 times 10. Only give the answer, nothing else")
-         agent.chat("what is 3 times 7. Only give the answer, nothing else")
-         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
-         self.assertIn("1050", res.response)
-
-     def test_react_gemini(self):
-         tools = [ToolsFactory().create_tool(mult)]
-         topic = "AI topic"
-         instructions = "Always do as your father tells you, if your mother agrees!"
-         agent = Agent(
-             agent_config=react_config_2,
-             tools=tools,
-             topic=topic,
-             custom_instructions=instructions,
-         )
-         agent.chat("What is 5 times 10. Only give the answer, nothing else")
-         agent.chat("what is 3 times 7. Only give the answer, nothing else")
-         res = agent.chat("multiply the results of the last two multiplications. Only give the answer, nothing else.")
-         self.assertIn("1050", res.response)
-
-
- if __name__ == "__main__":
-     unittest.main()