vectara-agentic 0.2.7__tar.gz → 0.2.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vectara-agentic might be problematic. Click here for more details.

Files changed (37)
  1. {vectara_agentic-0.2.7/vectara_agentic.egg-info → vectara_agentic-0.2.9}/PKG-INFO +2 -2
  2. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/requirements.txt +1 -1
  3. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_agent.py +0 -37
  4. vectara_agentic-0.2.9/tests/test_serialization.py +110 -0
  5. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/_callback.py +147 -45
  6. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/_version.py +1 -1
  7. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/agent.py +18 -6
  8. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/agent_config.py +11 -0
  9. vectara_agentic-0.2.9/vectara_agentic/db_tools.py +262 -0
  10. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/sub_query_workflow.py +4 -3
  11. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/tools.py +52 -44
  12. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/utils.py +39 -11
  13. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9/vectara_agentic.egg-info}/PKG-INFO +2 -2
  14. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic.egg-info/SOURCES.txt +1 -0
  15. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic.egg-info/requires.txt +1 -1
  16. vectara_agentic-0.2.7/vectara_agentic/db_tools.py +0 -96
  17. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/LICENSE +0 -0
  18. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/MANIFEST.in +0 -0
  19. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/README.md +0 -0
  20. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/setup.cfg +0 -0
  21. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/setup.py +0 -0
  22. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/__init__.py +0 -0
  23. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/endpoint.py +0 -0
  24. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_agent_planning.py +0 -0
  25. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_agent_type.py +0 -0
  26. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_fallback.py +0 -0
  27. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_private_llm.py +0 -0
  28. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_tools.py +0 -0
  29. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/tests/test_workflow.py +0 -0
  30. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/__init__.py +0 -0
  31. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/_observability.py +0 -0
  32. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/_prompts.py +0 -0
  33. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/agent_endpoint.py +0 -0
  34. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/tools_catalog.py +0 -0
  35. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic/types.py +0 -0
  36. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  37. {vectara_agentic-0.2.7 → vectara_agentic-0.2.9}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: vectara_agentic
3
- Version: 0.2.7
3
+ Version: 0.2.9
4
4
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
5
5
  Home-page: https://github.com/vectara/py-vectara-agentic
6
6
  Author: Ofer Mendelevitch
@@ -16,7 +16,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
16
  Requires-Python: >=3.10
17
17
  Description-Content-Type: text/markdown
18
18
  License-File: LICENSE
19
- Requires-Dist: llama-index==0.12.25
19
+ Requires-Dist: llama-index==0.12.26
20
20
  Requires-Dist: llama-index-indices-managed-vectara==0.4.2
21
21
  Requires-Dist: llama-index-agent-llm-compiler==0.3.0
22
22
  Requires-Dist: llama-index-agent-lats==0.3.0
@@ -1,4 +1,4 @@
1
- llama-index==0.12.25
1
+ llama-index==0.12.26
2
2
  llama-index-indices-managed-vectara==0.4.2
3
3
  llama-index-agent-llm-compiler==0.3.0
4
4
  llama-index-agent-lats==0.3.0
@@ -106,43 +106,6 @@ class TestAgentPackage(unittest.TestCase):
106
106
  self.assertIsInstance(agent, Agent)
107
107
  self.assertEqual(agent._topic, "question answering")
108
108
 
109
- def test_serialization(self):
110
- with ARIZE_LOCK:
111
- config = AgentConfig(
112
- agent_type=AgentType.REACT,
113
- main_llm_provider=ModelProvider.ANTHROPIC,
114
- tool_llm_provider=ModelProvider.TOGETHER,
115
- observer=ObserverType.ARIZE_PHOENIX
116
- )
117
-
118
- agent = Agent.from_corpus(
119
- tool_name="RAG Tool",
120
- agent_config=config,
121
- vectara_corpus_key="corpus_key",
122
- vectara_api_key="api_key",
123
- data_description="information",
124
- assistant_specialty="question answering",
125
- )
126
-
127
- agent_reloaded = agent.loads(agent.dumps())
128
- agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
129
-
130
- self.assertIsInstance(agent_reloaded, Agent)
131
- self.assertEqual(agent, agent_reloaded)
132
- self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
133
-
134
- self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
135
- self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
136
- self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
137
-
138
- self.assertIsInstance(agent_reloaded, Agent)
139
- self.assertEqual(agent, agent_reloaded_again)
140
- self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
141
-
142
- self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
143
- self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
144
- self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
145
-
146
109
  def test_chat_history(self):
147
110
  tools = [ToolsFactory().create_tool(mult)]
148
111
  topic = "AI topic"
@@ -0,0 +1,110 @@
1
+ import unittest
2
+ import threading
3
+ import os
4
+
5
+ from vectara_agentic.agent import Agent, AgentType
6
+ from vectara_agentic.agent_config import AgentConfig
7
+ from vectara_agentic.types import ModelProvider, ObserverType
8
+ from vectara_agentic.tools import ToolsFactory
9
+
10
+ from llama_index.core.utilities.sql_wrapper import SQLDatabase
11
+ from sqlalchemy import create_engine
12
+
13
+ def mult(x: float, y: float) -> float:
14
+ return x * y
15
+
16
+
17
+ ARIZE_LOCK = threading.Lock()
18
+
19
+ class TestAgentSerialization(unittest.TestCase):
20
+
21
+ @classmethod
22
+ def tearDown(cls):
23
+ try:
24
+ os.remove('ev_database.db')
25
+ except FileNotFoundError:
26
+ pass
27
+
28
+ def test_serialization(self):
29
+ with ARIZE_LOCK:
30
+ config = AgentConfig(
31
+ agent_type=AgentType.REACT,
32
+ main_llm_provider=ModelProvider.ANTHROPIC,
33
+ tool_llm_provider=ModelProvider.TOGETHER,
34
+ observer=ObserverType.ARIZE_PHOENIX
35
+ )
36
+ db_tools = ToolsFactory().database_tools(
37
+ tool_name_prefix = "ev",
38
+ content_description = 'Electric Vehicles in the state of Washington and other population information',
39
+ sql_database = SQLDatabase(create_engine('sqlite:///ev_database.db')),
40
+ )
41
+
42
+ tools = [ToolsFactory().create_tool(mult)] + ToolsFactory().standard_tools() + db_tools
43
+ topic = "AI topic"
44
+ instructions = "Always do as your father tells you, if your mother agrees!"
45
+ agent = Agent(
46
+ tools=tools,
47
+ topic=topic,
48
+ custom_instructions=instructions,
49
+ agent_config=config
50
+ )
51
+
52
+ agent_reloaded = agent.loads(agent.dumps())
53
+ agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
54
+
55
+ self.assertIsInstance(agent_reloaded, Agent)
56
+ self.assertEqual(agent, agent_reloaded)
57
+ self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
58
+
59
+ self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
60
+ self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
61
+ self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
62
+
63
+ self.assertIsInstance(agent_reloaded, Agent)
64
+ self.assertEqual(agent, agent_reloaded_again)
65
+ self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
66
+
67
+ self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
68
+ self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
69
+ self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
70
+
71
+ def test_serialization_from_corpus(self):
72
+ with ARIZE_LOCK:
73
+ config = AgentConfig(
74
+ agent_type=AgentType.REACT,
75
+ main_llm_provider=ModelProvider.ANTHROPIC,
76
+ tool_llm_provider=ModelProvider.TOGETHER,
77
+ observer=ObserverType.ARIZE_PHOENIX
78
+ )
79
+
80
+ agent = Agent.from_corpus(
81
+ tool_name="RAG Tool",
82
+ agent_config=config,
83
+ vectara_corpus_key="corpus_key",
84
+ vectara_api_key="api_key",
85
+ data_description="information",
86
+ assistant_specialty="question answering",
87
+ )
88
+
89
+ agent_reloaded = agent.loads(agent.dumps())
90
+ agent_reloaded_again = agent_reloaded.loads(agent_reloaded.dumps())
91
+
92
+ self.assertIsInstance(agent_reloaded, Agent)
93
+ self.assertEqual(agent, agent_reloaded)
94
+ self.assertEqual(agent.agent_type, agent_reloaded.agent_type)
95
+
96
+ self.assertEqual(agent.agent_config.observer, agent_reloaded.agent_config.observer)
97
+ self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded.agent_config.main_llm_provider)
98
+ self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded.agent_config.tool_llm_provider)
99
+
100
+ self.assertIsInstance(agent_reloaded, Agent)
101
+ self.assertEqual(agent, agent_reloaded_again)
102
+ self.assertEqual(agent.agent_type, agent_reloaded_again.agent_type)
103
+
104
+ self.assertEqual(agent.agent_config.observer, agent_reloaded_again.agent_config.observer)
105
+ self.assertEqual(agent.agent_config.main_llm_provider, agent_reloaded_again.agent_config.main_llm_provider)
106
+ self.assertEqual(agent.agent_config.tool_llm_provider, agent_reloaded_again.agent_config.tool_llm_provider)
107
+
108
+
109
+ if __name__ == "__main__":
110
+ unittest.main()
@@ -4,12 +4,35 @@ Module to handle agent callbacks
4
4
 
5
5
  import inspect
6
6
  from typing import Any, Dict, Optional, List, Callable
7
+ from functools import wraps
7
8
 
8
9
  from llama_index.core.callbacks.base_handler import BaseCallbackHandler
9
10
  from llama_index.core.callbacks.schema import CBEventType, EventPayload
10
11
 
11
12
  from .types import AgentStatusType
12
13
 
14
+ def wrap_callback_fn(callback):
15
+ """
16
+ Wrap a callback function to ensure it only receives the parameters it can accept.
17
+ This is useful for ensuring that the callback function does not receive unexpected
18
+ parameters, especially when the callback is called from different contexts.
19
+ """
20
+ if callback is None:
21
+ return None
22
+ try:
23
+ sig = inspect.signature(callback)
24
+ allowed_params = set(sig.parameters.keys())
25
+ except Exception:
26
+ # If we cannot determine the signature, return the callback as is.
27
+ return callback
28
+
29
+ @wraps(callback)
30
+ def new_callback(*args, **kwargs):
31
+ # Filter kwargs to only those that the original callback accepts.
32
+ filtered_kwargs = {k: v for k, v in kwargs.items() if k in allowed_params}
33
+ return callback(*args, **filtered_kwargs)
34
+
35
+ return new_callback
13
36
 
14
37
  class AgentCallbackHandler(BaseCallbackHandler):
15
38
  """
@@ -24,7 +47,7 @@ class AgentCallbackHandler(BaseCallbackHandler):
24
47
 
25
48
  def __init__(self, fn: Optional[Callable] = None) -> None:
26
49
  super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
27
- self.fn = fn
50
+ self.fn = wrap_callback_fn(fn)
28
51
 
29
52
  # Existing synchronous methods
30
53
  def on_event_start(
@@ -37,9 +60,11 @@ class AgentCallbackHandler(BaseCallbackHandler):
37
60
  ) -> str:
38
61
  if self.fn is not None and payload is not None:
39
62
  if inspect.iscoroutinefunction(self.fn):
40
- raise ValueError("Synchronous callback handler cannot use async callback function")
63
+ raise ValueError(
64
+ "Synchronous callback handler cannot use async callback function"
65
+ )
41
66
  # Handle events as before
42
- self._handle_event(event_type, payload)
67
+ self._handle_event(event_type, payload, event_id)
43
68
  return event_id
44
69
 
45
70
  def start_trace(self, trace_id: Optional[str] = None) -> None:
@@ -73,9 +98,11 @@ class AgentCallbackHandler(BaseCallbackHandler):
73
98
  """
74
99
  if self.fn is not None and payload is not None:
75
100
  if inspect.iscoroutinefunction(self.fn):
76
- raise ValueError("Synchronous callback handler cannot use async callback function")
101
+ raise ValueError(
102
+ "Synchronous callback handler cannot use async callback function"
103
+ )
77
104
  # Handle events as before
78
- self._handle_event(event_type, payload)
105
+ self._handle_event(event_type, payload, event_id)
79
106
 
80
107
  # New asynchronous methods
81
108
  async def aon_event_start(
@@ -100,7 +127,7 @@ class AgentCallbackHandler(BaseCallbackHandler):
100
127
  event_id: the event ID
101
128
  """
102
129
  if self.fn is not None and payload is not None:
103
- await self._ahandle_event(event_type, payload)
130
+ await self._ahandle_event(event_type, payload, event_id)
104
131
  return event_id
105
132
 
106
133
  async def aon_event_end(
@@ -114,48 +141,66 @@ class AgentCallbackHandler(BaseCallbackHandler):
114
141
  Handle the end of an event (async)
115
142
  """
116
143
  if self.fn is not None and payload is not None:
117
- await self._ahandle_event(event_type, payload)
144
+ await self._ahandle_event(event_type, payload, event_id)
118
145
 
119
146
  # Helper methods for handling events
120
- def _handle_event(self, event_type: CBEventType, payload: Dict[str, Any]) -> None:
147
+ def _handle_event(
148
+ self, event_type: CBEventType, payload: Dict[str, Any], event_id: str
149
+ ) -> None:
121
150
  if event_type == CBEventType.LLM:
122
- self._handle_llm(payload)
151
+ self._handle_llm(payload, event_id)
123
152
  elif event_type == CBEventType.FUNCTION_CALL:
124
- self._handle_function_call(payload)
153
+ self._handle_function_call(payload, event_id)
125
154
  elif event_type == CBEventType.AGENT_STEP:
126
- self._handle_agent_step(payload)
155
+ self._handle_agent_step(payload, event_id)
127
156
  elif event_type == CBEventType.EXCEPTION:
128
157
  print(f"Exception: {payload.get(EventPayload.EXCEPTION)}")
129
158
  else:
130
159
  print(f"Unknown event type: {event_type}, payload={payload}")
131
160
 
132
- async def _ahandle_event(self, event_type: CBEventType, payload: Dict[str, Any]) -> None:
161
+ async def _ahandle_event(
162
+ self, event_type: CBEventType, payload: Dict[str, Any], event_id: str
163
+ ) -> None:
133
164
  if event_type == CBEventType.LLM:
134
- await self._ahandle_llm(payload)
165
+ await self._ahandle_llm(payload, event_id)
135
166
  elif event_type == CBEventType.FUNCTION_CALL:
136
- await self._ahandle_function_call(payload)
167
+ await self._ahandle_function_call(payload, event_id)
137
168
  elif event_type == CBEventType.AGENT_STEP:
138
- await self._ahandle_agent_step(payload)
169
+ await self._ahandle_agent_step(payload, event_id)
139
170
  elif event_type == CBEventType.EXCEPTION:
140
171
  print(f"Exception: {payload.get(EventPayload.EXCEPTION)}")
141
172
  else:
142
173
  print(f"Unknown event type: {event_type}, payload={payload}")
143
174
 
144
175
  # Synchronous handlers
145
- def _handle_llm(self, payload: dict) -> None:
176
+ def _handle_llm(
177
+ self,
178
+ payload: dict,
179
+ event_id: str,
180
+ ) -> None:
146
181
  if EventPayload.MESSAGES in payload:
147
182
  response = str(payload.get(EventPayload.RESPONSE))
148
183
  if response and response not in ["None", "assistant: None"]:
149
184
  if self.fn:
150
- self.fn(AgentStatusType.AGENT_UPDATE, response)
185
+ self.fn(
186
+ status_type=AgentStatusType.AGENT_UPDATE,
187
+ msg=response,
188
+ event_id=event_id,
189
+ )
151
190
  elif EventPayload.PROMPT in payload:
152
191
  prompt = str(payload.get(EventPayload.PROMPT))
153
192
  if self.fn:
154
- self.fn(AgentStatusType.AGENT_UPDATE, prompt)
193
+ self.fn(
194
+ status_type=AgentStatusType.AGENT_UPDATE,
195
+ msg=prompt,
196
+ event_id=event_id,
197
+ )
155
198
  else:
156
- print(f"vectara-agentic llm callback: no messages or prompt found in payload {payload}")
199
+ print(
200
+ f"vectara-agentic llm callback: no messages or prompt found in payload {payload}"
201
+ )
157
202
 
158
- def _handle_function_call(self, payload: dict) -> None:
203
+ def _handle_function_call(self, payload: dict, event_id: str) -> None:
159
204
  if EventPayload.FUNCTION_CALL in payload:
160
205
  fcall = str(payload.get(EventPayload.FUNCTION_CALL))
161
206
  tool = payload.get(EventPayload.TOOL)
@@ -163,46 +208,77 @@ class AgentCallbackHandler(BaseCallbackHandler):
163
208
  tool_name = tool.name
164
209
  if self.fn:
165
210
  self.fn(
166
- AgentStatusType.TOOL_CALL,
167
- f"Executing '{tool_name}' with arguments: {fcall}",
211
+ status_type=AgentStatusType.TOOL_CALL,
212
+ msg=f"Executing '{tool_name}' with arguments: {fcall}",
213
+ event_id=event_id,
168
214
  )
169
215
  elif EventPayload.FUNCTION_OUTPUT in payload:
170
216
  response = str(payload.get(EventPayload.FUNCTION_OUTPUT))
171
217
  if self.fn:
172
- self.fn(AgentStatusType.TOOL_OUTPUT, response)
218
+ self.fn(
219
+ status_type=AgentStatusType.TOOL_OUTPUT,
220
+ msg=response,
221
+ event_id=event_id,
222
+ )
173
223
  else:
174
- print(f"Vectara-agentic callback handler: no function call or output found in payload {payload}")
224
+ print(
225
+ f"Vectara-agentic callback handler: no function call or output found in payload {payload}"
226
+ )
175
227
 
176
- def _handle_agent_step(self, payload: dict) -> None:
228
+ def _handle_agent_step(self, payload: dict, event_id: str) -> None:
177
229
  if EventPayload.MESSAGES in payload:
178
230
  msg = str(payload.get(EventPayload.MESSAGES))
179
231
  if self.fn:
180
- self.fn(AgentStatusType.AGENT_STEP, msg)
232
+ self.fn(
233
+ status_type=AgentStatusType.AGENT_STEP,
234
+ msg=msg,
235
+ event_id=event_id,
236
+ )
181
237
  elif EventPayload.RESPONSE in payload:
182
238
  response = str(payload.get(EventPayload.RESPONSE))
183
239
  if self.fn:
184
- self.fn(AgentStatusType.AGENT_STEP, response)
240
+ self.fn(
241
+ status_type=AgentStatusType.AGENT_STEP,
242
+ msg=response,
243
+ event_id=event_id,
244
+ )
185
245
  else:
186
- print(f"Vectara-agentic agent_step: no messages or prompt found in payload {payload}")
246
+ print(
247
+ f"Vectara-agentic agent_step: no messages or prompt found in payload {payload}"
248
+ )
187
249
 
188
250
  # Asynchronous handlers
189
- async def _ahandle_llm(self, payload: dict) -> None:
251
+ async def _ahandle_llm(self, payload: dict, event_id: str) -> None:
190
252
  if EventPayload.MESSAGES in payload:
191
253
  response = str(payload.get(EventPayload.RESPONSE))
192
254
  if response and response not in ["None", "assistant: None"]:
193
255
  if self.fn:
194
256
  if inspect.iscoroutinefunction(self.fn):
195
- await self.fn(AgentStatusType.AGENT_UPDATE, response)
257
+ await self.fn(
258
+ status_type=AgentStatusType.AGENT_UPDATE,
259
+ msg=response,
260
+ event_id=event_id,
261
+ )
196
262
  else:
197
- self.fn(AgentStatusType.AGENT_UPDATE, response)
263
+ self.fn(
264
+ status_type=AgentStatusType.AGENT_UPDATE,
265
+ msg=response,
266
+ event_id=event_id,
267
+ )
198
268
  elif EventPayload.PROMPT in payload:
199
269
  prompt = str(payload.get(EventPayload.PROMPT))
200
270
  if self.fn:
201
- self.fn(AgentStatusType.AGENT_UPDATE, prompt)
271
+ self.fn(
272
+ status_type=AgentStatusType.AGENT_UPDATE,
273
+ msg=prompt,
274
+ event_id=event_id,
275
+ )
202
276
  else:
203
- print(f"vectara-agentic llm callback: no messages or prompt found in payload {payload}")
277
+ print(
278
+ f"vectara-agentic llm callback: no messages or prompt found in payload {payload}"
279
+ )
204
280
 
205
- async def _ahandle_function_call(self, payload: dict) -> None:
281
+ async def _ahandle_function_call(self, payload: dict, event_id: str) -> None:
206
282
  if EventPayload.FUNCTION_CALL in payload:
207
283
  fcall = str(payload.get(EventPayload.FUNCTION_CALL))
208
284
  tool = payload.get(EventPayload.TOOL)
@@ -211,38 +287,64 @@ class AgentCallbackHandler(BaseCallbackHandler):
211
287
  if self.fn:
212
288
  if inspect.iscoroutinefunction(self.fn):
213
289
  await self.fn(
214
- AgentStatusType.TOOL_CALL,
215
- f"Executing '{tool_name}' with arguments: {fcall}",
290
+ status_type=AgentStatusType.TOOL_CALL,
291
+ msg=f"Executing '{tool_name}' with arguments: {fcall}",
292
+ event_id=event_id,
216
293
  )
217
294
  else:
218
295
  self.fn(
219
- AgentStatusType.TOOL_CALL,
220
- f"Executing '{tool_name}' with arguments: {fcall}",
296
+ status_type=AgentStatusType.TOOL_CALL,
297
+ msg=f"Executing '{tool_name}' with arguments: {fcall}",
298
+ event_id=event_id,
221
299
  )
222
300
  elif EventPayload.FUNCTION_OUTPUT in payload:
223
301
  if self.fn:
224
302
  response = str(payload.get(EventPayload.FUNCTION_OUTPUT))
225
303
  if inspect.iscoroutinefunction(self.fn):
226
- await self.fn(AgentStatusType.TOOL_OUTPUT, response)
304
+ await self.fn(
305
+ status_type=AgentStatusType.TOOL_OUTPUT,
306
+ msg=response,
307
+ event_id=event_id,
308
+ )
227
309
  else:
228
- self.fn(AgentStatusType.TOOL_OUTPUT, response)
310
+ self.fn(
311
+ status_type=AgentStatusType.TOOL_OUTPUT,
312
+ msg=response,
313
+ event_id=event_id,
314
+ )
229
315
  else:
230
316
  print(f"No function call or output found in payload {payload}")
231
317
 
232
- async def _ahandle_agent_step(self, payload: dict) -> None:
318
+ async def _ahandle_agent_step(self, payload: dict, event_id: str) -> None:
233
319
  if EventPayload.MESSAGES in payload:
234
320
  if self.fn:
235
321
  msg = str(payload.get(EventPayload.MESSAGES))
236
322
  if inspect.iscoroutinefunction(self.fn):
237
- await self.fn(AgentStatusType.AGENT_STEP, msg)
323
+ await self.fn(
324
+ status_type=AgentStatusType.AGENT_STEP,
325
+ msg=msg,
326
+ event_id=event_id,
327
+ )
238
328
  else:
239
- self.fn(AgentStatusType.AGENT_STEP, msg)
329
+ self.fn(
330
+ status_type=AgentStatusType.AGENT_STEP,
331
+ msg=msg,
332
+ event_id=event_id,
333
+ )
240
334
  elif EventPayload.RESPONSE in payload:
241
335
  if self.fn:
242
336
  response = str(payload.get(EventPayload.RESPONSE))
243
337
  if inspect.iscoroutinefunction(self.fn):
244
- await self.fn(AgentStatusType.AGENT_STEP, response)
338
+ await self.fn(
339
+ status_type=AgentStatusType.AGENT_STEP,
340
+ msg=response,
341
+ event_id=event_id,
342
+ )
245
343
  else:
246
- self.fn(AgentStatusType.AGENT_STEP, response)
344
+ self.fn(
345
+ status_type=AgentStatusType.AGENT_STEP,
346
+ msg=response,
347
+ event_id=event_id,
348
+ )
247
349
  else:
248
350
  print(f"No messages or prompt found in payload {payload}")
@@ -1,4 +1,4 @@
1
1
  """
2
2
  Define the version of the package.
3
3
  """
4
- __version__ = "0.2.7"
4
+ __version__ = "0.2.9"
@@ -33,7 +33,7 @@ from llama_index.core.agent.types import BaseAgent
33
33
  from llama_index.core.workflow import Workflow
34
34
 
35
35
  from .types import (
36
- AgentType, AgentStatusType, LLMRole, ToolType,
36
+ AgentType, AgentStatusType, LLMRole, ToolType, ModelProvider,
37
37
  AgentResponse, AgentStreamingResponse, AgentConfigType
38
38
  )
39
39
  from .utils import get_llm, get_tokenizer_for_model
@@ -278,6 +278,10 @@ class Agent:
278
278
  llm.callback_manager = llm_callback_manager
279
279
 
280
280
  if agent_type == AgentType.FUNCTION_CALLING:
281
+ if config.tool_llm_provider == ModelProvider.OPENAI:
282
+ raise ValueError(
283
+ "Vectara-agentic: Function calling agent type is not supported with the OpenAI LLM."
284
+ )
281
285
  prompt = _get_prompt(GENERAL_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
282
286
  agent = FunctionCallingAgent.from_tools(
283
287
  tools=self.tools,
@@ -286,7 +290,7 @@ class Agent:
286
290
  verbose=self.verbose,
287
291
  max_function_calls=config.max_reasoning_steps,
288
292
  callback_manager=llm_callback_manager,
289
- system_prompt = prompt,
293
+ system_prompt=prompt,
290
294
  allow_parallel_tool_calls=True,
291
295
  )
292
296
  elif agent_type == AgentType.REACT:
@@ -301,6 +305,10 @@ class Agent:
301
305
  callable_manager=llm_callback_manager,
302
306
  )
303
307
  elif agent_type == AgentType.OPENAI:
308
+ if config.tool_llm_provider != ModelProvider.OPENAI:
309
+ raise ValueError(
310
+ "Vectara-agentic: OPENAI agent type requires the OpenAI LLM."
311
+ )
304
312
  prompt = _get_prompt(GENERAL_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
305
313
  agent = OpenAIAgent.from_tools(
306
314
  tools=self.tools,
@@ -768,6 +776,7 @@ class Agent:
768
776
  """
769
777
  max_attempts = 4 if self.fallback_agent_config else 2
770
778
  attempt = 0
779
+ orig_llm = self.llm.metadata.model_name
771
780
  while attempt < max_attempts:
772
781
  try:
773
782
  current_agent = self._get_current_agent()
@@ -788,16 +797,20 @@ class Agent:
788
797
  agent_response.async_response_gen = _stream_response_wrapper # Override the generator
789
798
  return agent_response
790
799
 
791
- except Exception:
800
+ except Exception as e:
801
+ last_error = e
792
802
  if attempt >= 2:
793
803
  if self.verbose:
794
- print("LLM call failed. Switching agent configuration.")
804
+ print(f"LLM call failed on attempt {attempt}. Switching agent configuration.")
795
805
  self._switch_agent_config()
796
806
  time.sleep(1)
797
807
  attempt += 1
798
808
 
799
809
  return AgentResponse(
800
- response=f"LLM failure can't be resolved after {max_attempts} attempts."
810
+ response=(
811
+ f"For {orig_llm} LLM - failure can't be resolved after "
812
+ f"{max_attempts} attempts ({last_error})."
813
+ )
801
814
  )
802
815
 
803
816
  #
@@ -861,7 +874,6 @@ class Agent:
861
874
  def to_dict(self) -> Dict[str, Any]:
862
875
  """Serialize the Agent instance to a dictionary."""
863
876
  tool_info = []
864
-
865
877
  for tool in self.tools:
866
878
  if hasattr(tool.metadata, "fn_schema"):
867
879
  fn_schema_cls = tool.metadata.fn_schema
@@ -71,6 +71,17 @@ class AgentConfig:
71
71
  default_factory=lambda: int(os.getenv("VECTARA_AGENTIC_MAX_REASONING_STEPS", "50"))
72
72
  )
73
73
 
74
+ def __post_init__(self):
75
+ # Use object.__setattr__ since the dataclass is frozen
76
+ if isinstance(self.agent_type, str):
77
+ object.__setattr__(self, "agent_type", AgentType(self.agent_type))
78
+ if isinstance(self.main_llm_provider, str):
79
+ object.__setattr__(self, "main_llm_provider", ModelProvider(self.main_llm_provider))
80
+ if isinstance(self.tool_llm_provider, str):
81
+ object.__setattr__(self, "tool_llm_provider", ModelProvider(self.tool_llm_provider))
82
+ if isinstance(self.observer, str):
83
+ object.__setattr__(self, "observer", ObserverType(self.observer))
84
+
74
85
  def to_dict(self) -> dict:
75
86
  """
76
87
  Convert the AgentConfig to a dictionary.