vectara-agentic 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

tests/test_tools.py CHANGED
@@ -2,7 +2,12 @@ import unittest
 
 from pydantic import Field, BaseModel
 
-from vectara_agentic.tools import VectaraTool, VectaraToolFactory, ToolsFactory, ToolType
+from vectara_agentic.tools import (
+    VectaraTool,
+    VectaraToolFactory,
+    ToolsFactory,
+    ToolType,
+)
 from vectara_agentic.agent import Agent
 from vectara_agentic.agent_config import AgentConfig
 
@@ -13,9 +18,7 @@ class TestToolsPackage(unittest.TestCase):
     def test_vectara_tool_factory(self):
         vectara_corpus_key = "corpus_key"
         vectara_api_key = "api_key"
-        vec_factory = VectaraToolFactory(
-            vectara_corpus_key, vectara_api_key
-        )
+        vec_factory = VectaraToolFactory(vectara_corpus_key, vectara_api_key)
 
         self.assertEqual(vectara_corpus_key, vec_factory.vectara_corpus_key)
         self.assertEqual(vectara_api_key, vec_factory.vectara_api_key)
@@ -46,6 +49,48 @@ class TestToolsPackage(unittest.TestCase):
         self.assertIsInstance(search_tool, FunctionTool)
         self.assertEqual(search_tool.metadata.tool_type, ToolType.QUERY)
 
+    def test_vectara_tool_validation(self):
+        vectara_corpus_key = "corpus_key"
+        vectara_api_key = "api_key"
+        vec_factory = VectaraToolFactory(vectara_corpus_key, vectara_api_key)
+
+        class QueryToolArgs(BaseModel):
+            query: str = Field(description="The user query")
+            year: int = Field(
+                description="The year of the document",
+                example=2023,
+            )
+            ticker: str = Field(
+                description="The stock ticker",
+                example="AAPL",
+            )
+
+        query_tool = vec_factory.create_rag_tool(
+            tool_name="rag_tool",
+            tool_description="""
+            Returns a response (str) to the user query based on the data in this corpus.
+            """,
+            tool_args_schema=QueryToolArgs,
+        )
+        res = query_tool(
+            query="What is the stock price?",
+            the_year=2023,
+        )
+        self.assertIn("Malfunction", str(res))
+
+        search_tool = vec_factory.create_search_tool(
+            tool_name="search_tool",
+            tool_description="""
+            Returns a list of documents (str) that match the user query.
+            """,
+            tool_args_schema=QueryToolArgs,
+        )
+        res = search_tool(
+            query="What is the stock price?",
+            the_year=2023,
+        )
+        self.assertIn("Malfunction", str(res))
+
     def test_tool_factory(self):
         def mult(x: float, y: float) -> float:
             return x * y
@@ -59,17 +104,21 @@ class TestToolsPackage(unittest.TestCase):
     def test_llama_index_tools(self):
         tools_factory = ToolsFactory()
 
-        llama_tools = tools_factory.get_llama_index_tools(
-            tool_package_name="arxiv",
-            tool_spec_name="ArxivToolSpec"
-        )
-
-        arxiv_tool = llama_tools[0]
+        arxiv_tool = tools_factory.get_llama_index_tools(
+            tool_package_name="arxiv", tool_spec_name="ArxivToolSpec"
+        )[0]
 
         self.assertIsInstance(arxiv_tool, VectaraTool)
         self.assertIsInstance(arxiv_tool, FunctionTool)
         self.assertEqual(arxiv_tool.metadata.tool_type, ToolType.QUERY)
 
+        yfinance_tool = tools_factory.get_llama_index_tools(
+            tool_package_name="yahoo_finance", tool_spec_name="YahooFinanceToolSpec"
+        )[0]
+        self.assertIsInstance(yfinance_tool, VectaraTool)
+        self.assertIsInstance(yfinance_tool, FunctionTool)
+        self.assertEqual(yfinance_tool.metadata.tool_type, ToolType.QUERY)
+
     def test_public_repo(self):
         vectara_corpus_key = "vectara-docs_1"
         vectara_api_key = "zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA"
@@ -80,10 +129,12 @@ class TestToolsPackage(unittest.TestCase):
             tool_name="ask_vectara",
             data_description="data from Vectara website",
             assistant_specialty="RAG as a service",
-            vectara_summarizer="mockingbird-1.0-2024-07-16"
+            vectara_summarizer="mockingbird-1.0-2024-07-16",
         )
 
-        self.assertIn("Vectara is an end-to-end platform", str(agent.chat("What is Vectara?")))
+        self.assertIn(
+            "Vectara is an end-to-end platform", str(agent.chat("What is Vectara?"))
+        )
 
     def test_class_method_as_tool(self):
         class TestClass:
@@ -102,7 +153,7 @@ class TestToolsPackage(unittest.TestCase):
             tools=tools,
             topic=topic,
             custom_instructions=instructions,
-            agent_config=config
+            agent_config=config,
         )
 
         self.assertEqual(
vectara_agentic/_callback.py CHANGED
@@ -4,12 +4,35 @@ Module to handle agent callbacks
 
 import inspect
 from typing import Any, Dict, Optional, List, Callable
+from functools import wraps
 
 from llama_index.core.callbacks.base_handler import BaseCallbackHandler
 from llama_index.core.callbacks.schema import CBEventType, EventPayload
 
 from .types import AgentStatusType
 
+def wrap_callback_fn(callback):
+    """
+    Wrap a callback function to ensure it only receives the parameters it can accept.
+    This is useful for ensuring that the callback function does not receive unexpected
+    parameters, especially when the callback is called from different contexts.
+    """
+    if callback is None:
+        return None
+    try:
+        sig = inspect.signature(callback)
+        allowed_params = set(sig.parameters.keys())
+    except Exception:
+        # If we cannot determine the signature, return the callback as is.
+        return callback
+
+    @wraps(callback)
+    def new_callback(*args, **kwargs):
+        # Filter kwargs to only those that the original callback accepts.
+        filtered_kwargs = {k: v for k, v in kwargs.items() if k in allowed_params}
+        return callback(*args, **filtered_kwargs)
+
+    return new_callback
 
 class AgentCallbackHandler(BaseCallbackHandler):
     """
@@ -24,7 +47,7 @@ class AgentCallbackHandler(BaseCallbackHandler):
 
     def __init__(self, fn: Optional[Callable] = None) -> None:
         super().__init__(event_starts_to_ignore=[], event_ends_to_ignore=[])
-        self.fn = fn
+        self.fn = wrap_callback_fn(fn)
 
     # Existing synchronous methods
     def on_event_start(
@@ -37,9 +60,11 @@ class AgentCallbackHandler(BaseCallbackHandler):
     ) -> str:
         if self.fn is not None and payload is not None:
             if inspect.iscoroutinefunction(self.fn):
-                raise ValueError("Synchronous callback handler cannot use async callback function")
+                raise ValueError(
+                    "Synchronous callback handler cannot use async callback function"
+                )
             # Handle events as before
-            self._handle_event(event_type, payload)
+            self._handle_event(event_type, payload, event_id)
         return event_id
 
     def start_trace(self, trace_id: Optional[str] = None) -> None:
@@ -73,9 +98,11 @@ class AgentCallbackHandler(BaseCallbackHandler):
         """
         if self.fn is not None and payload is not None:
             if inspect.iscoroutinefunction(self.fn):
-                raise ValueError("Synchronous callback handler cannot use async callback function")
+                raise ValueError(
+                    "Synchronous callback handler cannot use async callback function"
+                )
             # Handle events as before
-            self._handle_event(event_type, payload)
+            self._handle_event(event_type, payload, event_id)
 
     # New asynchronous methods
     async def aon_event_start(
  async def aon_event_start(
@@ -100,7 +127,7 @@ class AgentCallbackHandler(BaseCallbackHandler):
100
127
  event_id: the event ID
101
128
  """
102
129
  if self.fn is not None and payload is not None:
103
- await self._ahandle_event(event_type, payload)
130
+ await self._ahandle_event(event_type, payload, event_id)
104
131
  return event_id
105
132
 
106
133
  async def aon_event_end(
@@ -114,48 +141,66 @@ class AgentCallbackHandler(BaseCallbackHandler):
         Handle the end of an event (async)
         """
         if self.fn is not None and payload is not None:
-            await self._ahandle_event(event_type, payload)
+            await self._ahandle_event(event_type, payload, event_id)
 
     # Helper methods for handling events
-    def _handle_event(self, event_type: CBEventType, payload: Dict[str, Any]) -> None:
+    def _handle_event(
+        self, event_type: CBEventType, payload: Dict[str, Any], event_id: str
+    ) -> None:
         if event_type == CBEventType.LLM:
-            self._handle_llm(payload)
+            self._handle_llm(payload, event_id)
         elif event_type == CBEventType.FUNCTION_CALL:
-            self._handle_function_call(payload)
+            self._handle_function_call(payload, event_id)
         elif event_type == CBEventType.AGENT_STEP:
-            self._handle_agent_step(payload)
+            self._handle_agent_step(payload, event_id)
         elif event_type == CBEventType.EXCEPTION:
             print(f"Exception: {payload.get(EventPayload.EXCEPTION)}")
         else:
             print(f"Unknown event type: {event_type}, payload={payload}")
 
-    async def _ahandle_event(self, event_type: CBEventType, payload: Dict[str, Any]) -> None:
+    async def _ahandle_event(
+        self, event_type: CBEventType, payload: Dict[str, Any], event_id: str
+    ) -> None:
         if event_type == CBEventType.LLM:
-            await self._ahandle_llm(payload)
+            await self._ahandle_llm(payload, event_id)
         elif event_type == CBEventType.FUNCTION_CALL:
-            await self._ahandle_function_call(payload)
+            await self._ahandle_function_call(payload, event_id)
         elif event_type == CBEventType.AGENT_STEP:
-            await self._ahandle_agent_step(payload)
+            await self._ahandle_agent_step(payload, event_id)
         elif event_type == CBEventType.EXCEPTION:
             print(f"Exception: {payload.get(EventPayload.EXCEPTION)}")
         else:
             print(f"Unknown event type: {event_type}, payload={payload}")
 
     # Synchronous handlers
-    def _handle_llm(self, payload: dict) -> None:
+    def _handle_llm(
+        self,
+        payload: dict,
+        event_id: str,
+    ) -> None:
         if EventPayload.MESSAGES in payload:
             response = str(payload.get(EventPayload.RESPONSE))
             if response and response not in ["None", "assistant: None"]:
                 if self.fn:
-                    self.fn(AgentStatusType.AGENT_UPDATE, response)
+                    self.fn(
+                        status_type=AgentStatusType.AGENT_UPDATE,
+                        msg=response,
+                        event_id=event_id,
+                    )
         elif EventPayload.PROMPT in payload:
             prompt = str(payload.get(EventPayload.PROMPT))
             if self.fn:
-                self.fn(AgentStatusType.AGENT_UPDATE, prompt)
+                self.fn(
+                    status_type=AgentStatusType.AGENT_UPDATE,
+                    msg=prompt,
+                    event_id=event_id,
+                )
         else:
-            print(f"vectara-agentic llm callback: no messages or prompt found in payload {payload}")
+            print(
+                f"vectara-agentic llm callback: no messages or prompt found in payload {payload}"
+            )
 
-    def _handle_function_call(self, payload: dict) -> None:
+    def _handle_function_call(self, payload: dict, event_id: str) -> None:
         if EventPayload.FUNCTION_CALL in payload:
             fcall = str(payload.get(EventPayload.FUNCTION_CALL))
             tool = payload.get(EventPayload.TOOL)
@@ -163,46 +208,77 @@ class AgentCallbackHandler(BaseCallbackHandler):
                 tool_name = tool.name
                 if self.fn:
                     self.fn(
-                        AgentStatusType.TOOL_CALL,
-                        f"Executing '{tool_name}' with arguments: {fcall}",
+                        status_type=AgentStatusType.TOOL_CALL,
+                        msg=f"Executing '{tool_name}' with arguments: {fcall}",
+                        event_id=event_id,
                     )
         elif EventPayload.FUNCTION_OUTPUT in payload:
             response = str(payload.get(EventPayload.FUNCTION_OUTPUT))
             if self.fn:
-                self.fn(AgentStatusType.TOOL_OUTPUT, response)
+                self.fn(
+                    status_type=AgentStatusType.TOOL_OUTPUT,
+                    msg=response,
+                    event_id=event_id,
+                )
         else:
-            print(f"Vectara-agentic callback handler: no function call or output found in payload {payload}")
+            print(
+                f"Vectara-agentic callback handler: no function call or output found in payload {payload}"
+            )
 
-    def _handle_agent_step(self, payload: dict) -> None:
+    def _handle_agent_step(self, payload: dict, event_id: str) -> None:
         if EventPayload.MESSAGES in payload:
             msg = str(payload.get(EventPayload.MESSAGES))
             if self.fn:
-                self.fn(AgentStatusType.AGENT_STEP, msg)
+                self.fn(
+                    status_type=AgentStatusType.AGENT_STEP,
+                    msg=msg,
+                    event_id=event_id,
+                )
         elif EventPayload.RESPONSE in payload:
             response = str(payload.get(EventPayload.RESPONSE))
             if self.fn:
-                self.fn(AgentStatusType.AGENT_STEP, response)
+                self.fn(
+                    status_type=AgentStatusType.AGENT_STEP,
+                    msg=response,
+                    event_id=event_id,
+                )
         else:
-            print(f"Vectara-agentic agent_step: no messages or prompt found in payload {payload}")
+            print(
+                f"Vectara-agentic agent_step: no messages or prompt found in payload {payload}"
+            )
 
     # Asynchronous handlers
-    async def _ahandle_llm(self, payload: dict) -> None:
+    async def _ahandle_llm(self, payload: dict, event_id: str) -> None:
         if EventPayload.MESSAGES in payload:
             response = str(payload.get(EventPayload.RESPONSE))
             if response and response not in ["None", "assistant: None"]:
                 if self.fn:
                     if inspect.iscoroutinefunction(self.fn):
-                        await self.fn(AgentStatusType.AGENT_UPDATE, response)
+                        await self.fn(
+                            status_type=AgentStatusType.AGENT_UPDATE,
+                            msg=response,
+                            event_id=event_id,
+                        )
                     else:
-                        self.fn(AgentStatusType.AGENT_UPDATE, response)
+                        self.fn(
+                            status_type=AgentStatusType.AGENT_UPDATE,
+                            msg=response,
+                            event_id=event_id,
+                        )
         elif EventPayload.PROMPT in payload:
             prompt = str(payload.get(EventPayload.PROMPT))
             if self.fn:
-                self.fn(AgentStatusType.AGENT_UPDATE, prompt)
+                self.fn(
+                    status_type=AgentStatusType.AGENT_UPDATE,
+                    msg=prompt,
+                    event_id=event_id,
+                )
         else:
-            print(f"vectara-agentic llm callback: no messages or prompt found in payload {payload}")
+            print(
+                f"vectara-agentic llm callback: no messages or prompt found in payload {payload}"
+            )
 
-    async def _ahandle_function_call(self, payload: dict) -> None:
+    async def _ahandle_function_call(self, payload: dict, event_id: str) -> None:
         if EventPayload.FUNCTION_CALL in payload:
             fcall = str(payload.get(EventPayload.FUNCTION_CALL))
             tool = payload.get(EventPayload.TOOL)
@@ -211,38 +287,64 @@ class AgentCallbackHandler(BaseCallbackHandler):
             if self.fn:
                 if inspect.iscoroutinefunction(self.fn):
                     await self.fn(
-                        AgentStatusType.TOOL_CALL,
-                        f"Executing '{tool_name}' with arguments: {fcall}",
+                        status_type=AgentStatusType.TOOL_CALL,
+                        msg=f"Executing '{tool_name}' with arguments: {fcall}",
+                        event_id=event_id,
                     )
                 else:
                     self.fn(
-                        AgentStatusType.TOOL_CALL,
-                        f"Executing '{tool_name}' with arguments: {fcall}",
+                        status_type=AgentStatusType.TOOL_CALL,
+                        msg=f"Executing '{tool_name}' with arguments: {fcall}",
+                        event_id=event_id,
                     )
         elif EventPayload.FUNCTION_OUTPUT in payload:
             if self.fn:
                 response = str(payload.get(EventPayload.FUNCTION_OUTPUT))
                 if inspect.iscoroutinefunction(self.fn):
-                    await self.fn(AgentStatusType.TOOL_OUTPUT, response)
+                    await self.fn(
+                        status_type=AgentStatusType.TOOL_OUTPUT,
+                        msg=response,
+                        event_id=event_id,
+                    )
                 else:
-                    self.fn(AgentStatusType.TOOL_OUTPUT, response)
+                    self.fn(
+                        status_type=AgentStatusType.TOOL_OUTPUT,
+                        msg=response,
+                        event_id=event_id,
+                    )
         else:
             print(f"No function call or output found in payload {payload}")
 
-    async def _ahandle_agent_step(self, payload: dict) -> None:
+    async def _ahandle_agent_step(self, payload: dict, event_id: str) -> None:
         if EventPayload.MESSAGES in payload:
             if self.fn:
                 msg = str(payload.get(EventPayload.MESSAGES))
                 if inspect.iscoroutinefunction(self.fn):
-                    await self.fn(AgentStatusType.AGENT_STEP, msg)
+                    await self.fn(
+                        status_type=AgentStatusType.AGENT_STEP,
+                        msg=msg,
+                        event_id=event_id,
+                    )
                 else:
-                    self.fn(AgentStatusType.AGENT_STEP, msg)
+                    self.fn(
+                        status_type=AgentStatusType.AGENT_STEP,
+                        msg=msg,
+                        event_id=event_id,
+                    )
         elif EventPayload.RESPONSE in payload:
             if self.fn:
                 response = str(payload.get(EventPayload.RESPONSE))
                 if inspect.iscoroutinefunction(self.fn):
-                    await self.fn(AgentStatusType.AGENT_STEP, response)
+                    await self.fn(
+                        status_type=AgentStatusType.AGENT_STEP,
+                        msg=response,
+                        event_id=event_id,
+                    )
                 else:
-                    self.fn(AgentStatusType.AGENT_STEP, response)
+                    self.fn(
+                        status_type=AgentStatusType.AGENT_STEP,
+                        msg=response,
+                        event_id=event_id,
+                    )
         else:
             print(f"No messages or prompt found in payload {payload}")
vectara_agentic/_observability.py CHANGED
@@ -8,7 +8,7 @@ import pandas as pd
 from .types import ObserverType
 from .agent_config import AgentConfig
 
-def setup_observer(config: AgentConfig) -> bool:
+def setup_observer(config: AgentConfig, verbose: bool) -> bool:
     '''
     Setup the observer.
     '''
@@ -31,7 +31,8 @@ def setup_observer(config: AgentConfig) -> bool:
        tracer_provider = register(endpoint=phoenix_endpoint, project_name="vectara-agentic")
        LlamaIndexInstrumentor().instrument(tracer_provider=tracer_provider)
        return True
-    print("No observer set.")
+    if verbose:
+        print("No observer set.")
     return False
 
 
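`setup_observer` now takes `verbose` explicitly, so the fallback message can be silenced. A sketch of the new call shape, assuming a default `AgentConfig` with no observer configured:

```python
from vectara_agentic._observability import setup_observer
from vectara_agentic.agent_config import AgentConfig

# Returns False and prints nothing when no observer is configured:
enabled = setup_observer(AgentConfig(), verbose=False)
```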
vectara_agentic/_version.py CHANGED
@@ -1,4 +1,4 @@
 """
 Define the version of the package.
 """
-__version__ = "0.2.8"
+__version__ = "0.2.10"
vectara_agentic/agent.py CHANGED
@@ -33,7 +33,7 @@ from llama_index.core.agent.types import BaseAgent
 from llama_index.core.workflow import Workflow
 
 from .types import (
-    AgentType, AgentStatusType, LLMRole, ToolType,
+    AgentType, AgentStatusType, LLMRole, ToolType, ModelProvider,
     AgentResponse, AgentStreamingResponse, AgentConfigType
 )
 from .utils import get_llm, get_tokenizer_for_model
@@ -252,7 +252,7 @@ class Agent:
 
         # Setup observability
         try:
-            self.observability_enabled = setup_observer(self.agent_config)
+            self.observability_enabled = setup_observer(self.agent_config, self.verbose)
         except Exception as e:
             print(f"Failed to set up observer ({e}), ignoring")
             self.observability_enabled = False
@@ -278,6 +278,10 @@ class Agent:
             llm.callback_manager = llm_callback_manager
 
         if agent_type == AgentType.FUNCTION_CALLING:
+            if config.tool_llm_provider == ModelProvider.OPENAI:
+                raise ValueError(
+                    "Vectara-agentic: Function calling agent type is not supported with the OpenAI LLM."
+                )
             prompt = _get_prompt(GENERAL_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
             agent = FunctionCallingAgent.from_tools(
                 tools=self.tools,
@@ -286,7 +290,7 @@ class Agent:
                 verbose=self.verbose,
                 max_function_calls=config.max_reasoning_steps,
                 callback_manager=llm_callback_manager,
-                system_prompt = prompt,
+                system_prompt=prompt,
                 allow_parallel_tool_calls=True,
             )
         elif agent_type == AgentType.REACT:
@@ -301,6 +305,10 @@ class Agent:
                 callable_manager=llm_callback_manager,
             )
         elif agent_type == AgentType.OPENAI:
+            if config.tool_llm_provider != ModelProvider.OPENAI:
+                raise ValueError(
+                    "Vectara-agentic: OPENAI agent type requires the OpenAI LLM."
+                )
             prompt = _get_prompt(GENERAL_PROMPT_TEMPLATE, self._topic, self._custom_instructions)
             agent = OpenAIAgent.from_tools(
                 tools=self.tools,
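The two new guards make agent type and tool-LLM provider compatibility explicit: `FUNCTION_CALLING` now rejects an OpenAI tool LLM, while the `OPENAI` agent type requires one. A hedged sketch (the string forms of the enum values are assumptions; the error is raised when the agent is constructed, not when the config is created):

```python
from vectara_agentic.agent_config import AgentConfig

# Accepted: function-calling agent with a non-OpenAI tool LLM.
ok = AgentConfig(agent_type="FUNCTION_CALLING", tool_llm_provider="ANTHROPIC")

# Passing this config to Agent(...) raises the ValueError shown above.
bad = AgentConfig(agent_type="FUNCTION_CALLING", tool_llm_provider="OPENAI")
```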
vectara_agentic/agent_config.py CHANGED
@@ -71,6 +71,17 @@ class AgentConfig:
         default_factory=lambda: int(os.getenv("VECTARA_AGENTIC_MAX_REASONING_STEPS", "50"))
     )
 
+    def __post_init__(self):
+        # Use object.__setattr__ since the dataclass is frozen
+        if isinstance(self.agent_type, str):
+            object.__setattr__(self, "agent_type", AgentType(self.agent_type))
+        if isinstance(self.main_llm_provider, str):
+            object.__setattr__(self, "main_llm_provider", ModelProvider(self.main_llm_provider))
+        if isinstance(self.tool_llm_provider, str):
+            object.__setattr__(self, "tool_llm_provider", ModelProvider(self.tool_llm_provider))
+        if isinstance(self.observer, str):
+            object.__setattr__(self, "observer", ObserverType(self.observer))
+
     def to_dict(self) -> dict:
         """
         Convert the AgentConfig to a dictionary.
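With `__post_init__` in place, enum-valued fields can be supplied as plain strings (for example, values read from environment variables or a serialized config) and are normalized to enum members on construction. A small sketch, assuming `"REACT"` is the string value of `AgentType.REACT`:

```python
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.types import AgentType

config = AgentConfig(agent_type="REACT")
assert config.agent_type is AgentType.REACT  # coerced by __post_init__
```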
vectara_agentic/sub_query_workflow.py CHANGED
@@ -2,6 +2,7 @@
 This module contains the SubQuestionQueryEngine workflow, which is a workflow
 that takes a user question and a list of tools, and outputs a list of sub-questions.
 """
+
 import json
 from pydantic import BaseModel
 
@@ -14,6 +15,7 @@ from llama_index.core.workflow import (
     StopEvent,
 )
 
+
 class SubQuestionQueryWorkflow(Workflow):
     """
     Workflow for sub-question query engine.
@@ -24,21 +26,25 @@ class SubQuestionQueryWorkflow(Workflow):
         """
         Inputs for the workflow.
         """
+
        query: str
 
     class OutputsModel(BaseModel):
         """
         Outputs for the workflow.
         """
+
        response: str
 
     # Workflow Event types
     class QueryEvent(Event):
         """Event for a query."""
+
        question: str
 
     class AnswerEvent(Event):
         """Event for an answer."""
+
        question: str
        answer: str
 
@@ -51,35 +57,29 @@ class SubQuestionQueryWorkflow(Workflow):
         """
         if not hasattr(ev, "inputs"):
             raise ValueError("No inputs provided to workflow Start Event.")
-        if hasattr(ev, "inputs") and not isinstance(ev.inputs, self.InputsModel):
+        if not isinstance(ev.inputs, self.InputsModel):
             raise ValueError(f"Expected inputs to be of type {self.InputsModel}")
-        if hasattr(ev, "inputs"):
-            query = ev.inputs.query
-            await ctx.set("original_query", query)
-            print(f"Query is {await ctx.get('original_query')}")
 
-        if hasattr(ev, "agent"):
-            await ctx.set("agent", ev.agent)
-        else:
-            raise ValueError("Agent not provided to workflow Start Event.")
-        chat_history = [str(msg) for msg in ev.agent.memory.get()]
+        query = ev.inputs.query
+        await ctx.set("original_query", query)
+        print(f"Query is {query}")
 
-        if hasattr(ev, "llm"):
-            await ctx.set("llm", ev.llm)
-        else:
-            raise ValueError("LLM not provided to workflow Start Event.")
+        required_attrs = ["agent", "llm", "tools"]
+        for attr in required_attrs:
+            if not hasattr(ev, attr):
+                raise ValueError(
+                    f"{attr.capitalize()} not provided to workflow Start Event."
+                )
 
-        if hasattr(ev, "tools"):
-            await ctx.set("tools", ev.tools)
-        else:
-            raise ValueError("Tools not provided to workflow Start Event.")
+        await ctx.set("agent", ev.agent)
+        await ctx.set("llm", ev.llm)
+        await ctx.set("tools", ev.tools)
+        await ctx.set("verbose", getattr(ev, "verbose", False))
 
-        if hasattr(ev, "verbose"):
-            await ctx.set("verbose", ev.verbose)
-        else:
-            await ctx.set("verbose", False)
+        chat_history = [str(msg) for msg in ev.agent.memory.get()]
 
         llm = await ctx.get("llm")
+        original_query = await ctx.get("original_query")
         response = llm.complete(
             f"""
             Given a user question, and a list of tools, output a list of
@@ -100,7 +100,7 @@ class SubQuestionQueryWorkflow(Workflow):
             the sub-questions could be:
             - What is the largest city within 50 miles of San Francisco? (answer is San Jose)
             - What is the name of the mayor of San Jose?
-            Here is the user question: {await ctx.get('original_query')}.
+            Here is the user question: {original_query}.
             Here are previous chat messages: {chat_history}.
             And here is the list of tools: {ev.tools}
             """,
@@ -109,17 +109,25 @@ class SubQuestionQueryWorkflow(Workflow):
         if await ctx.get("verbose"):
             print(f"Sub-questions are {response}")
 
-        response_obj = json.loads(str(response))
-        sub_questions = response_obj["sub_questions"]
+        if not str(response):
+            raise ValueError(
+                f"No response from LLM when generating sub-questions for query {original_query}"
+            )
 
-        await ctx.set("sub_question_count", len(sub_questions))
+        try:
+            sub_questions = json.loads(str(response))["sub_questions"]
+            if not sub_questions:
+                raise ValueError("LLM returned empty sub-questions list")
+        except (json.JSONDecodeError, KeyError) as e:
+            raise ValueError(f"Invalid LLM response format: {response}") from e
 
+        await ctx.set("sub_question_count", len(sub_questions))
         for question in sub_questions:
             ctx.send_event(self.QueryEvent(question=question))
 
         return None
 
-    @step(num_workers=3)
+    @step(num_workers=4)
     async def sub_question(self, ctx: Context, ev: QueryEvent) -> AnswerEvent:
         """
         Given a sub-question, return the answer to the sub-question, using the agent.
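The stricter parsing pins down the contract with the LLM: the completion must be a JSON object with a non-empty `sub_questions` list, and anything else now fails fast with a descriptive `ValueError` instead of an opaque `JSONDecodeError` or `KeyError`. The expected shape (question text taken from the prompt's own example):

```python
import json

response = """{"sub_questions": [
    "What is the largest city within 50 miles of San Francisco?",
    "What is the name of the mayor of San Jose?"
]}"""
sub_questions = json.loads(response)["sub_questions"]
assert len(sub_questions) == 2
```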
@@ -131,9 +139,7 @@ class SubQuestionQueryWorkflow(Workflow):
         return self.AnswerEvent(question=ev.question, answer=str(response))
 
     @step
-    async def combine_answers(
-        self, ctx: Context, ev: AnswerEvent
-    ) -> StopEvent | None:
+    async def combine_answers(self, ctx: Context, ev: AnswerEvent) -> StopEvent | None:
         """
         Given a list of answers to sub-questions, combine them into a single answer.
         """
@@ -144,10 +150,7 @@ class SubQuestionQueryWorkflow(Workflow):
             return None
 
         answers = "\n\n".join(
-            [
-                f"Question: {event.question}: \n Answer: {event.answer}"
-                for event in ready
-            ]
+            f"Question: {event.question}\nAnswer: {event.answer}" for event in ready
         )
 
         prompt = f"""
@@ -169,8 +172,8 @@ class SubQuestionQueryWorkflow(Workflow):
         if await ctx.get("verbose"):
             print("Final response is", response)
 
-        output = self.OutputsModel(response=str(response))
-        return StopEvent(result=output)
+        return StopEvent(result=self.OutputsModel(response=str(response)))
+
 
 class SequentialSubQuestionsWorkflow(Workflow):
     """
@@ -182,17 +185,20 @@ class SequentialSubQuestionsWorkflow(Workflow):
         """
         Inputs for the workflow.
         """
+
        query: str
 
     class OutputsModel(BaseModel):
         """
         Outputs for the workflow.
         """
+
        response: str
 
     # Workflow Event types
     class QueryEvent(Event):
         """Event for a query."""
+
        question: str
        prev_answer: str
        num: int
@@ -232,11 +238,12 @@ class SequentialSubQuestionsWorkflow(Workflow):
             await ctx.set("verbose", ev.verbose)
         else:
             await ctx.set("verbose", False)
+
+        original_query = await ctx.get("original_query")
         if ev.verbose:
-            print(f"Query is {await ctx.get('original_query')}")
+            print(f"Query is {original_query}")
 
         llm = await ctx.get("llm")
-        orig_query = await ctx.get("original_query")
         response = llm.complete(
             f"""
             Given a user question, and a list of tools, output a list of
@@ -257,12 +264,15 @@ class SequentialSubQuestionsWorkflow(Workflow):
             - Who is the mayor of this city?
             The answer to the first question is San Jose, which is given as context to the second question.
             The answer to the second question is Matt Mahan.
-            Here is the user question: {orig_query}.
+            Here is the user question: {original_query}.
             Here are previous chat messages: {chat_history}.
             And here is the list of tools: {ev.tools}
             """,
         )
 
+        if not str(response):
+            raise ValueError(f"No response from LLM for query {original_query}")
+
         response_obj = json.loads(str(response))
         sub_questions = response_obj["sub_questions"]
 
@@ -273,7 +283,9 @@ class SequentialSubQuestionsWorkflow(Workflow):
         return self.QueryEvent(question=sub_questions[0], prev_answer="", num=0)
 
     @step
-    async def sub_question(self, ctx: Context, ev: QueryEvent) -> StopEvent | QueryEvent:
+    async def sub_question(
+        self, ctx: Context, ev: QueryEvent
+    ) -> StopEvent | QueryEvent:
         """
         Given a sub-question, return the answer to the sub-question, using the agent.
         """
@@ -297,8 +309,8 @@ class SequentialSubQuestionsWorkflow(Workflow):
         if ev.num + 1 < len(sub_questions):
             return self.QueryEvent(
                 question=sub_questions[ev.num + 1],
-                prev_answer = response.response,
-                num=ev.num + 1
+                prev_answer=response.response,
+                num=ev.num + 1,
             )
 
         output = self.OutputsModel(response=response.response)
vectara_agentic/tools.py CHANGED
@@ -30,6 +30,7 @@ LI_packages = {
     "arxiv": ToolType.QUERY,
     "tavily_research": ToolType.QUERY,
     "exa": ToolType.QUERY,
+    "brave": ToolType.QUERY,
     "neo4j": ToolType.QUERY,
     "kuzu": ToolType.QUERY,
     "google": {
@@ -163,7 +164,7 @@ class VectaraTool(FunctionTool):
         except Exception as e:
             err_output = ToolOutput(
                 tool_name=self.metadata.name,
-                content=f"Tool Malfunction: {str(e)}",
+                content=f"Tool {self.metadata.name} Malfunction: {str(e)}",
                 raw_input={"args": args, "kwargs": kwargs},
                 raw_output={"response": str(e)},
             )
@@ -177,7 +178,7 @@ class VectaraTool(FunctionTool):
         except Exception as e:
             err_output = ToolOutput(
                 tool_name=self.metadata.name,
-                content=f"Tool Malfunction: {str(e)}",
+                content=f"Tool {self.metadata.name} Malfunction: {str(e)}",
                 raw_input={"args": args, "kwargs": kwargs},
                 raw_output={"response": str(e)},
             )
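Including the tool name in the error content makes multi-tool traces easier to debug, and it is what the new `test_vectara_tool_validation` test asserts on: calling a tool with an argument missing from its schema (`the_year` instead of `year`) surfaces as a Malfunction string rather than an unhandled exception. A sketch of the resulting message (tool name and exception are illustrative):

```python
tool_name = "rag_tool"
try:
    raise TypeError("unexpected keyword argument 'the_year'")
except Exception as e:
    content = f"Tool {tool_name} Malfunction: {str(e)}"

print(content)  # Tool rag_tool Malfunction: unexpected keyword argument 'the_year'
```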
@@ -539,8 +540,8 @@
                     summary = summaries_dict.get(doc_id, "")
                     tool_output += f"document_id: '{doc_id}'\nmetadata: '{metadata}'\nsummary: '{summary}'\n\n"
             else:
-                for doc in docs:
-                    tool_output += f"document_id: '{doc.id_}'\nmetadata: '{doc.metadata}'\n\n"
+                for doc_id, metadata in docs:
+                    tool_output += f"document_id: '{doc_id}'\nmetadata: '{metadata}'\n\n"
 
             out = ToolOutput(
                 tool_name=search_function.__name__,
vectara_agentic/utils.py CHANGED
@@ -43,7 +43,7 @@ def _get_llm_params_for_role(
     config = config or AgentConfig()  # fallback to default config
 
     if role == LLMRole.TOOL:
-        model_provider = config.tool_llm_provider
+        model_provider = ModelProvider(config.tool_llm_provider)
        # If the user hasn’t explicitly set a tool_llm_model_name,
        # fallback to provider default from provider_to_default_model_name
        model_name = (
@@ -51,7 +51,7 @@ def _get_llm_params_for_role(
             or provider_to_default_model_name.get(model_provider)
         )
     else:
-        model_provider = config.main_llm_provider
+        model_provider = ModelProvider(config.main_llm_provider)
         model_name = (
             config.main_llm_model_name
             or provider_to_default_model_name.get(model_provider)
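Wrapping the configured value in `ModelProvider(...)` makes `_get_llm_params_for_role` robust to configs where the provider is still a raw string; for a value that is already an enum member the constructor returns the member unchanged. Sketch (the string value is assumed):

```python
from vectara_agentic.types import ModelProvider

assert ModelProvider("OPENAI") is ModelProvider.OPENAI
assert ModelProvider(ModelProvider.OPENAI) is ModelProvider.OPENAI
```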
@@ -110,6 +110,7 @@ def get_llm(
         llm = Gemini(
             model=model_name, temperature=0,
             is_function_calling_model=True,
+            allow_parallel_tool_calls=True,
             max_tokens=max_tokens,
         )
     elif model_provider == ModelProvider.TOGETHER:
{vectara_agentic-0.2.8.dist-info → vectara_agentic-0.2.10.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.8
+Version: 0.2.10
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,31 +16,31 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.12.26
+Requires-Dist: llama-index==0.12.29
 Requires-Dist: llama-index-indices-managed-vectara==0.4.2
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
 Requires-Dist: llama-index-agent-openai==0.4.6
-Requires-Dist: llama-index-llms-openai==0.3.25
+Requires-Dist: llama-index-llms-openai==0.3.32
 Requires-Dist: llama-index-llms-anthropic==0.6.10
 Requires-Dist: llama-index-llms-together==0.3.1
 Requires-Dist: llama-index-llms-groq==0.3.1
 Requires-Dist: llama-index-llms-fireworks==0.3.2
 Requires-Dist: llama-index-llms-cohere==0.4.0
-Requires-Dist: llama-index-llms-gemini==0.4.11
-Requires-Dist: llama-index-llms-bedrock==0.3.4
+Requires-Dist: llama-index-llms-gemini==0.4.14
+Requires-Dist: llama-index-llms-bedrock==0.3.8
 Requires-Dist: llama-index-tools-yahoo-finance==0.3.0
 Requires-Dist: llama-index-tools-arxiv==0.3.0
 Requires-Dist: llama-index-tools-database==0.3.0
 Requires-Dist: llama-index-tools-google==0.3.0
 Requires-Dist: llama-index-tools-tavily_research==0.3.0
+Requires-Dist: llama_index.tools.brave_search==0.3.0
 Requires-Dist: llama-index-tools-neo4j==0.3.0
-Requires-Dist: llama-index-graph-stores-kuzu==0.6.0
+Requires-Dist: llama-index-graph-stores-kuzu==0.7.0
 Requires-Dist: llama-index-tools-slack==0.3.0
 Requires-Dist: llama-index-tools-exa==0.3.0
-Requires-Dist: tavily-python==0.5.1
-Requires-Dist: exa-py==1.8.9
-Requires-Dist: yahoo-finance==1.4.0
+Requires-Dist: tavily-python==0.5.4
+Requires-Dist: exa-py==1.9.1
 Requires-Dist: openinference-instrumentation-llama-index==3.3.3
 Requires-Dist: opentelemetry-proto==1.31.0
 Requires-Dist: arize-phoenix==8.14.1
@@ -176,7 +176,7 @@ query_financial_reports_tool = vec_factory.create_rag_tool(
 )
 ```
 
-See the [docs](https://vectara.github.io/vectara-agentic-docs/) for additional arguments to customize your Vectara RAG tool.
+See the [docs](https://vectara.github.io/py-vectara-agentic/latest/) for additional arguments to customize your Vectara RAG tool.
 
 ### 3. Create other tools (optional)
 
@@ -205,7 +205,7 @@ agent = Agent(
 )
 ```
 
-See the [docs](https://vectara.github.io/vectara-agentic-docs/) for additional arguments, including `agent_progress_callback` and `query_logging_callback`.
+See the [docs](https://vectara.github.io/py-vectara-agentic/latest/) for additional arguments, including `agent_progress_callback` and `query_logging_callback`.
 
 ### 5. Run a chat interaction
 
@@ -376,7 +376,7 @@ specified in the Agent configuration.
 - `load_unique_values`: returns the top unique values for a given column
 
 In addition, we include various other tools from LlamaIndex ToolSpecs:
-* Tavily search and EXA.AI
+* Tavily search, EXA.AI and Brave Search
 * arxiv
 * neo4j & Kuzu for Graph DB integration
 * Google tools (including gmail, calendar, and search)
{vectara_agentic-0.2.8.dist-info → vectara_agentic-0.2.10.dist-info}/RECORD RENAMED
@@ -6,24 +6,24 @@ tests/test_agent_type.py,sha256=JM0Q2GBGHSADoBacz_DW551zWSfbpf7qa8xXqtyWsc4,5671
 tests/test_fallback.py,sha256=M5YD7NHZ0joVU1frYIr9_OiRAIje5mrXrYVcekzlyGs,2829
 tests/test_private_llm.py,sha256=CY-_rCpxGUuxnZ3ypkodw5Jj-sJCNdh6rLbCvULwuJI,2247
 tests/test_serialization.py,sha256=Ed23GN2zhSJNdPFrVK4aqLkOhJKviczR_o0t-r9TuRI,4762
-tests/test_tools.py,sha256=IVKn0HoS2erTCr1mOEGzTkktiY0PCfKNvqnD_pizjOg,3977
+tests/test_tools.py,sha256=as0rEAKAs6ekvqFDCcq1smRWKhQm5EaH2PUWT8hg1qQ,5726
 tests/test_workflow.py,sha256=lVyrVHdRO5leYNbYtHTmKqMX0c8_xehCpUA7cXQKVsc,2175
 vectara_agentic/__init__.py,sha256=2GLDS3U6KckK-dBRl9v_x1kSV507gEhjOfuMmmu0Qxg,850
-vectara_agentic/_callback.py,sha256=5PfqjLmuaZIR6dnqmhniTD_zwCgfi7kOu-nexb6Kss4,9688
-vectara_agentic/_observability.py,sha256=fTL3KW0jQU-_JSpFgjO6-XzgDut_oiq9kt4QR-FkSqU,3804
+vectara_agentic/_callback.py,sha256=lU35-Pxp-fsMpOi4woY6oLECAhO1nSmLIy3b8fbgT54,13029
+vectara_agentic/_observability.py,sha256=BA2zhwa5930aaDUJxHefPlmIPt8kZOuLHVBc9PtYNuU,3839
 vectara_agentic/_prompts.py,sha256=LYyiOAiC8imz3U7MSJiuCYAP39afsp7ycXY7-9biyJI,9314
-vectara_agentic/_version.py,sha256=HOBvs3gmojKxd7sNMHt6Q-0_rlFpgzlI1gXNZOS_Fqc,65
-vectara_agentic/agent.py,sha256=ioC6EN86_d7SS1jEZ6CUe6OtetuGmLdWftj5bklPfMs,43522
-vectara_agentic/agent_config.py,sha256=y1hSvU5ns0cE2R7BqF65LFstixF1ytJcoVgicGXo7w0,3691
+vectara_agentic/_version.py,sha256=-h5PMZplLmgj04BNOpvmJ0dvWk8SQqCU6tP_hnySaFE,66
+vectara_agentic/agent.py,sha256=KX0VYQuGFkK_CELjUFdxXWYHng32GFjsLdRdH-gR7aM,43970
+vectara_agentic/agent_config.py,sha256=E-rtYMcpoGxnEAyy8231bizo2n0uGQ2qWxuSgTEfwdQ,4327
 vectara_agentic/agent_endpoint.py,sha256=QIMejCLlpW2qzXxeDAxv3anF46XMDdVMdKGWhJh3azY,1996
 vectara_agentic/db_tools.py,sha256=Go03bzma9m-qDH0CPP8hWhf1nu_4S6s7ke0jGqz58Pk,10296
-vectara_agentic/sub_query_workflow.py,sha256=3WoVnryR2NXyYXbLDM1XVLd7DtbCG0jgrVqeDUN4YNQ,10943
-vectara_agentic/tools.py,sha256=Mm2qfJZWnbNa9G-ycYMP7NPLSo4uUJ9_y45YmXxtlSc,42571
+vectara_agentic/sub_query_workflow.py,sha256=rwiS4e-k75LQvT_WdwoRI8sAJRsL9kiDZlrPwcjYUAE,11120
+vectara_agentic/tools.py,sha256=EC4NAhJy_SB62c2e-fmalWLXtTt6Pgjfi0qE22KxRco,42650
 vectara_agentic/tools_catalog.py,sha256=oiw3wAfbpFhh0_6rMvZsyPqWV6QIzHqhZCNzqRxuyV8,4818
 vectara_agentic/types.py,sha256=HcS7vR8P2v2xQTlOc6ZFV2vvlr3OpzSNWhtcLMxqUZc,1792
-vectara_agentic/utils.py,sha256=nBQqVb4_UNummqVz28DHm3VaKzy8OAq-xSjhU23uxWU,7646
-vectara_agentic-0.2.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-vectara_agentic-0.2.8.dist-info/METADATA,sha256=IV5fm77XOPOvqfcpCZUKRxq9QgnoF3mPu-om_sTKEK8,25046
-vectara_agentic-0.2.8.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-vectara_agentic-0.2.8.dist-info/top_level.txt,sha256=Y7TQTFdOYGYodQRltUGRieZKIYuzeZj2kHqAUpfCUfg,22
-vectara_agentic-0.2.8.dist-info/RECORD,,
+vectara_agentic/utils.py,sha256=4vA5MyNoG47_7eHuLFQByiG_FHWbrQ6ZJDsdqHUwiJA,7720
+vectara_agentic-0.2.10.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vectara_agentic-0.2.10.dist-info/METADATA,sha256=LjjT33W4ISkAKJIk0_tJSxYuF39Z5EY355qrz6dCbR4,25088
+vectara_agentic-0.2.10.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+vectara_agentic-0.2.10.dist-info/top_level.txt,sha256=Y7TQTFdOYGYodQRltUGRieZKIYuzeZj2kHqAUpfCUfg,22
+vectara_agentic-0.2.10.dist-info/RECORD,,