vectara-agentic 0.2.9__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vectara-agentic might be problematic.

tests/test_tools.py CHANGED
@@ -2,7 +2,12 @@ import unittest
 
 from pydantic import Field, BaseModel
 
-from vectara_agentic.tools import VectaraTool, VectaraToolFactory, ToolsFactory, ToolType
+from vectara_agentic.tools import (
+    VectaraTool,
+    VectaraToolFactory,
+    ToolsFactory,
+    ToolType,
+)
 from vectara_agentic.agent import Agent
 from vectara_agentic.agent_config import AgentConfig
 
@@ -13,9 +18,7 @@ class TestToolsPackage(unittest.TestCase):
     def test_vectara_tool_factory(self):
         vectara_corpus_key = "corpus_key"
         vectara_api_key = "api_key"
-        vec_factory = VectaraToolFactory(
-            vectara_corpus_key, vectara_api_key
-        )
+        vec_factory = VectaraToolFactory(vectara_corpus_key, vectara_api_key)
 
         self.assertEqual(vectara_corpus_key, vec_factory.vectara_corpus_key)
         self.assertEqual(vectara_api_key, vec_factory.vectara_api_key)
@@ -46,6 +49,48 @@ class TestToolsPackage(unittest.TestCase):
         self.assertIsInstance(search_tool, FunctionTool)
         self.assertEqual(search_tool.metadata.tool_type, ToolType.QUERY)
 
+    def test_vectara_tool_validation(self):
+        vectara_corpus_key = "corpus_key"
+        vectara_api_key = "api_key"
+        vec_factory = VectaraToolFactory(vectara_corpus_key, vectara_api_key)
+
+        class QueryToolArgs(BaseModel):
+            query: str = Field(description="The user query")
+            year: int = Field(
+                description="The year of the document",
+                example=2023,
+            )
+            ticker: str = Field(
+                description="The stock ticker",
+                example="AAPL",
+            )
+
+        query_tool = vec_factory.create_rag_tool(
+            tool_name="rag_tool",
+            tool_description="""
+            Returns a response (str) to the user query based on the data in this corpus.
+            """,
+            tool_args_schema=QueryToolArgs,
+        )
+        res = query_tool(
+            query="What is the stock price?",
+            the_year=2023,
+        )
+        self.assertIn("Malfunction", str(res))
+
+        search_tool = vec_factory.create_search_tool(
+            tool_name="search_tool",
+            tool_description="""
+            Returns a list of documents (str) that match the user query.
+            """,
+            tool_args_schema=QueryToolArgs,
+        )
+        res = search_tool(
+            query="What is the stock price?",
+            the_year=2023,
+        )
+        self.assertIn("Malfunction", str(res))
+
     def test_tool_factory(self):
         def mult(x: float, y: float) -> float:
             return x * y
@@ -59,17 +104,21 @@ class TestToolsPackage(unittest.TestCase):
     def test_llama_index_tools(self):
         tools_factory = ToolsFactory()
 
-        llama_tools = tools_factory.get_llama_index_tools(
-            tool_package_name="arxiv",
-            tool_spec_name="ArxivToolSpec"
-        )
-
-        arxiv_tool = llama_tools[0]
+        arxiv_tool = tools_factory.get_llama_index_tools(
+            tool_package_name="arxiv", tool_spec_name="ArxivToolSpec"
+        )[0]
 
         self.assertIsInstance(arxiv_tool, VectaraTool)
         self.assertIsInstance(arxiv_tool, FunctionTool)
         self.assertEqual(arxiv_tool.metadata.tool_type, ToolType.QUERY)
 
+        yfinance_tool = tools_factory.get_llama_index_tools(
+            tool_package_name="yahoo_finance", tool_spec_name="YahooFinanceToolSpec"
+        )[0]
+        self.assertIsInstance(yfinance_tool, VectaraTool)
+        self.assertIsInstance(yfinance_tool, FunctionTool)
+        self.assertEqual(yfinance_tool.metadata.tool_type, ToolType.QUERY)
+
     def test_public_repo(self):
         vectara_corpus_key = "vectara-docs_1"
         vectara_api_key = "zqt_UXrBcnI2UXINZkrv4g1tQPhzj02vfdtqYJIDiA"
@@ -80,10 +129,12 @@ class TestToolsPackage(unittest.TestCase):
             tool_name="ask_vectara",
             data_description="data from Vectara website",
             assistant_specialty="RAG as a service",
-            vectara_summarizer="mockingbird-1.0-2024-07-16"
+            vectara_summarizer="mockingbird-1.0-2024-07-16",
         )
 
-        self.assertIn("Vectara is an end-to-end platform", str(agent.chat("What is Vectara?")))
+        self.assertIn(
+            "Vectara is an end-to-end platform", str(agent.chat("What is Vectara?"))
+        )
 
     def test_class_method_as_tool(self):
         class TestClass:
@@ -102,7 +153,7 @@ class TestToolsPackage(unittest.TestCase):
             tools=tools,
             topic=topic,
            custom_instructions=instructions,
-            agent_config=config
+            agent_config=config,
        )
 
        self.assertEqual(
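The new test_vectara_tool_validation test exercises argument validation: calling a generated tool with a keyword that is not part of its tool_args_schema (here the_year instead of year) is surfaced as a tool malfunction in the output rather than an unhandled exception. A minimal standalone sketch of the same behavior, reusing the placeholder credentials from the test:

```python
# Minimal sketch mirroring test_vectara_tool_validation (placeholder credentials).
from pydantic import BaseModel, Field
from vectara_agentic.tools import VectaraToolFactory


class QueryToolArgs(BaseModel):
    query: str = Field(description="The user query")
    year: int = Field(description="The year of the document", example=2023)


vec_factory = VectaraToolFactory("corpus_key", "api_key")
query_tool = vec_factory.create_rag_tool(
    tool_name="rag_tool",
    tool_description="Returns a response (str) to the user query based on the data in this corpus.",
    tool_args_schema=QueryToolArgs,
)

# "the_year" is not declared in QueryToolArgs, so the call is reported as a
# malfunction in the tool output instead of raising.
res = query_tool(query="What is the stock price?", the_year=2023)
assert "Malfunction" in str(res)
```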
vectara_agentic/_observability.py CHANGED
@@ -8,7 +8,7 @@ import pandas as pd
 from .types import ObserverType
 from .agent_config import AgentConfig
 
-def setup_observer(config: AgentConfig) -> bool:
+def setup_observer(config: AgentConfig, verbose: bool) -> bool:
     '''
     Setup the observer.
     '''
@@ -31,7 +31,8 @@ def setup_observer(config: AgentConfig) -> bool:
         tracer_provider = register(endpoint=phoenix_endpoint, project_name="vectara-agentic")
         LlamaIndexInstrumentor().instrument(tracer_provider=tracer_provider)
         return True
-    print("No observer set.")
+    if verbose:
+        print("No observer set.")
     return False
 
 
vectara_agentic/_version.py CHANGED
@@ -1,4 +1,4 @@
 """
 Define the version of the package.
 """
-__version__ = "0.2.9"
+__version__ = "0.2.10"
vectara_agentic/agent.py CHANGED
@@ -252,7 +252,7 @@ class Agent:
 
         # Setup observability
         try:
-            self.observability_enabled = setup_observer(self.agent_config)
+            self.observability_enabled = setup_observer(self.agent_config, self.verbose)
         except Exception as e:
             print(f"Failed to set up observer ({e}), ignoring")
             self.observability_enabled = False
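Paired with the _observability.py change above, the agent now forwards its verbose flag, so the "No observer set." notice is printed only when verbose output is requested. A minimal sketch of calling the helper directly, assuming a default-constructed AgentConfig with no observer configured (normally the Agent does this for you):

```python
# Minimal sketch, assuming AgentConfig() with its defaults has no observer set.
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic._observability import setup_observer

enabled = setup_observer(AgentConfig(), verbose=True)  # prints "No observer set."
print("Observability enabled:", enabled)  # False unless an observer is configured
```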
vectara_agentic/sub_query_workflow.py CHANGED
@@ -2,6 +2,7 @@
 This module contains the SubQuestionQueryEngine workflow, which is a workflow
 that takes a user question and a list of tools, and outputs a list of sub-questions.
 """
+
 import json
 from pydantic import BaseModel
 
@@ -14,6 +15,7 @@ from llama_index.core.workflow import (
     StopEvent,
 )
 
+
 class SubQuestionQueryWorkflow(Workflow):
     """
     Workflow for sub-question query engine.
@@ -24,21 +26,25 @@ class SubQuestionQueryWorkflow(Workflow):
         """
         Inputs for the workflow.
         """
+
         query: str
 
     class OutputsModel(BaseModel):
         """
         Outputs for the workflow.
         """
+
         response: str
 
     # Workflow Event types
     class QueryEvent(Event):
         """Event for a query."""
+
         question: str
 
     class AnswerEvent(Event):
         """Event for an answer."""
+
         question: str
         answer: str
 
@@ -51,35 +57,29 @@ class SubQuestionQueryWorkflow(Workflow):
         """
         if not hasattr(ev, "inputs"):
             raise ValueError("No inputs provided to workflow Start Event.")
-        if hasattr(ev, "inputs") and not isinstance(ev.inputs, self.InputsModel):
+        if not isinstance(ev.inputs, self.InputsModel):
             raise ValueError(f"Expected inputs to be of type {self.InputsModel}")
-        if hasattr(ev, "inputs"):
-            query = ev.inputs.query
-            await ctx.set("original_query", query)
-            print(f"Query is {await ctx.get('original_query')}")
 
-        if hasattr(ev, "agent"):
-            await ctx.set("agent", ev.agent)
-        else:
-            raise ValueError("Agent not provided to workflow Start Event.")
-        chat_history = [str(msg) for msg in ev.agent.memory.get()]
+        query = ev.inputs.query
+        await ctx.set("original_query", query)
+        print(f"Query is {query}")
 
-        if hasattr(ev, "llm"):
-            await ctx.set("llm", ev.llm)
-        else:
-            raise ValueError("LLM not provided to workflow Start Event.")
+        required_attrs = ["agent", "llm", "tools"]
+        for attr in required_attrs:
+            if not hasattr(ev, attr):
+                raise ValueError(
+                    f"{attr.capitalize()} not provided to workflow Start Event."
+                )
 
-        if hasattr(ev, "tools"):
-            await ctx.set("tools", ev.tools)
-        else:
-            raise ValueError("Tools not provided to workflow Start Event.")
+        await ctx.set("agent", ev.agent)
+        await ctx.set("llm", ev.llm)
+        await ctx.set("tools", ev.tools)
+        await ctx.set("verbose", getattr(ev, "verbose", False))
 
-        if hasattr(ev, "verbose"):
-            await ctx.set("verbose", ev.verbose)
-        else:
-            await ctx.set("verbose", False)
+        chat_history = [str(msg) for msg in ev.agent.memory.get()]
 
         llm = await ctx.get("llm")
+        original_query = await ctx.get("original_query")
         response = llm.complete(
             f"""
             Given a user question, and a list of tools, output a list of
@@ -100,7 +100,7 @@ class SubQuestionQueryWorkflow(Workflow):
             the sub-questions could be:
             - What is the largest city within 50 miles of San Francisco? (answer is San Jose)
             - What is the name of the mayor of San Jose?
-            Here is the user question: {await ctx.get('original_query')}.
+            Here is the user question: {original_query}.
             Here are previous chat messages: {chat_history}.
             And here is the list of tools: {ev.tools}
             """,
@@ -109,17 +109,25 @@ class SubQuestionQueryWorkflow(Workflow):
         if await ctx.get("verbose"):
             print(f"Sub-questions are {response}")
 
-        response_obj = json.loads(str(response))
-        sub_questions = response_obj["sub_questions"]
+        if not str(response):
+            raise ValueError(
+                f"No response from LLM when generating sub-questions for query {original_query}"
+            )
 
-        await ctx.set("sub_question_count", len(sub_questions))
+        try:
+            sub_questions = json.loads(str(response))["sub_questions"]
+            if not sub_questions:
+                raise ValueError("LLM returned empty sub-questions list")
+        except (json.JSONDecodeError, KeyError) as e:
+            raise ValueError(f"Invalid LLM response format: {response}") from e
 
+        await ctx.set("sub_question_count", len(sub_questions))
         for question in sub_questions:
             ctx.send_event(self.QueryEvent(question=question))
 
         return None
 
-    @step(num_workers=3)
+    @step(num_workers=4)
     async def sub_question(self, ctx: Context, ev: QueryEvent) -> AnswerEvent:
         """
         Given a sub-question, return the answer to the sub-question, using the agent.
@@ -131,9 +139,7 @@ class SubQuestionQueryWorkflow(Workflow):
         return self.AnswerEvent(question=ev.question, answer=str(response))
 
     @step
-    async def combine_answers(
-        self, ctx: Context, ev: AnswerEvent
-    ) -> StopEvent | None:
+    async def combine_answers(self, ctx: Context, ev: AnswerEvent) -> StopEvent | None:
         """
         Given a list of answers to sub-questions, combine them into a single answer.
         """
@@ -144,10 +150,7 @@ class SubQuestionQueryWorkflow(Workflow):
             return None
 
         answers = "\n\n".join(
-            [
-                f"Question: {event.question}: \n Answer: {event.answer}"
-                for event in ready
-            ]
+            f"Question: {event.question}\nAnswer: {event.answer}" for event in ready
        )
 
        prompt = f"""
@@ -169,8 +172,8 @@ class SubQuestionQueryWorkflow(Workflow):
         if await ctx.get("verbose"):
             print("Final response is", response)
 
-        output = self.OutputsModel(response=str(response))
-        return StopEvent(result=output)
+        return StopEvent(result=self.OutputsModel(response=str(response)))
+
 
 class SequentialSubQuestionsWorkflow(Workflow):
     """
@@ -182,17 +185,20 @@ class SequentialSubQuestionsWorkflow(Workflow):
         """
         Inputs for the workflow.
         """
+
         query: str
 
     class OutputsModel(BaseModel):
         """
         Outputs for the workflow.
         """
+
         response: str
 
     # Workflow Event types
     class QueryEvent(Event):
         """Event for a query."""
+
         question: str
         prev_answer: str
         num: int
@@ -232,11 +238,12 @@ class SequentialSubQuestionsWorkflow(Workflow):
             await ctx.set("verbose", ev.verbose)
         else:
             await ctx.set("verbose", False)
+
+        original_query = await ctx.get("original_query")
         if ev.verbose:
-            print(f"Query is {await ctx.get('original_query')}")
+            print(f"Query is {original_query}")
 
         llm = await ctx.get("llm")
-        orig_query = await ctx.get("original_query")
         response = llm.complete(
             f"""
             Given a user question, and a list of tools, output a list of
@@ -257,12 +264,15 @@ class SequentialSubQuestionsWorkflow(Workflow):
             - Who is the mayor of this city?
             The answer to the first question is San Jose, which is given as context to the second question.
             The answer to the second question is Matt Mahan.
-            Here is the user question: {orig_query}.
+            Here is the user question: {original_query}.
             Here are previous chat messages: {chat_history}.
             And here is the list of tools: {ev.tools}
             """,
         )
 
+        if not str(response):
+            raise ValueError(f"No response from LLM for query {original_query}")
+
         response_obj = json.loads(str(response))
         sub_questions = response_obj["sub_questions"]
 
@@ -273,7 +283,9 @@ class SequentialSubQuestionsWorkflow(Workflow):
         return self.QueryEvent(question=sub_questions[0], prev_answer="", num=0)
 
     @step
-    async def sub_question(self, ctx: Context, ev: QueryEvent) -> StopEvent | QueryEvent:
+    async def sub_question(
+        self, ctx: Context, ev: QueryEvent
+    ) -> StopEvent | QueryEvent:
        """
        Given a sub-question, return the answer to the sub-question, using the agent.
        """
@@ -297,8 +309,8 @@ class SequentialSubQuestionsWorkflow(Workflow):
         if ev.num + 1 < len(sub_questions):
             return self.QueryEvent(
                 question=sub_questions[ev.num + 1],
-                prev_answer = response.response,
-                num=ev.num + 1
+                prev_answer=response.response,
+                num=ev.num + 1,
             )
 
         output = self.OutputsModel(response=response.response)
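The start step now validates its StartEvent up front (raising a ValueError that names whichever of agent, llm, or tools is missing), rejects empty or malformed sub-question JSON from the LLM, and answers sub-questions with num_workers=4. A hypothetical invocation sketch follows; passing agent, llm, tools, and verbose as run() keyword arguments is an assumption based on how llama-index workflows populate the StartEvent, and the agent, llm, and tools objects are presumed to come from an existing vectara-agentic Agent:

```python
# Hypothetical sketch only; not taken from the diff.
from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow


async def run_sub_question_workflow(agent, llm, tools) -> str:
    workflow = SubQuestionQueryWorkflow(timeout=120)
    result = await workflow.run(
        inputs=SubQuestionQueryWorkflow.InputsModel(
            query="Who is the mayor of the largest city within 50 miles of San Francisco?"
        ),
        agent=agent,
        llm=llm,
        tools=tools,
        verbose=True,
    )
    # The workflow stops with an OutputsModel as its result.
    return result.response
```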
vectara_agentic/tools.py CHANGED
@@ -30,6 +30,7 @@ LI_packages = {
     "arxiv": ToolType.QUERY,
     "tavily_research": ToolType.QUERY,
     "exa": ToolType.QUERY,
+    "brave": ToolType.QUERY,
     "neo4j": ToolType.QUERY,
     "kuzu": ToolType.QUERY,
     "google": {
@@ -163,7 +164,7 @@ class VectaraTool(FunctionTool):
         except Exception as e:
             err_output = ToolOutput(
                 tool_name=self.metadata.name,
-                content=f"Tool Malfunction: {str(e)}",
+                content=f"Tool {self.metadata.name} Malfunction: {str(e)}",
                 raw_input={"args": args, "kwargs": kwargs},
                 raw_output={"response": str(e)},
             )
@@ -177,7 +178,7 @@ class VectaraTool(FunctionTool):
         except Exception as e:
             err_output = ToolOutput(
                 tool_name=self.metadata.name,
-                content=f"Tool Malfunction: {str(e)}",
+                content=f"Tool {self.metadata.name} Malfunction: {str(e)}",
                 raw_input={"args": args, "kwargs": kwargs},
                 raw_output={"response": str(e)},
             )
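Two user-visible changes in tools.py: "brave" joins the LlamaIndex tool packages recognized in LI_packages, and a failing tool call now reports which tool malfunctioned. A small sketch of the second change, using ToolsFactory.create_tool on a hypothetical divide function:

```python
# Sketch of the improved error message; `divide` is a hypothetical example tool.
from vectara_agentic.tools import ToolsFactory


def divide(x: float, y: float) -> float:
    """Divide x by y."""
    return x / y


div_tool = ToolsFactory().create_tool(divide)

# The ZeroDivisionError is caught by VectaraTool and returned as a ToolOutput
# whose content now names the tool, e.g. "Tool divide Malfunction: ...".
print(div_tool(x=1.0, y=0.0))
```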
{vectara_agentic-0.2.9.dist-info → vectara_agentic-0.2.10.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vectara_agentic
-Version: 0.2.9
+Version: 0.2.10
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -16,31 +16,31 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llama-index==0.12.26
+Requires-Dist: llama-index==0.12.29
 Requires-Dist: llama-index-indices-managed-vectara==0.4.2
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
 Requires-Dist: llama-index-agent-openai==0.4.6
-Requires-Dist: llama-index-llms-openai==0.3.25
+Requires-Dist: llama-index-llms-openai==0.3.32
 Requires-Dist: llama-index-llms-anthropic==0.6.10
 Requires-Dist: llama-index-llms-together==0.3.1
 Requires-Dist: llama-index-llms-groq==0.3.1
 Requires-Dist: llama-index-llms-fireworks==0.3.2
 Requires-Dist: llama-index-llms-cohere==0.4.0
-Requires-Dist: llama-index-llms-gemini==0.4.11
-Requires-Dist: llama-index-llms-bedrock==0.3.4
+Requires-Dist: llama-index-llms-gemini==0.4.14
+Requires-Dist: llama-index-llms-bedrock==0.3.8
 Requires-Dist: llama-index-tools-yahoo-finance==0.3.0
 Requires-Dist: llama-index-tools-arxiv==0.3.0
 Requires-Dist: llama-index-tools-database==0.3.0
 Requires-Dist: llama-index-tools-google==0.3.0
 Requires-Dist: llama-index-tools-tavily_research==0.3.0
+Requires-Dist: llama_index.tools.brave_search==0.3.0
 Requires-Dist: llama-index-tools-neo4j==0.3.0
-Requires-Dist: llama-index-graph-stores-kuzu==0.6.0
+Requires-Dist: llama-index-graph-stores-kuzu==0.7.0
 Requires-Dist: llama-index-tools-slack==0.3.0
 Requires-Dist: llama-index-tools-exa==0.3.0
-Requires-Dist: tavily-python==0.5.1
-Requires-Dist: exa-py==1.8.9
-Requires-Dist: yahoo-finance==1.4.0
+Requires-Dist: tavily-python==0.5.4
+Requires-Dist: exa-py==1.9.1
 Requires-Dist: openinference-instrumentation-llama-index==3.3.3
 Requires-Dist: opentelemetry-proto==1.31.0
 Requires-Dist: arize-phoenix==8.14.1
@@ -176,7 +176,7 @@ query_financial_reports_tool = vec_factory.create_rag_tool(
 )
 ```
 
-See the [docs](https://vectara.github.io/vectara-agentic-docs/) for additional arguments to customize your Vectara RAG tool.
+See the [docs](https://vectara.github.io/py-vectara-agentic/latest/) for additional arguments to customize your Vectara RAG tool.
 
 ### 3. Create other tools (optional)
 
@@ -205,7 +205,7 @@ agent = Agent(
 )
 ```
 
-See the [docs](https://vectara.github.io/vectara-agentic-docs/) for additional arguments, including `agent_progress_callback` and `query_logging_callback`.
+See the [docs](https://vectara.github.io/py-vectara-agentic/latest/) for additional arguments, including `agent_progress_callback` and `query_logging_callback`.
 
 ### 5. Run a chat interaction
 
@@ -376,7 +376,7 @@ specified in the Agent configuration.
 - `load_unique_values`: returns the top unique values for a given column
 
 In addition, we include various other tools from LlamaIndex ToolSpecs:
-* Tavily search and EXA.AI
+* Tavily search, EXA.AI and Brave Search
 * arxiv
 * neo4j & Kuzu for Graph DB integration
 * Google tools (including gmail, calendar, and search)
{vectara_agentic-0.2.9.dist-info → vectara_agentic-0.2.10.dist-info}/RECORD CHANGED
@@ -6,24 +6,24 @@ tests/test_agent_type.py,sha256=JM0Q2GBGHSADoBacz_DW551zWSfbpf7qa8xXqtyWsc4,5671
 tests/test_fallback.py,sha256=M5YD7NHZ0joVU1frYIr9_OiRAIje5mrXrYVcekzlyGs,2829
 tests/test_private_llm.py,sha256=CY-_rCpxGUuxnZ3ypkodw5Jj-sJCNdh6rLbCvULwuJI,2247
 tests/test_serialization.py,sha256=Ed23GN2zhSJNdPFrVK4aqLkOhJKviczR_o0t-r9TuRI,4762
-tests/test_tools.py,sha256=IVKn0HoS2erTCr1mOEGzTkktiY0PCfKNvqnD_pizjOg,3977
+tests/test_tools.py,sha256=as0rEAKAs6ekvqFDCcq1smRWKhQm5EaH2PUWT8hg1qQ,5726
 tests/test_workflow.py,sha256=lVyrVHdRO5leYNbYtHTmKqMX0c8_xehCpUA7cXQKVsc,2175
 vectara_agentic/__init__.py,sha256=2GLDS3U6KckK-dBRl9v_x1kSV507gEhjOfuMmmu0Qxg,850
 vectara_agentic/_callback.py,sha256=lU35-Pxp-fsMpOi4woY6oLECAhO1nSmLIy3b8fbgT54,13029
-vectara_agentic/_observability.py,sha256=fTL3KW0jQU-_JSpFgjO6-XzgDut_oiq9kt4QR-FkSqU,3804
+vectara_agentic/_observability.py,sha256=BA2zhwa5930aaDUJxHefPlmIPt8kZOuLHVBc9PtYNuU,3839
 vectara_agentic/_prompts.py,sha256=LYyiOAiC8imz3U7MSJiuCYAP39afsp7ycXY7-9biyJI,9314
-vectara_agentic/_version.py,sha256=H4T1Nr91mODgWHnEWr2XhUyVnkEh_ZuEEayYoJS0Iis,65
-vectara_agentic/agent.py,sha256=ItNy6QRfgkk-zePK05k8yhsov_TVh5ScB-BbiQdUbTY,43956
+vectara_agentic/_version.py,sha256=-h5PMZplLmgj04BNOpvmJ0dvWk8SQqCU6tP_hnySaFE,66
+vectara_agentic/agent.py,sha256=KX0VYQuGFkK_CELjUFdxXWYHng32GFjsLdRdH-gR7aM,43970
 vectara_agentic/agent_config.py,sha256=E-rtYMcpoGxnEAyy8231bizo2n0uGQ2qWxuSgTEfwdQ,4327
 vectara_agentic/agent_endpoint.py,sha256=QIMejCLlpW2qzXxeDAxv3anF46XMDdVMdKGWhJh3azY,1996
 vectara_agentic/db_tools.py,sha256=Go03bzma9m-qDH0CPP8hWhf1nu_4S6s7ke0jGqz58Pk,10296
-vectara_agentic/sub_query_workflow.py,sha256=3WoVnryR2NXyYXbLDM1XVLd7DtbCG0jgrVqeDUN4YNQ,10943
-vectara_agentic/tools.py,sha256=vOIevyGhApeZ46UelSTXmKXWE26Z2KtjLgsb4cHP49M,42579
+vectara_agentic/sub_query_workflow.py,sha256=rwiS4e-k75LQvT_WdwoRI8sAJRsL9kiDZlrPwcjYUAE,11120
+vectara_agentic/tools.py,sha256=EC4NAhJy_SB62c2e-fmalWLXtTt6Pgjfi0qE22KxRco,42650
 vectara_agentic/tools_catalog.py,sha256=oiw3wAfbpFhh0_6rMvZsyPqWV6QIzHqhZCNzqRxuyV8,4818
 vectara_agentic/types.py,sha256=HcS7vR8P2v2xQTlOc6ZFV2vvlr3OpzSNWhtcLMxqUZc,1792
 vectara_agentic/utils.py,sha256=4vA5MyNoG47_7eHuLFQByiG_FHWbrQ6ZJDsdqHUwiJA,7720
-vectara_agentic-0.2.9.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-vectara_agentic-0.2.9.dist-info/METADATA,sha256=FfjEtidIWQlsUXY_cdVIxPc6h07M3JwXjQKEj3aCMLY,25046
-vectara_agentic-0.2.9.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-vectara_agentic-0.2.9.dist-info/top_level.txt,sha256=Y7TQTFdOYGYodQRltUGRieZKIYuzeZj2kHqAUpfCUfg,22
-vectara_agentic-0.2.9.dist-info/RECORD,,
+vectara_agentic-0.2.10.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+vectara_agentic-0.2.10.dist-info/METADATA,sha256=LjjT33W4ISkAKJIk0_tJSxYuF39Z5EY355qrz6dCbR4,25088
+vectara_agentic-0.2.10.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+vectara_agentic-0.2.10.dist-info/top_level.txt,sha256=Y7TQTFdOYGYodQRltUGRieZKIYuzeZj2kHqAUpfCUfg,22
+vectara_agentic-0.2.10.dist-info/RECORD,,