vectara-agentic 0.2.12__tar.gz → 0.2.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vectara-agentic might be problematic.

Files changed (38)
  1. {vectara_agentic-0.2.12/vectara_agentic.egg-info → vectara_agentic-0.2.13}/PKG-INFO +335 -230
  2. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/README.md +325 -222
  3. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/requirements.txt +9 -7
  4. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_agent.py +18 -1
  5. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_agent_planning.py +0 -9
  6. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_agent_type.py +40 -0
  7. vectara_agentic-0.2.13/tests/test_tools.py +266 -0
  8. vectara_agentic-0.2.13/tests/test_vectara_llms.py +77 -0
  9. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/_prompts.py +6 -8
  10. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/_version.py +1 -1
  11. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/agent.py +239 -78
  12. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/tools.py +209 -140
  13. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/utils.py +74 -46
  14. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13/vectara_agentic.egg-info}/PKG-INFO +335 -230
  15. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic.egg-info/SOURCES.txt +1 -0
  16. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic.egg-info/requires.txt +9 -7
  17. vectara_agentic-0.2.12/tests/test_tools.py +0 -168
  18. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/LICENSE +0 -0
  19. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/MANIFEST.in +0 -0
  20. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/setup.cfg +0 -0
  21. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/setup.py +0 -0
  22. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/__init__.py +0 -0
  23. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/endpoint.py +0 -0
  24. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_fallback.py +0 -0
  25. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_private_llm.py +0 -0
  26. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_serialization.py +0 -0
  27. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/tests/test_workflow.py +0 -0
  28. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/__init__.py +0 -0
  29. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/_callback.py +0 -0
  30. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/_observability.py +0 -0
  31. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/agent_config.py +0 -0
  32. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/agent_endpoint.py +0 -0
  33. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/db_tools.py +0 -0
  34. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/sub_query_workflow.py +0 -0
  35. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/tools_catalog.py +0 -0
  36. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic/types.py +0 -0
  37. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic.egg-info/dependency_links.txt +0 -0
  38. {vectara_agentic-0.2.12 → vectara_agentic-0.2.13}/vectara_agentic.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: vectara_agentic
- Version: 0.2.12
+ Version: 0.2.13
  Summary: A Python package for creating AI Assistants and AI Agents with Vectara
  Home-page: https://github.com/vectara/py-vectara-agentic
  Author: Ofer Mendelevitch
@@ -16,8 +16,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: llama-index==0.12.30
- Requires-Dist: llama-index-indices-managed-vectara==0.4.2
+ Requires-Dist: llama-index==0.12.31
+ Requires-Dist: llama-index-indices-managed-vectara==0.4.3
  Requires-Dist: llama-index-agent-llm-compiler==0.3.0
  Requires-Dist: llama-index-agent-lats==0.3.0
  Requires-Dist: llama-index-agent-openai==0.4.6
@@ -39,12 +39,14 @@ Requires-Dist: llama-index-tools-neo4j==0.3.0
  Requires-Dist: llama-index-graph-stores-kuzu==0.7.0
  Requires-Dist: llama-index-tools-slack==0.3.0
  Requires-Dist: llama-index-tools-exa==0.3.0
+ Requires-Dist: llama-index-tools-wikipedia==0.3.0
+ Requires-Dist: llama-index-tools-bing-search==0.3.0
  Requires-Dist: tavily-python==0.5.4
- Requires-Dist: exa-py==1.9.1
- Requires-Dist: openinference-instrumentation-llama-index==3.3.3
- Requires-Dist: opentelemetry-proto==1.31.0
- Requires-Dist: arize-phoenix==8.14.1
- Requires-Dist: arize-phoenix-otel==0.8.0
+ Requires-Dist: exa-py==1.12.0
+ Requires-Dist: openinference-instrumentation-llama-index==4.2.1
+ Requires-Dist: opentelemetry-proto==1.32.1
+ Requires-Dist: arize-phoenix==8.26.1
+ Requires-Dist: arize-phoenix-otel==0.9.2
  Requires-Dist: protobuf==5.29.3
  Requires-Dist: tokenizers>=0.20
  Requires-Dist: pydantic==2.10.6
@@ -70,7 +72,7 @@ Dynamic: summary

  <p align="center">
  <a href="https://vectara.github.io/py-vectara-agentic">Documentation</a> ·
- <a href="#examples">Examples</a> ·
+ <a href="#example-ai-assistants">Examples</a> ·
  <a href="https://discord.gg/S9dwgCNEFs">Discord</a>
  </p>

@@ -84,8 +86,24 @@ Dynamic: summary
  <a href="https://twitter.com/vectara">
  <img src="https://img.shields.io/twitter/follow/vectara.svg?style=social&label=Follow%20%40Vectara" alt="Twitter">
  </a>
+ <a href="https://pypi.org/project/vectara-agentic/">
+ <img src="https://img.shields.io/pypi/v/vectara-agentic.svg" alt="PyPI version">
+ </a>
+ <a href="https://pypi.org/project/vectara-agentic/">
+ <img src="https://img.shields.io/pypi/pyversions/vectara-agentic.svg" alt="Python versions">
+ </a>
  </p>

+ ## 📑 Table of Contents
+
+ - [Overview](#-overview)
+ - [Quick Start](#-quick-start)
+ - [Using Tools](#using-tools)
+ - [Advanced Usage: Workflows](#advanced-usage-workflows)
+ - [Configuration](#️-configuration)
+ - [Contributing](#-contributing)
+ - [License](#-license)
+
  ## ✨ Overview

  `vectara-agentic` is a Python library for developing powerful AI assistants and agents using Vectara and Agentic-RAG. It leverages the LlamaIndex Agent framework and provides helper functions to quickly create tools that connect to Vectara corpora.
@@ -107,9 +125,9 @@ Dynamic: summary
  - **Observability:**
  Built-in support with Arize Phoenix for monitoring and feedback.
  - **Workflow Support:**
- Extend your agents capabilities by defining custom workflows using the `run()` method.
+ Extend your agent's capabilities by defining custom workflows using the `run()` method.

- ### 📚 Example AI Assistants
+ ### Example AI Assistants

  Check out our example AI assistants:

@@ -118,14 +136,14 @@ Check out our example AI assistants:
  - [Legal Assistant](https://huggingface.co/spaces/vectara/legal-agent)
  - [EV Assistant](https://huggingface.co/spaces/vectara/ev-assistant)

- ### Prerequisites
+ ### Prerequisites

  - [Vectara account](https://console.vectara.com/signup/?utm_source=github&utm_medium=code&utm_term=DevRel&utm_content=vectara-agentic&utm_campaign=github-code-DevRel-vectara-agentic)
  - A Vectara corpus with an [API key](https://docs.vectara.com/docs/api-keys)
  - [Python 3.10 or higher](https://www.python.org/downloads/)
  - OpenAI API key (or API keys for Anthropic, TOGETHER.AI, Fireworks AI, Bedrock, Cohere, GEMINI or GROQ, if you choose to use them)

- ### Installation
+ ### Installation

  ```bash
  pip install vectara-agentic
@@ -133,6 +151,8 @@ pip install vectara-agentic

  ## 🚀 Quick Start

+ Let's see how we create a simple AI assistant to answer questions about financial data ingested into Vectara, using `vectara-agentic`.
+
  ### 1. Initialize the Vectara tool factory

  ```python
@@ -147,7 +167,7 @@ vec_factory = VectaraToolFactory(

  ### 2. Create a Vectara RAG Tool

- A RAG tool calls the full Vectara RAG pipeline to provide summarized responses to queries grounded in data.
+ A RAG tool calls the full Vectara RAG pipeline to provide summarized responses to queries grounded in data. We define two additional arguments (`year` and `ticker`) that map to filter attributes in the Vectara corpus:

  ```python
  from pydantic import BaseModel, Field
@@ -164,48 +184,60 @@ class QueryFinancialReportsArgs(BaseModel):
  year: int | str = Field(..., description=f"The year this query relates to. An integer between {min(years)} and {max(years)} or a string specifying a condition on the year (example: '>2020').")
  ticker: str = Field(..., description=f"The company ticker. Must be a valid ticker symbol from the list {tickers.keys()}.")

- query_financial_reports_tool = vec_factory.create_rag_tool(
+ ask_finance = vec_factory.create_rag_tool(
  tool_name="query_financial_reports",
  tool_description="Query financial reports for a company and year",
  tool_args_schema=QueryFinancialReportsArgs,
  lambda_val=0.005,
  summary_num_results=7,
- # Additional arguments
+ # Additional Vectara query arguments...
  )
  ```

- Note that we only defined the `year` and `ticker` arguments. The `query` argument is automatically added by `vectara-agentic`.
+ > **Note:** We only defined the `year` and `ticker` arguments in the `QueryFinancialReportsArgs` model. The `query` argument is automatically added by `create_rag_tool`.

- See the [docs](https://vectara.github.io/py-vectara-agentic/latest/) for additional arguments to customize your Vectara RAG tool.
+ To learn about additional arguments of `create_rag_tool`, please see the full [docs](https://vectara.github.io/py-vectara-agentic/latest/).

  ### 3. Create other tools (optional)

- In addition to RAG tools, you can generate a lot of other types of tools the agent can use. These could be mathematical tools, tools
+ In addition to RAG tools or search tools, you can generate additional tools the agent can use. These could be mathematical tools, tools
  that call other APIs to get more information, or any other type of tool.

- See [Agent Tools](#agent-tools) for more information.
+ See [Agent Tools](#️-agent-tools-at-a-glance) for more information.

  ### 4. Create your agent

+ Here is how we instantiate our AI Finance Assistant. First, define your custom instructions:
+
+ ```python
+ financial_assistant_instructions = """
+ - You are a helpful financial assistant, with expertise in financial reporting, in conversation with a user.
+ - Never discuss politics, and always respond politely.
+ - Respond in a compact format by using appropriate units of measure (e.g., K for thousands, M for millions, B for billions).
+ - Do not report the same number twice (e.g. $100K and 100,000 USD).
+ - Always check the get_company_info and get_valid_years tools to validate company and year are valid.
+ - When querying a tool for a numeric value or KPI, use a concise and non-ambiguous description of what you are looking for.
+ - If you calculate a metric, make sure you have all the necessary information to complete the calculation. Don't guess.
+ """
+ ```
+
+ Then just instantiate the `Agent` class:
+
  ```python
  from vectara_agentic import Agent

  agent = Agent(
- tools=[query_financial_reports_tool],
- topic="10-K financial reports",
- custom_instructions="""
- - You are a helpful financial assistant in conversation with a user. Use your financial expertise when crafting a query to the tool, to ensure you get the most accurate information.
- - You can answer questions, provide insights, or summarize any information from financial reports.
- - A user may refer to a company's ticker instead of its full name - consider those the same when a user is asking about a company.
- - When calculating a financial metric, make sure you have all the information from tools to complete the calculation.
- - In many cases you may need to query tools on each sub-metric separately before computing the final metric.
- - When using a tool to obtain financial data, consider the fact that information for a certain year may be reported in the following year's report.
- - Report financial data in a consistent manner. For example if you report revenue in thousands, always report revenue in thousands.
- """
+ tools=[ask_finance],
+ topic="10-K annual financial reports",
+ custom_instructions=financial_assistant_instructions,
+ agent_progress_callback=agent_progress_callback
  )
  ```

- See the [docs](https://vectara.github.io/py-vectara-agentic/latest/) for additional arguments, including `agent_progress_callback` and `query_logging_callback`.
+ The `topic` parameter helps identify the agent's area of expertise, while `custom_instructions` lets you customize how the agent behaves and presents information. The agent will combine these with its default general instructions to determine its complete behavior.
+
+ The `agent_progress_callback` argument is an optional function that will be called when various Agent events occur, and can be used to track agent steps.

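For illustration, a minimal progress callback might look like the sketch below (an editor's example, not part of the package README; the exact callback signature and event types should be verified against the vectara-agentic docs):

```python
# Hypothetical sketch of a progress callback used to track agent steps.
# The actual signature and event payloads are defined by vectara-agentic.
def agent_progress_callback(status_type, msg):
    # status_type identifies the kind of agent event (e.g., a tool call
    # or a tool output); msg carries the associated text payload.
    print(f"[agent step] {status_type}: {msg}")
```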
  ### 5. Run a chat interaction

@@ -214,93 +246,41 @@ res = agent.chat("What was the revenue for Apple in 2021?")
  print(res.response)
  ```

- Note that:
- 1. `vectara-agentic` also supports `achat()` and two streaming variants `stream_chat()` and `astream_chat()`.
- 2. The response types from `chat()` and `achat()` are of type `AgentResponse`. If you just need the actual string
- response it's available as the `response` variable, or just use `str()`. For advanced use-cases you can look
- at other `AgentResponse` variables [such as `sources`](https://github.com/run-llama/llama_index/blob/659f9faaafbecebb6e6c65f42143c0bf19274a37/llama-index-core/llama_index/core/chat_engine/types.py#L53).
-
- ## Advanced Usage: Workflows
-
- In addition to standard chat interactions, `vectara-agentic` supports custom workflows via the `run()` method.
- Workflows allow you to structure multi-step interactions where inputs and outputs are validated using Pydantic models.
- To learn more about workflows read [the documentation](https://docs.llamaindex.ai/en/stable/understanding/workflows/basic_flow/)
-
- ### Defining a Custom Workflow
-
- Create a workflow by subclassing `llama_index.core.workflow.Workflow` and defining the input/output models:
-
- ```python
- from pydantic import BaseModel
- from llama_index.core.workflow import (
- StartEvent,StopEvent, Workflow, step,
- )
-
- class MyWorkflow(Workflow):
- class InputsModel(BaseModel):
- query: str
-
- class OutputsModel(BaseModel):
- answer: str
-
- @step
- async def my_step(self, ev: StartEvent) -> StopEvent:
- # do something here
- return StopEvent(result="Hello, world!")
- ```
-
- When the `run()` method in vectara-agentic is invoked, it calls the workflow with the following variables in the StartEvent:
- * `agent`: the agent object used to call `run()` (self)
- * `tools`: the tools provided to the agent. Those can be used as needed in the flow.
- * `llm`: a pointer to a LlamaIndex llm, so it can be used in the workflow. For example, one of the steps may call `llm.acomplete(prompt)`
- * `verbose`: controls whether extra debug information is displayed
- * `inputs`: this is the actual inputs to the workflow provided by the call to `run()` and must be of type `InputsModel`
-
- ### Using the Workflow with Your Agent
-
- When initializing your agent, pass the workflow class using the `workflow_cls` parameter:
-
- ```python
- agent = Agent(
- tools=[query_financial_reports_tool],
- topic="10-K financial reports",
- custom_instructions="You are a helpful financial assistant.",
- workflow_cls=MyWorkflow, # Provide your custom workflow here
- workflow_timeout=120 # Optional: Set a timeout (default is 120 seconds)
- )
- ```
+ > **Note:**
+ > 1. `vectara-agentic` also supports `achat()` as well as two streaming variants `stream_chat()` and `astream_chat()`.
+ > 2. The response types from `chat()` and `achat()` are of type `AgentResponse`. If you just need the actual string
+ > response it's available as the `response` variable, or just use `str()`. For advanced use-cases you can look
+ > at other `AgentResponse` variables [such as `sources`](https://github.com/run-llama/llama_index/blob/659f9faaafbecebb6e6c65f42143c0bf19274a37/llama-index-core/llama_index/core/chat_engine/types.py#L53).

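As a quick illustration of the async variant described in the note above (editor's sketch; it uses only the calls the note mentions):

```python
import asyncio

# achat() mirrors chat() but is awaitable; it returns the same AgentResponse.
async def main():
    res = await agent.achat("What was the revenue for Apple in 2021?")
    print(res.response)

asyncio.run(main())
```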
- ### Running the Workflow
+ ## Agent Instructions

- Prepare the inputs using your workflow's `InputsModel` and execute the workflow using `run()`:
+ When creating an agent, it already comes with a set of general base instructions, designed carefully to enhance its operation and improve how the agent works.

- ```python
- # Create an instance of the workflow's input model
- inputs = MyWorkflow.InputsModel(query="What is Vectara?", extra_param=42)
+ In addition, you can add `custom_instructions` that are specific to your use case and customize how the agent behaves.

- # Run the workflow (ensure you're in an async context or use asyncio.run)
- workflow_result = asyncio.run(agent.run(inputs))
+ When writing custom instructions:
+ - Focus on behavior and presentation rather than tool usage (that's what tool descriptions are for)
+ - Be precise and clear without overcomplicating
+ - Consider edge cases and unusual scenarios
+ - Avoid over-specifying behavior based on primary use cases
+ - Keep instructions focused on how you want the agent to behave and present information

- # Access the output from the workflow's OutputsModel
- print(workflow_result.answer)
- ```
+ The agent will combine both the general instructions and your custom instructions to determine its behavior.

- ### Using SubQuestionQueryWorkflow
+ It is not recommended to change the general instructions, but it is possible to override them with the optional `general_instructions` parameter. If you do change them, your agent may not work as intended, so be careful if overriding these instructions.

- vectara-agentic already includes one useful workflow you can use right away (it is also useful as an advanced example)
- This workflow is called `SubQuestionQueryWorkflow` and it works by breaking a complex query into sub-queries and then
- executing each sub-query with the agent until it reaches a good response.
+ ## 🧰 Defining Tools

- ## 🧰 Vectara tools
+ ### Vectara tools

- `vectara-agentic` provides two helper functions to connect with Vectara RAG
+ `vectara-agentic` provides two helper functions to connect with Vectara RAG:
  * `create_rag_tool()` to create an agent tool that connects with a Vectara corpus for querying.
  * `create_search_tool()` to create a tool to search a Vectara corpus and return a list of matching documents.

  See the documentation for the full list of arguments for `create_rag_tool()` and `create_search_tool()`,
  to understand how to configure the Vectara query performed by those tools.

- ### Creating a Vectara RAG tool
+ #### Creating a Vectara RAG tool

  A Vectara RAG tool is often the main workhorse for any Agentic RAG application, and enables the agent to query
  one or more Vectara RAG corpora.
@@ -310,7 +290,7 @@ metadata filtering, defined by `tool_args_schema`.

  For example, in the quickstart example the schema is:

- ```
+ ```python
  class QueryFinancialReportsArgs(BaseModel):
  query: str = Field(..., description="The user query.")
  year: int | str = Field(..., description=f"The year this query relates to. An integer between {min(years)} and {max(years)} or a string specifying a condition on the year (example: '>2020').")
@@ -335,19 +315,20 @@ There are also additional cool features supported here:
  Note that `tool_args_type` is an optional dictionary that indicates the level at which metadata filtering
  is applied for each argument (`doc` or `part`).

- ### Creating a Vectara search tool
+ #### Creating a Vectara search tool

  The Vectara search tool allows the agent to list documents that match a query.
  This can be helpful to the agent to answer queries like "how many documents discuss the iPhone?" or other
  similar queries that require a response in terms of a list of matching documents.

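For illustration, a search tool can be created with the same factory as the RAG tool (editor's sketch; the argument names mirror `create_rag_tool` and should be verified against the docs):

```python
# Editor's sketch: a Vectara search tool that returns matching documents
# rather than a summarized answer. Reuses the quickstart schema.
search_finance = vec_factory.create_search_tool(
    tool_name="search_financial_reports",
    tool_description="Search financial reports and return matching documents",
    tool_args_schema=QueryFinancialReportsArgs,
)
```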
- ## 🛠️ Agent Tools at a Glance
+ ### 🛠️ Agent Tools at a Glance

- `vectara-agentic` provides a few tools out of the box (see ToolsCatalog for details):
+ `vectara-agentic` provides a few tools out of the box (see `ToolsCatalog` for details):

- 1. **Standard tools**:
+ **1. Standard tools**
  - `summarize_text`: a tool to summarize a long text into a shorter summary (uses LLM)
  - `rephrase_text`: a tool to rephrase a given text, given a set of rephrase instructions (uses LLM)
+
  These tools use an LLM and so would use the `Tools` LLM specified in your `AgentConfig`.
  To instantiate them:

@@ -359,30 +340,82 @@ summarize_text = ToolsCatalog(agent_config).summarize_text
  This ensures the `summarize_text` tool is configured with the proper LLM provider and model as
  specified in the Agent configuration.

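For example, the instantiated tool is then callable like a regular function (editor's sketch; the parameter names `text` and `expertise` are assumptions to check against the `ToolsCatalog` docs):

```python
# Hypothetical invocation of the summarize_text tool from the catalog.
summary = summarize_text(
    text=long_report_text,          # hypothetical variable holding the input text
    expertise="financial analyst",  # assumed parameter steering the summary
)
print(summary)
```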
- 2. **Legal tools**: a set of tools for the legal vertical, such as:
+ **2. Legal tools**
+ A set of tools for the legal vertical, such as:
  - `summarize_legal_text`: summarize legal text with a certain point of view
  - `critique_as_judge`: critique a legal text as a judge, providing their perspective

- 3. **Financial tools**: based on tools from Yahoo! Finance:
+ **3. Financial tools**
+ Based on tools from Yahoo! Finance:
  - tools to understand the financials of a public company like: `balance_sheet`, `income_statement`, `cash_flow`
  - `stock_news`: provides news about a company
  - `stock_analyst_recommendations`: provides stock analyst recommendations for a company.

- 4. **Database tools**: providing tools to inspect and query a database
+ **4. Database tools**
+ Providing tools to inspect and query a database:
  - `list_tables`: list all tables in the database
  - `describe_tables`: describe the schema of tables in the database
  - `load_data`: returns data based on a SQL query
  - `load_sample_data`: returns the first 25 rows of a table
  - `load_unique_values`: returns the top unique values for a given column

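As a sketch of how this tool group might be attached to an agent (editor's illustration; the `database_tools()` method name, its parameters, and the import path are assumptions to verify against the vectara-agentic docs):

```python
# Hypothetical sketch: creating the database tool group for an agent.
from vectara_agentic.tools import ToolsFactory  # import path is an assumption

db_tools = ToolsFactory().database_tools(
    content_description="Financial data for public companies",  # assumed parameter
    sql_database=my_sql_database,  # hypothetical llama_index SQLDatabase instance
)
```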
- In addition, we include various other tools from LlamaIndex ToolSpecs:
- * Tavily search, EXA.AI and Brave Search
- * arxiv
- * neo4j & Kuzu for Graph DB integration
- * Google tools (including gmail, calendar, and search)
- * Slack
-
- Note that some of these tools may require API keys as environment variables
+ **5. Additional integrations**
+ vectara-agentic includes various other tools from LlamaIndex ToolSpecs:
+
+ * **Search Tools**
+ * Tavily Search: Real-time web search using [Tavily API](https://tavily.com/)
+ ```python
+ from vectara_agentic.tools_catalog import ToolsCatalog
+ tavily_tool = ToolsCatalog(agent_config).tavily_search
+ ```
+ * EXA.AI: Advanced web search and data extraction
+ ```python
+ exa_tool = ToolsCatalog(agent_config).exa_search
+ ```
+ * Brave Search: Web search using Brave's search engine
+ ```python
+ brave_tool = ToolsCatalog(agent_config).brave_search
+ ```
+
+ * **Academic Tools**
+ * arXiv: Search and retrieve academic papers
+ ```python
+ arxiv_tool = ToolsCatalog(agent_config).arxiv_search
+ ```
+
+ * **Graph Database Tools**
+ * Neo4j: Graph database integration
+ ```python
+ neo4j_tool = ToolsCatalog(agent_config).neo4j_query
+ ```
+ * Kuzu: Lightweight graph database
+ ```python
+ kuzu_tool = ToolsCatalog(agent_config).kuzu_query
+ ```
+
+ * **Google Tools**
+ * Gmail: Read and send emails
+ ```python
+ gmail_tool = ToolsCatalog(agent_config).gmail
+ ```
+ * Calendar: Manage calendar events
+ ```python
+ calendar_tool = ToolsCatalog(agent_config).calendar
+ ```
+ * Search: Google search integration
+ ```python
+ google_search_tool = ToolsCatalog(agent_config).google_search
+ ```
+
+ * **Communication Tools**
+ * Slack: Send messages and interact with Slack
+ ```python
+ slack_tool = ToolsCatalog(agent_config).slack
+ ```
+
+ For detailed setup instructions and API key requirements, please refer to the instructions on [LlamaIndex hub](https://llamahub.ai/?tab=tools) for the specific tool.
+
+ ### Creating custom tools

  You can create your own tool directly from a Python function using the `create_tool()` method of the `ToolsFactory` class:

@@ -393,165 +426,237 @@ def mult_func(x, y):
  mult_tool = ToolsFactory().create_tool(mult_func)
  ```

- Note: When you define your own Python functions as tools, implement them at the top module level,
- and not as nested functions. Nested functions are not supported if you use serialization
- (dumps/loads or from_dict/to_dict).
+ > **Important:** When you define your own Python functions as tools, implement them at the top module level,
+ > and not as nested functions. Nested functions are not supported if you use serialization
+ > (dumps/loads or from_dict/to_dict).

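Complementing the note above, here is a slightly fuller sketch of a module-level tool (editor's example; type hints and a docstring generally help the agent decide when and how to call a tool):

```python
# Editor's sketch: a module-level (not nested) function used as a tool.
def earnings_per_share(net_income: float, shares_outstanding: float) -> float:
    """Compute earnings per share (EPS) from net income and share count."""
    return net_income / shares_outstanding

eps_tool = ToolsFactory().create_tool(earnings_per_share)
```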
- ## 🛠️ Configuration
+ ### Tool Validation

- ## Configuring Vectara-agentic
+ When creating an agent, you can enable tool validation by setting `validate_tools=True`. This will check that any tools mentioned in your custom instructions actually exist in the agent's tool set:

- The main way to control the behavior of `vectara-agentic` is by passing an `AgentConfig` object to your `Agent` when creating it.
- For example:
+ ```python
+ agent = Agent(
+ tools=[...],
+ topic="financial reports",
+ custom_instructions="Always use the get_company_info tool first...",
+ validate_tools=True # Will raise an error if get_company_info tool doesn't exist
+ )
+ ```
+
+ This helps catch errors where your instructions reference tools that aren't available to the agent.
+
+ ## 🔄 Advanced Usage: Workflows
+
+ In addition to standard chat interactions, `vectara-agentic` supports custom workflows via the `run()` method.
+ Workflows allow you to structure multi-step interactions where inputs and outputs are validated using Pydantic models.
+ To learn more about workflows, read [the documentation](https://docs.llamaindex.ai/en/stable/understanding/workflows/basic_flow/).
+
+ ### What are Workflows?
+
+ Workflows provide a structured way to handle complex, multi-step interactions with your agent. They're particularly useful when:
+
+ - You need to break down complex queries into simpler sub-questions
+ - You want to implement a specific sequence of operations
+ - You need to maintain state between different steps of a process
+ - You want to parallelize certain operations for better performance
+
+ ### Defining a Custom Workflow
+
+ Create a workflow by subclassing `llama_index.core.workflow.Workflow` and defining the input/output models:

  ```python
- agent_config = AgentConfig(
- agent_type = AgentType.REACT,
- main_llm_provider = ModelProvider.ANTHROPIC,
- main_llm_model_name = 'claude-3-5-sonnet-20241022',
- tool_llm_provider = ModelProvider.TOGETHER,
- tool_llm_model_name = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
+ from pydantic import BaseModel
+ from llama_index.core.workflow import (
+ StartEvent, StopEvent, Workflow, step,
  )

+ class MyWorkflow(Workflow):
+ class InputsModel(BaseModel):
+ query: str
+
+ class OutputsModel(BaseModel):
+ answer: str
+
+ @step
+ async def my_step(self, ev: StartEvent) -> StopEvent:
+ # do something here
+ return StopEvent(result="Hello, world!")
+ ```
+
+ When the `run()` method in vectara-agentic is invoked, it calls the workflow with the following variables in the StartEvent:
+ * `agent`: the agent object used to call `run()` (self)
+ * `tools`: the tools provided to the agent. Those can be used as needed in the flow.
+ * `llm`: a pointer to a LlamaIndex llm, so it can be used in the workflow. For example, one of the steps may call `llm.acomplete(prompt)`
+ * `verbose`: controls whether extra debug information is displayed
+ * `inputs`: this is the actual inputs to the workflow provided by the call to `run()` and must be of type `InputsModel`
+
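For example, a step inside your `Workflow` subclass can read these StartEvent attributes directly (editor's sketch based on the variable list above):

```python
# Editor's sketch: a step that uses the variables vectara-agentic places
# on the StartEvent when agent.run() invokes the workflow.
@step
async def answer_step(self, ev: StartEvent) -> StopEvent:
    question = ev.inputs.query                 # the InputsModel instance passed to run()
    answer = await ev.llm.acomplete(question)  # the agent's LlamaIndex LLM
    return StopEvent(result=str(answer))
```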
+ ### Using the Workflow with Your Agent
+
+ When initializing your agent, pass the workflow class using the `workflow_cls` parameter:
+
+ ```python
  agent = Agent(
  tools=[query_financial_reports_tool],
  topic="10-K financial reports",
- custom_instructions="You are a helpful financial assistant in conversation with a user.",
- agent_config=agent_config
+ custom_instructions="You are a helpful financial assistant.",
+ workflow_cls=MyWorkflow, # Provide your custom workflow here
+ workflow_timeout=120 # Optional: Set a timeout (default is 120 seconds)
  )
  ```

- The `AgentConfig` object may include the following items:
- `agent_type`: the agent type. Valid values are `REACT`, `LLMCOMPILER`, `LATS` or `OPENAI` (default: `OPENAI`).
- `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` or `FIREWORKS` (default: `OPENAI`).
- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider).
- `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
- `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
- `max_reasoning_steps`: the maximum number of reasoning steps (iterations for React and function calls for OpenAI agent, respectively). Defaults to 50.
+ ### Running the Workflow

- If any of these are not provided, `AgentConfig` first tries to read the values from the OS environment.
+ Prepare the inputs using your workflow's `InputsModel` and execute the workflow using `run()`:

- ## Configuring Vectara tools: rag_tool, or search_tool
+ ```python
+ # Create an instance of the workflow's input model
+ inputs = MyWorkflow.InputsModel(query="What is Vectara?", extra_param=42)

- When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key`, and `vectara_corpus_key` to the factory.
+ # Run the workflow (ensure you're in an async context or use asyncio.run)
+ workflow_result = asyncio.run(agent.run(inputs))

- If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY` and `VECTARA_CORPUS_KEY`). Note that `VECTARA_CORPUS_KEY` can be a single KEY or a comma-separated list of KEYs (if you want to query multiple corpora).
+ # Access the output from the workflow's OutputsModel
+ print(workflow_result.answer)
+ ```

- These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.
+ ### Built-in Workflows
+
+ `vectara-agentic` includes two powerful workflow implementations that you can use right away:

- ## Setting up a privately hosted LLM
+ #### 1. `SubQuestionQueryWorkflow`

- If you want to setup vectara-agentic to use your own self-hosted LLM endpoint, follow the example below
+ This workflow breaks down complex queries into simpler sub-questions, executes them in parallel, and then combines the answers:

  ```python
- config = AgentConfig(
- agent_type=AgentType.REACT,
- main_llm_provider=ModelProvider.PRIVATE,
- main_llm_model_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
- private_llm_api_base="http://vllm-server.company.com/v1",
- private_llm_api_key="TEST_API_KEY",
- )
- agent = Agent(agent_config=config, tools=tools, topic=topic,
- custom_instructions=custom_instructions)
- ```
+ from vectara_agentic.sub_query_workflow import SubQuestionQueryWorkflow

- In this case we specify the Main LLM provider to be privately hosted with Llama-3.1-8B as the model.
- - The `ModelProvider.PRIVATE` specifies a privately hosted LLM.
- - The `private_llm_api_base` specifies the api endpoint to use, and the `private_llm_api_key`
- specifies the private API key requires to use this service.
+ agent = Agent(
+ tools=[query_financial_reports_tool],
+ topic="10-K financial reports",
+ custom_instructions="You are a helpful financial assistant.",
+ workflow_cls=SubQuestionQueryWorkflow
+ )

- ## ℹ️ Additional Information
+ # Run the workflow with a complex query
+ inputs = SubQuestionQueryWorkflow.InputsModel(
+ query="Compare Apple's revenue growth to Google's between 2020 and 2023"
+ )
+ result = asyncio.run(agent.run(inputs))
+ print(result.response)
+ ```

- ### About Custom Instructions for your Agent
+ The workflow works in three steps:
+ 1. **Query**: Breaks down the complex query into sub-questions
+ 2. **Sub-question**: Executes each sub-question in parallel (using 4 workers by default)
+ 3. **Combine answers**: Synthesizes all the answers into a coherent response

- The custom instructions you provide to the agent guide its behavior.
- Here are some guidelines when creating your instructions:
- - Write precise and clear instructions, without overcomplicating.
- - Consider edge cases and unusual or atypical scenarios.
- - Be cautious to not over-specify behavior based on your primary use-case, as it may limit the agent's ability to behave properly in others.
+ #### 2. `SequentialSubQuestionsWorkflow`

- ### Diagnostics
+ This workflow is similar to `SubQuestionQueryWorkflow` but executes sub-questions sequentially, where each question can depend on the answer to the previous question:

- The `Agent` class defines a few helpful methods to help you understand the internals of your application.
- * The `report()` method prints out the agent object's type, the tools, and the LLMs used for the main agent and tool calling.
- * The `token_counts()` method tells you how many tokens you have used in the current session for both the main agent and tool calling LLMs. This can be helpful if you want to track spend by token.
+ ```python
+ from vectara_agentic.sub_query_workflow import SequentialSubQuestionsWorkflow

- ### Serialization
+ agent = Agent(
+ tools=[query_financial_reports_tool],
+ topic="10-K financial reports",
+ custom_instructions="You are a helpful financial assistant.",
+ workflow_cls=SequentialSubQuestionsWorkflow
+ )

- The `Agent` class supports serialization. Use the `dumps()` to serialize and `loads()` to read back from a serialized stream.
+ # Run the workflow with a complex query that requires sequential reasoning
+ inputs = SequentialSubQuestionsWorkflow.InputsModel(
+ query="What was the revenue growth rate of the company with the highest market cap in 2022?"
+ )
+ result = asyncio.run(agent.run(inputs))
+ print(result.response)
+ ```

- Note: due to cloudpickle limitations, if a tool contains Python `weakref` objects, serialization won't work and an exception will be raised.
+ The workflow works in two steps:
+ 1. **Query**: Breaks down the complex query into sequential sub-questions
+ 2. **Sub-question**: Executes each sub-question in sequence, passing the answer from one question to the next

- ### Observability
+ ### When to Use Each Workflow Type

- vectara-agentic supports observability via the existing integration of LlamaIndex and Arize Phoenix.
- First, set `VECTARA_AGENTIC_OBSERVER_TYPE` to `ARIZE_PHOENIX` in `AgentConfig` (or env variable).
+ - **Use SubQuestionQueryWorkflow** when:
+ - Your query can be broken down into independent sub-questions
+ - You want to parallelize the execution for better performance
+ - The sub-questions don't depend on each other's answers

- Then you can use Arize Phoenix in three ways:
- 1. **Locally**.
- 1. If you have a local phoenix server that you've run using e.g. `python -m phoenix.server.main serve`, vectara-agentic will send all traces to it.
- 2. If not, vectara-agentic will run a local instance during the agent's lifecycle, and will close it when finished.
- 3. In both cases, traces will be sent to the local instance, and you can see the dashboard at `http://localhost:6006`
- 2. **Hosted Instance**. In this case the traces are sent to the Phoenix instances hosted on Arize.
- 1. Go to `https://app.phoenix.arize.com`, setup an account if you don't have one.
- 2. create an API key and put it in the `PHOENIX_API_KEY` environment variable - this indicates you want to use the hosted version.
- 3. To view the traces go to `https://app.phoenix.arize.com`.
+ - **Use SequentialSubQuestionsWorkflow** when:
+ - Your query requires sequential reasoning
+ - Each sub-question depends on the answer to the previous question
+ - You need to build up information step by step

- Now when you run your agent, all call traces are sent to Phoenix and recorded.
- In addition, vectara-agentic also records `FCS` (factual consistency score, aka HHEM) values into Arize for every Vectara RAG call. You can see those results in the `Feedback` column of the arize UI.
+ - **Create a custom workflow** when:
+ - You have a specific sequence of operations that doesn't fit the built-in workflows
+ - You need to implement complex business logic
+ - You want to integrate with external systems or APIs in a specific way

- ## 🌐 API Endpoint
+ ## 🛠️ Configuration

- `vectara-agentic` can be easily hosted locally or on a remote machine behind an API endpoint, by following theses steps:
+ ### Configuring Vectara-agentic

- ### Step 1: Setup your API key
- Ensure that you have your API key set up as an environment variable:
+ The main way to control the behavior of `vectara-agentic` is by passing an `AgentConfig` object to your `Agent` when creating it.
+ For example:

- ```
- export VECTARA_AGENTIC_API_KEY=<YOUR-ENDPOINT-API-KEY>
- ```
+ ```python
+ from vectara_agentic import AgentConfig, AgentType, ModelProvider

- if you don't specify an Endpoint API key it uses the default "dev-api-key".
+ agent_config = AgentConfig(
+ agent_type = AgentType.REACT,
+ main_llm_provider = ModelProvider.ANTHROPIC,
+ main_llm_model_name = 'claude-3-5-sonnet-20241022',
+ tool_llm_provider = ModelProvider.TOGETHER,
+ tool_llm_model_name = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
+ )

- ### Step 2: Start the API Server
- Initialize the agent and start the FastAPI server by following this example:
+ agent = Agent(
+ tools=[query_financial_reports_tool],
+ topic="10-K financial reports",
+ custom_instructions="You are a helpful financial assistant in conversation with a user.",
+ agent_config=agent_config
+ )
+ ```

+ The `AgentConfig` object may include the following items:
+ - `agent_type`: the agent type. Valid values are `REACT`, `LLMCOMPILER`, `LATS` or `OPENAI` (default: `OPENAI`).
+ - `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` or `FIREWORKS` (default: `OPENAI`).
+ - `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider).
+ - `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
+ - `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
+ - `max_reasoning_steps`: the maximum number of reasoning steps (iterations for React and function calls for OpenAI agent, respectively). Defaults to 50.

- ```
- from vectara_agentic.agent import Agent
- from vectara_agentic.agent_endpoint import start_app
- agent = Agent(...) # Initialize your agent with appropriate parameters
- start_app(agent)
- ```
+ If any of these are not provided, `AgentConfig` first tries to read the values from the OS environment.

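For illustration, the environment fallback means an `AgentConfig` can be left mostly empty (editor's sketch; `VECTARA_AGENTIC_OBSERVER_TYPE` is the variable named elsewhere in this README, and the names of other variables should be checked in the docs):

```python
import os

# Editor's sketch: fields omitted from AgentConfig fall back to the environment.
os.environ["VECTARA_AGENTIC_OBSERVER_TYPE"] = "ARIZE_PHOENIX"
agent_config = AgentConfig()  # unset fields are read from OS environment variables
```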
- You can customize the host and port by passing them as arguments to `start_app()`:
- * Default: host="0.0.0.0" and port=8000.
- For example:
- ```
- start_app(agent, host="0.0.0.0", port=8000)
- ```
+ ### Configuring Vectara tools: `rag_tool` or `search_tool`

- ### Step 3: Access the API Endpoint
- Once the server is running, you can interact with it using curl or any HTTP client. For example:
+ When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key` and `vectara_corpus_key` to the factory.

- ```
- curl -G "http://<remote-server-ip>:8000/chat" \
- --data-urlencode "message=What is Vectara?" \
- -H "X-API-Key: <YOUR-ENDPOINT-API-KEY>"
- ```
+ If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY` and `VECTARA_CORPUS_KEY`). Note that `VECTARA_CORPUS_KEY` can be a single KEY or a comma-separated list of KEYs (if you want to query multiple corpora).

- ## 🤝 Contributing
+ These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.

- We welcome contributions! Please see our [contributing guide](https://github.com/vectara/py-vectara-agentic/blob/main/CONTRIBUTING.md) for more information.
+ ### Setting up a privately hosted LLM

- ## 📝 License
+ If you want to set up `vectara-agentic` to use your own self-hosted LLM endpoint, follow the example below:

- This project is licensed under the Apache 2.0 License. See the [LICENSE](https://github.com/vectara/py-vectara-agentic/blob/master/LICENSE) file for details.
+ ```python
+ from vectara_agentic import AgentConfig, AgentType, ModelProvider
+
+ config = AgentConfig(
+ agent_type=AgentType.REACT,
+ main_llm_provider=ModelProvider.PRIVATE,
+ main_llm_model_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
+ private_llm_api_base="http://vllm-server.company.com/v1",
+ private_llm_api_key="TEST_API_KEY",
+ )

- ## 📞 Contact
+ agent = Agent(
+ agent_config=config,
+ tools=tools,
+ topic=topic,
+ custom_instructions=custom_instructions
+ )
+ ```

- - Website: [vectara.com](https://vectara.com)
- - Twitter: [@vectara](https://twitter.com/vectara)
- - GitHub: [@vectara](https://github.com/vectara)
- - LinkedIn: [@vectara](https://www.linkedin.com/company/vectara/)
- - Discord: [Join our community](https://discord.gg/GFb8gMz6UH)