vectara-agentic 0.1.25__tar.gz → 0.1.26__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of vectara-agentic might be problematic.
- {vectara_agentic-0.1.25/vectara_agentic.egg-info → vectara_agentic-0.1.26}/PKG-INFO +49 -13
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/README.md +47 -11
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/requirements.txt +1 -1
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/_version.py +1 -1
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/agent.py +2 -2
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/tools.py +11 -4
- vectara_agentic-0.1.26/vectara_agentic/tools_catalog.py +155 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26/vectara_agentic.egg-info}/PKG-INFO +49 -13
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic.egg-info/requires.txt +1 -1
- vectara_agentic-0.1.25/vectara_agentic/tools_catalog.py +0 -136
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/LICENSE +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/MANIFEST.in +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/setup.cfg +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/setup.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/tests/__init__.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/tests/test_agent.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/tests/test_tools.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/__init__.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/_callback.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/_observability.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/_prompts.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/agent_config.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/agent_endpoint.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/db_tools.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/types.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/utils.py +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic.egg-info/SOURCES.txt +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic.egg-info/dependency_links.txt +0 -0
- {vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic.egg-info/top_level.txt +0 -0
{vectara_agentic-0.1.25/vectara_agentic.egg-info → vectara_agentic-0.1.26}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: vectara_agentic
-Version: 0.1.25
+Version: 0.1.26
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -21,7 +21,7 @@ Requires-Dist: llama-index-indices-managed-vectara==0.3.1
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
 Requires-Dist: llama-index-agent-openai==0.4.3
-Requires-Dist: llama-index-llms-openai==0.3.
+Requires-Dist: llama-index-llms-openai==0.3.18
 Requires-Dist: llama-index-llms-anthropic==0.6.4
 Requires-Dist: llama-index-llms-together==0.3.1
 Requires-Dist: llama-index-llms-groq==0.3.1
@@ -265,11 +265,22 @@ similar queries that require a response in terms of a list of matching documents
 
 ## 🛠️ Agent Tools at a Glance
 
-`vectara-agentic` provides a few tools out of the box:
+`vectara-agentic` provides a few tools out of the box (see ToolsCatalog for details):
+
 1. **Standard tools**:
 - `summarize_text`: a tool to summarize a long text into a shorter summary (uses LLM)
 - `rephrase_text`: a tool to rephrase a given text, given a set of rephrase instructions (uses LLM)
-
+These tools use an LLM and so would use the `Tools` LLM specified in your `AgentConfig`.
+To instantiate them:
+
+```python
+from vectara_agentic.tools_catalog import ToolsCatalog
+summarize_text = ToolsCatalog(agent_config).summarize_text
+```
+
+This ensures the summarize_text tool is configured with the proper LLM provider and model as
+specified in the Agent configuration.
+
 2. **Legal tools**: a set of tools for the legal vertical, such as:
 - `summarize_legal_text`: summarize legal text with a certain point of view
 - `critique_as_judge`: critique a legal text as a judge, providing their perspective
@@ -306,19 +317,44 @@ mult_tool = ToolsFactory().create_tool(mult_func)
 
 ## 🛠️ Configuration
 
+## Configuring Vectara-agentic
+
 The main way to control the behavior of `vectara-agentic` is by passing an `AgentConfig` object to your `Agent` when creating it.
-
-
-
-
-
-
-
-
+For example:
+
+```python
+agent_config = AgentConfig(
+    agent_type = AgentType.REACT,
+    main_llm_provider = ModelProvider.ANTHROPIC,
+    main_llm_model_name = 'claude-3-5-sonnet-20241022',
+    tool_llm_provider = ModelProvider.TOGETHER,
+    tool_llm_model_name = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
+)
+
+agent = Agent(
+    tools=[query_financial_reports_tool],
+    topic="10-K financial reports",
+    custom_instructions="You are a helpful financial assistant in conversation with a user.",
+    agent_config=agent_config
+)
+```
+
+The `AgentConfig` object may include the following items:
+- `agent_type`: the agent type. Valid values are `REACT`, `LLMCOMPILER`, `LATS` or `OPENAI` (default: `OPENAI`).
+- `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` or `FIREWORKS` (default: `OPENAI`).
+- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider).
+- `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
+- `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
 
 If any of these are not provided, `AgentConfig` first tries to read the values from the OS environment.
 
-
+## Configuring Vectara RAG or search tools
+
+When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key`, `vectara_customer_id`, and `vectara_corpus_id` to the factory.
+
+If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY`, `VECTARA_CUSTOMER_ID` and `VECTARA_CORPUS_ID`). Note that `VECTARA_CORPUS_ID` can be a single ID or a comma-separated list of IDs (if you want to query multiple corpora).
+
+These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.
 
 ## ℹ️ Additional Information
 
{vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/README.md

@@ -198,11 +198,22 @@ similar queries that require a response in terms of a list of matching documents
 
 ## 🛠️ Agent Tools at a Glance
 
-`vectara-agentic` provides a few tools out of the box:
+`vectara-agentic` provides a few tools out of the box (see ToolsCatalog for details):
+
 1. **Standard tools**:
 - `summarize_text`: a tool to summarize a long text into a shorter summary (uses LLM)
 - `rephrase_text`: a tool to rephrase a given text, given a set of rephrase instructions (uses LLM)
-
+These tools use an LLM and so would use the `Tools` LLM specified in your `AgentConfig`.
+To instantiate them:
+
+```python
+from vectara_agentic.tools_catalog import ToolsCatalog
+summarize_text = ToolsCatalog(agent_config).summarize_text
+```
+
+This ensures the summarize_text tool is configured with the proper LLM provider and model as
+specified in the Agent configuration.
+
 2. **Legal tools**: a set of tools for the legal vertical, such as:
 - `summarize_legal_text`: summarize legal text with a certain point of view
 - `critique_as_judge`: critique a legal text as a judge, providing their perspective
@@ -239,19 +250,44 @@ mult_tool = ToolsFactory().create_tool(mult_func)
 
 ## 🛠️ Configuration
 
+## Configuring Vectara-agentic
+
 The main way to control the behavior of `vectara-agentic` is by passing an `AgentConfig` object to your `Agent` when creating it.
-
-
-
-
-
-
-
-
+For example:
+
+```python
+agent_config = AgentConfig(
+    agent_type = AgentType.REACT,
+    main_llm_provider = ModelProvider.ANTHROPIC,
+    main_llm_model_name = 'claude-3-5-sonnet-20241022',
+    tool_llm_provider = ModelProvider.TOGETHER,
+    tool_llm_model_name = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
+)
+
+agent = Agent(
+    tools=[query_financial_reports_tool],
+    topic="10-K financial reports",
+    custom_instructions="You are a helpful financial assistant in conversation with a user.",
+    agent_config=agent_config
+)
+```
+
+The `AgentConfig` object may include the following items:
+- `agent_type`: the agent type. Valid values are `REACT`, `LLMCOMPILER`, `LATS` or `OPENAI` (default: `OPENAI`).
+- `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` or `FIREWORKS` (default: `OPENAI`).
+- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider).
+- `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
+- `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
 
 If any of these are not provided, `AgentConfig` first tries to read the values from the OS environment.
 
-
+## Configuring Vectara RAG or search tools
+
+When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key`, `vectara_customer_id`, and `vectara_corpus_id` to the factory.
+
+If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY`, `VECTARA_CUSTOMER_ID` and `VECTARA_CORPUS_ID`). Note that `VECTARA_CORPUS_ID` can be a single ID or a comma-separated list of IDs (if you want to query multiple corpora).
+
+These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.
 
 ## ℹ️ Additional Information
 
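Taken together, the README additions above describe the new configuration path end to end. A minimal sketch of how the pieces fit, assuming `AgentType` and `ModelProvider` are importable from `vectara_agentic.types` (this diff only shows `LLMRole` and `ToolType` coming from there) and that valid provider API keys are available in the environment:

```python
# Sketch only: combines the AgentConfig example and the ToolsCatalog snippet
# shown in the README diff above. The import location of AgentType/ModelProvider
# is an assumption; the rest mirrors the diffed README text.
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.types import AgentType, ModelProvider  # assumed location
from vectara_agentic.tools_catalog import ToolsCatalog

agent_config = AgentConfig(
    agent_type=AgentType.REACT,
    main_llm_provider=ModelProvider.ANTHROPIC,
    main_llm_model_name="claude-3-5-sonnet-20241022",
    tool_llm_provider=ModelProvider.TOGETHER,
    tool_llm_model_name="meta-llama/Llama-3.3-70B-Instruct-Turbo",
)

# Catalog tools run on the "tool" LLM configured above (TOGETHER in this sketch).
catalog = ToolsCatalog(agent_config)
summary = catalog.summarize_text("... a long 10-K passage ...", expertise="finance")
rephrased = catalog.rephrase_text(summary, instructions="as a 5 year old would say it")
```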
{vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/requirements.txt

@@ -3,7 +3,7 @@ llama-index-indices-managed-vectara==0.3.1
 llama-index-agent-llm-compiler==0.3.0
 llama-index-agent-lats==0.3.0
 llama-index-agent-openai==0.4.3
-llama-index-llms-openai==0.3.
+llama-index-llms-openai==0.3.18
 llama-index-llms-anthropic==0.6.4
 llama-index-llms-together==0.3.1
 llama-index-llms-groq==0.3.1
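The only dependency change in this release is the tighter `llama-index-llms-openai` pin. A small, optional sketch for confirming the resolved versions after upgrading (distribution names as they appear in this diff):

```python
# Sketch only: prints the two distributions touched by this release.
from importlib.metadata import version

print(version("vectara-agentic"))          # expected: 0.1.26
print(version("llama-index-llms-openai"))  # expected: 0.3.18
```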
{vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/agent.py

@@ -397,8 +397,8 @@ class Agent:
                 print(f"- {tool.metadata.name}")
             else:
                 print("- tool without metadata")
-        print(f"Agent LLM = {get_llm(LLMRole.MAIN).metadata.model_name}")
-        print(f"Tool LLM = {get_llm(LLMRole.TOOL).metadata.model_name}")
+        print(f"Agent LLM = {get_llm(LLMRole.MAIN, config=self.agent_config).metadata.model_name}")
+        print(f"Tool LLM = {get_llm(LLMRole.TOOL, config=self.agent_config).metadata.model_name}")
 
     def token_counts(self) -> dict:
         """
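The two changed lines above mean `Agent.report()` now resolves the model names from the agent's own configuration rather than a default one. A small sketch of the same call pattern outside the class, assuming a bare `AgentConfig()` falls back to environment variables and defaults as the README text above states:

```python
# Sketch only: reproduces the get_llm(..., config=...) call pattern from the
# diff above; AgentConfig() with no arguments is assumed to pick up defaults
# or OS environment settings.
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.types import LLMRole
from vectara_agentic.utils import get_llm

config = AgentConfig()
print(f"Agent LLM = {get_llm(LLMRole.MAIN, config=config).metadata.model_name}")
print(f"Tool LLM = {get_llm(LLMRole.TOOL, config=config).metadata.model_name}")
```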
{vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic/tools.py

@@ -19,9 +19,10 @@ from llama_index.core.tools.types import ToolMetadata, ToolOutput
 
 
 from .types import ToolType
-from .tools_catalog import
+from .tools_catalog import ToolsCatalog, get_bad_topics
 from .db_tools import DBLoadSampleData, DBLoadUniqueValues, DBLoadData
 from .utils import is_float
+from .agent_config import AgentConfig
 
 LI_packages = {
     "yahoo_finance": ToolType.QUERY,
@@ -624,6 +625,9 @@ class ToolsFactory:
     A factory class for creating agent tools.
     """
 
+    def __init__(self, agent_config: AgentConfig = None) -> None:
+        self.agent_config = agent_config
+
     def create_tool(self, function: Callable, tool_type: ToolType = ToolType.QUERY) -> VectaraTool:
         """
         Create a tool from a function.
@@ -686,7 +690,8 @@ class ToolsFactory:
         """
         Create a list of standard tools.
         """
-
+        tc = ToolsCatalog(self.agent_config)
+        return [self.create_tool(tool) for tool in [tc.summarize_text, tc.rephrase_text, tc.critique_text]]
 
     def guardrail_tools(self) -> List[FunctionTool]:
         """
@@ -711,7 +716,8 @@ class ToolsFactory:
             """
             Use this tool to summarize legal text with no more than summary_max_length characters.
             """
-
+            tc = ToolsCatalog(self.agent_config)
+            return tc.summarize_text(text, expertise="law")
 
         def critique_as_judge(
             text: str = Field(description="the original text."),
@@ -719,7 +725,8 @@ class ToolsFactory:
             """
             Critique the legal document.
             """
-
+            tc = ToolsCatalog(self.agent_config)
+            return tc.critique_text(
                 text,
                 role="judge",
                 point_of_view="""
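With the new constructor above, a `ToolsFactory` can carry an `AgentConfig` so that the catalog-backed tools it builds use the configured tool LLM. A brief sketch, reusing the README's `mult_func` example (the function body here is an illustrative stand-in):

```python
# Sketch only: the agent_config keyword is the 0.1.26 addition; create_tool()
# keeps its existing signature. mult_func is a hypothetical stand-in for the
# README's example function.
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools import ToolsFactory

def mult_func(x: float, y: float) -> float:
    """Multiply two numbers."""
    return x * y

factory = ToolsFactory(agent_config=AgentConfig())  # AgentConfig() assumed to use env/defaults
mult_tool = factory.create_tool(mult_func)
# Internally, the standard and legal tool builders now construct
# ToolsCatalog(self.agent_config), as shown in the hunks above.
```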
vectara_agentic-0.1.26/vectara_agentic/tools_catalog.py

@@ -0,0 +1,155 @@
+"""
+This module contains the tools catalog for the Vectara Agentic.
+"""
+from typing import List
+from datetime import date
+
+from inspect import signature
+import requests
+
+from pydantic import Field
+
+from .types import LLMRole
+from .agent_config import AgentConfig
+from .utils import get_llm
+
+req_session = requests.Session()
+
+get_headers = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0",
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
+    "Accept-Language": "en-US,en;q=0.5",
+    "Accept-Encoding": "gzip, deflate",
+    "Connection": "keep-alive",
+}
+
+def get_current_date() -> str:
+    """
+    Returns: the current date.
+    """
+    return date.today().strftime("%A, %B %d, %Y")
+
+
+def remove_self_from_signature(func):
+    """Decorator to remove 'self' from a method's signature for introspection."""
+    sig = signature(func)
+    params = list(sig.parameters.values())
+    # Remove the first parameter if it is named 'self'
+    if params and params[0].name == "self":
+        params = params[1:]
+    new_sig = sig.replace(parameters=params)
+    func.__signature__ = new_sig
+    return func
+
+class ToolsCatalog:
+    """
+    A curated set of tools for vectara-agentic
+    """
+    def __init__(self, agent_config: AgentConfig):
+        self.agent_config = agent_config
+
+    @remove_self_from_signature
+    def summarize_text(
+        self,
+        text: str = Field(description="the original text."),
+        expertise: str = Field(
+            description="the expertise to apply to the summarization.",
+        ),
+    ) -> str:
+        """
+        This is a helper tool.
+        Use this tool to summarize text using a given expertise
+        with no more than summary_max_length characters.
+
+        Args:
+            text (str): The original text.
+            expertise (str): The expertise to apply to the summarization.
+
+        Returns:
+            str: The summarized text.
+        """
+        if not isinstance(expertise, str):
+            return "Please provide a valid string for expertise."
+        if not isinstance(text, str):
+            return "Please provide a valid string for text."
+        expertise = "general" if len(expertise) < 3 else expertise.lower()
+        prompt = (
+            f"As an expert in {expertise}, summarize the provided text "
+            "into a concise summary.\n"
+            f"Original text: {text}\nSummary:"
+        )
+        llm = get_llm(LLMRole.TOOL, config=self.agent_config)
+        response = llm.complete(prompt)
+        return response.text
+
+    @remove_self_from_signature
+    def rephrase_text(
+        self,
+        text: str = Field(description="the original text."),
+        instructions: str = Field(description="the specific instructions for how to rephrase the text."),
+    ) -> str:
+        """
+        This is a helper tool.
+        Use this tool to rephrase the text according to the provided instructions.
+        For example, instructions could be "as a 5 year old would say it."
+
+        Args:
+            text (str): The original text.
+            instructions (str): The specific instructions for how to rephrase the text.
+
+        Returns:
+            str: The rephrased text.
+        """
+        prompt = (
+            f"Rephrase the provided text according to the following instructions: {instructions}.\n"
+            "If the input is Markdown, keep the output in Markdown as well.\n"
+            f"Original text: {text}\nRephrased text:"
+        )
+        llm = get_llm(LLMRole.TOOL, config=self.agent_config)
+        response = llm.complete(prompt)
+        return response.text
+
+    @remove_self_from_signature
+    def critique_text(
+        self,
+        text: str = Field(description="the original text."),
+        role: str = Field(default=None, description="the role of the person providing critique."),
+        point_of_view: str = Field(default=None, description="the point of view with which to provide critique."),
+    ) -> str:
+        """
+        This is a helper tool.
+        Critique the text from the specified point of view.
+
+        Args:
+            text (str): The original text.
+            role (str): The role of the person providing critique.
+            point_of_view (str): The point of view with which to provide critique.
+
+        Returns:
+            str: The critique of the text.
+        """
+        if role:
+            prompt = f"As a {role}, critique the provided text from the point of view of {point_of_view}."
+        else:
+            prompt = f"Critique the provided text from the point of view of {point_of_view}."
+        prompt += "\nStructure the critique as bullet points.\n"
+        prompt += f"Original text: {text}\nCritique:"
+        llm = get_llm(LLMRole.TOOL, config=self.agent_config)
+        response = llm.complete(prompt)
+        return response.text
+
+#
+# Guardrails tool: returns list of topics to avoid
+#
+def get_bad_topics() -> List[str]:
+    """
+    Get the list of topics to avoid in the response.
+    """
+    return [
+        "politics",
+        "religion",
+        "violence",
+        "hate speech",
+        "adult content",
+        "illegal activities",
+    ]
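A short usage sketch for the new module above; the sample inputs are illustrative, and the LLM-backed calls require a working tool-LLM provider key:

```python
# Sketch only: exercises the public pieces of the new tools_catalog module.
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools_catalog import ToolsCatalog, get_bad_topics, get_current_date

print(get_current_date())   # e.g. "Friday, March 07, 2025"
print(get_bad_topics())     # guardrail topics: politics, religion, ...

catalog = ToolsCatalog(AgentConfig())  # AgentConfig() assumed to fall back to env/defaults
summary = catalog.summarize_text("... a long contract ...", expertise="law")
critique = catalog.critique_text(
    "... a draft clause ...",
    role="judge",
    point_of_view="fairness to both parties",
)
```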
{vectara_agentic-0.1.25 → vectara_agentic-0.1.26/vectara_agentic.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: vectara_agentic
-Version: 0.1.25
+Version: 0.1.26
 Summary: A Python package for creating AI Assistants and AI Agents with Vectara
 Home-page: https://github.com/vectara/py-vectara-agentic
 Author: Ofer Mendelevitch
@@ -21,7 +21,7 @@ Requires-Dist: llama-index-indices-managed-vectara==0.3.1
 Requires-Dist: llama-index-agent-llm-compiler==0.3.0
 Requires-Dist: llama-index-agent-lats==0.3.0
 Requires-Dist: llama-index-agent-openai==0.4.3
-Requires-Dist: llama-index-llms-openai==0.3.
+Requires-Dist: llama-index-llms-openai==0.3.18
 Requires-Dist: llama-index-llms-anthropic==0.6.4
 Requires-Dist: llama-index-llms-together==0.3.1
 Requires-Dist: llama-index-llms-groq==0.3.1
@@ -265,11 +265,22 @@ similar queries that require a response in terms of a list of matching documents
 
 ## 🛠️ Agent Tools at a Glance
 
-`vectara-agentic` provides a few tools out of the box:
+`vectara-agentic` provides a few tools out of the box (see ToolsCatalog for details):
+
 1. **Standard tools**:
 - `summarize_text`: a tool to summarize a long text into a shorter summary (uses LLM)
 - `rephrase_text`: a tool to rephrase a given text, given a set of rephrase instructions (uses LLM)
-
+These tools use an LLM and so would use the `Tools` LLM specified in your `AgentConfig`.
+To instantiate them:
+
+```python
+from vectara_agentic.tools_catalog import ToolsCatalog
+summarize_text = ToolsCatalog(agent_config).summarize_text
+```
+
+This ensures the summarize_text tool is configured with the proper LLM provider and model as
+specified in the Agent configuration.
+
 2. **Legal tools**: a set of tools for the legal vertical, such as:
 - `summarize_legal_text`: summarize legal text with a certain point of view
 - `critique_as_judge`: critique a legal text as a judge, providing their perspective
@@ -306,19 +317,44 @@ mult_tool = ToolsFactory().create_tool(mult_func)
 
 ## 🛠️ Configuration
 
+## Configuring Vectara-agentic
+
 The main way to control the behavior of `vectara-agentic` is by passing an `AgentConfig` object to your `Agent` when creating it.
-
-
-
-
-
-
-
-
+For example:
+
+```python
+agent_config = AgentConfig(
+    agent_type = AgentType.REACT,
+    main_llm_provider = ModelProvider.ANTHROPIC,
+    main_llm_model_name = 'claude-3-5-sonnet-20241022',
+    tool_llm_provider = ModelProvider.TOGETHER,
+    tool_llm_model_name = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
+)
+
+agent = Agent(
+    tools=[query_financial_reports_tool],
+    topic="10-K financial reports",
+    custom_instructions="You are a helpful financial assistant in conversation with a user.",
+    agent_config=agent_config
+)
+```
+
+The `AgentConfig` object may include the following items:
+- `agent_type`: the agent type. Valid values are `REACT`, `LLMCOMPILER`, `LATS` or `OPENAI` (default: `OPENAI`).
+- `main_llm_provider` and `tool_llm_provider`: the LLM provider for main agent and for the tools. Valid values are `OPENAI`, `ANTHROPIC`, `TOGETHER`, `GROQ`, `COHERE`, `BEDROCK`, `GEMINI` or `FIREWORKS` (default: `OPENAI`).
+- `main_llm_model_name` and `tool_llm_model_name`: agent model name for agent and tools (default depends on provider).
+- `observer`: the observer type; should be `ARIZE_PHOENIX` or if undefined no observation framework will be used.
+- `endpoint_api_key`: a secret key if using the API endpoint option (defaults to `dev-api-key`)
 
 If any of these are not provided, `AgentConfig` first tries to read the values from the OS environment.
 
-
+## Configuring Vectara RAG or search tools
+
+When creating a `VectaraToolFactory`, you can pass in a `vectara_api_key`, `vectara_customer_id`, and `vectara_corpus_id` to the factory.
+
+If not passed in, it will be taken from the environment variables (`VECTARA_API_KEY`, `VECTARA_CUSTOMER_ID` and `VECTARA_CORPUS_ID`). Note that `VECTARA_CORPUS_ID` can be a single ID or a comma-separated list of IDs (if you want to query multiple corpora).
+
+These values will be used as credentials when creating Vectara tools - in `create_rag_tool()` and `create_search_tool()`.
 
 ## ℹ️ Additional Information
 
{vectara_agentic-0.1.25 → vectara_agentic-0.1.26}/vectara_agentic.egg-info/requires.txt

@@ -3,7 +3,7 @@ llama-index-indices-managed-vectara==0.3.1
 llama-index-agent-llm-compiler==0.3.0
 llama-index-agent-lats==0.3.0
 llama-index-agent-openai==0.4.3
-llama-index-llms-openai==0.3.
+llama-index-llms-openai==0.3.18
 llama-index-llms-anthropic==0.6.4
 llama-index-llms-together==0.3.1
 llama-index-llms-groq==0.3.1
vectara_agentic-0.1.25/vectara_agentic/tools_catalog.py

@@ -1,136 +0,0 @@
-"""
-This module contains the tools catalog for the Vectara Agentic.
-"""
-from typing import List
-from functools import lru_cache
-from datetime import date
-import requests
-
-from pydantic import Field
-
-from .types import LLMRole
-from .utils import get_llm
-
-req_session = requests.Session()
-
-get_headers = {
-    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0",
-    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
-    "Accept-Language": "en-US,en;q=0.5",
-    "Accept-Encoding": "gzip, deflate",
-    "Connection": "keep-alive",
-}
-
-def get_current_date() -> str:
-    """
-    Returns: the current date.
-    """
-    return date.today().strftime("%A, %B %d, %Y")
-
-#
-# Standard Tools
-#
-@lru_cache(maxsize=None)
-def summarize_text(
-    text: str = Field(description="the original text."),
-    expertise: str = Field(
-        description="the expertise to apply to the summarization.",
-    ),
-) -> str:
-    """
-    This is a helper tool.
-    Use this tool to summarize text using a given expertise
-    with no more than summary_max_length characters.
-
-    Args:
-        text (str): The original text.
-        expertise (str): The expertise to apply to the summarization.
-
-    Returns:
-        str: The summarized text.
-    """
-    if not isinstance(expertise, str):
-        return "Please provide a valid string for expertise."
-    if not isinstance(text, str):
-        return "Please provide a valid string for text."
-    expertise = "general" if len(expertise) < 3 else expertise.lower()
-    prompt = f"As an expert in {expertise}, summarize the provided text"
-    prompt += " into a concise summary."
-    prompt += f"\noriginal text: {text}\nsummary:"
-    llm = get_llm(LLMRole.TOOL)
-    response = llm.complete(prompt)
-    return response.text
-
-
-@lru_cache(maxsize=None)
-def rephrase_text(
-    text: str = Field(description="the original text."),
-    instructions: str = Field(description="the specific instructions for how to rephrase the text."),
-) -> str:
-    """
-    This is a helper tool.
-    Use this tool to rephrase the text according to the provided instructions.
-    For example, instructions could be "as a 5 year old would say it."
-
-    Args:
-        text (str): The original text.
-        instructions (str): The specific instructions for how to rephrase the text.
-
-    Returns:
-        str: The rephrased text.
-    """
-    prompt = f"""
-    Rephrase the provided text according to the following instructions: {instructions}.
-    If the input is Markdown, keep the output in Markdown as well.
-    original text: {text}
-    rephrased text:
-    """
-    llm = get_llm(LLMRole.TOOL)
-    response = llm.complete(prompt)
-    return response.text
-
-
-@lru_cache(maxsize=None)
-def critique_text(
-    text: str = Field(description="the original text."),
-    role: str = Field(default=None, description="the role of the person providing critique."),
-    point_of_view: str = Field(default=None, description="the point of view with which to provide critique."),
-) -> str:
-    """
-    This is a helper tool.
-    Critique the text from the specified point of view.
-
-    Args:
-        text (str): The original text.
-        role (str): The role of the person providing critique.
-        point_of_view (str): The point of view with which to provide critique.
-
-    Returns:
-        str: The critique of the text.
-    """
-    if role:
-        prompt = f"As a {role}, critique the provided text from the point of view of {point_of_view}."
-    else:
-        prompt = f"Critique the provided text from the point of view of {point_of_view}."
-    prompt += "Structure the critique as bullet points.\n"
-    prompt += f"Original text: {text}\nCritique:"
-    llm = get_llm(LLMRole.TOOL)
-    response = llm.complete(prompt)
-    return response.text
-
-
-#
-# Guardrails tool: returns list of topics to avoid
-#
-def get_bad_topics() -> List[str]:
-    """
-    Get the list of topics to avoid in the response.
-    """
-    return [
-        "politics",
-        "religion",
-        "violence",
-        "hate speech",
-        "adult content",
-        "illegal activities",
-    ]
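For anyone upgrading, the removal above plus the new file earlier in this diff amount to a small migration: 0.1.25 exposed `summarize_text`, `rephrase_text`, and `critique_text` as module-level, `lru_cache`-wrapped functions that called `get_llm(LLMRole.TOOL)` with no config, while 0.1.26 replaces them with `ToolsCatalog` methods that take the `AgentConfig` explicitly. A hedged before/after sketch:

```python
# 0.1.25 (file removed above) - module-level function, implicit LLM config:
#   from vectara_agentic.tools_catalog import summarize_text
#   summary = summarize_text("some text", expertise="law")
#
# 0.1.26 - class method, explicit AgentConfig (AgentConfig() assumed to use env/defaults):
from vectara_agentic.agent_config import AgentConfig
from vectara_agentic.tools_catalog import ToolsCatalog

summary = ToolsCatalog(AgentConfig()).summarize_text("some text", expertise="law")
```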
All other files listed above are unchanged between 0.1.25 and 0.1.26 (their paths differ only by the version directory prefix).